From a1ca65bd808f146acd9ec28d1a2ffb14be418c59 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Tue, 6 Aug 2019 15:47:52 +0300 Subject: [PATCH] putio: add new backend --- README.md | 2 +- backend/all/all.go | 1 + backend/putio/fs.go | 691 ++++++++++++++++++++++++++++++++ backend/putio/object.go | 276 +++++++++++++ backend/putio/putio.go | 72 ++++ backend/putio/putio_test.go | 16 + bin/make_manual.py | 1 + docs/content/about.md | 2 +- docs/content/docs.md | 1 + docs/content/overview.md | 2 + docs/content/putio.md | 97 +++++ docs/content/webdav.md | 25 -- docs/layouts/chrome/navbar.html | 1 + fstest/test_all/config.yaml | 4 + 14 files changed, 1164 insertions(+), 27 deletions(-) create mode 100644 backend/putio/fs.go create mode 100644 backend/putio/object.go create mode 100644 backend/putio/putio.go create mode 100644 backend/putio/putio_test.go create mode 100644 docs/content/putio.md diff --git a/README.md b/README.md index 1d1cc198a..f9ce53eda 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud) * pCloud [:page_facing_up:](https://rclone.org/pcloud/) * premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/) - * put.io [:page_facing_up:](https://rclone.org/webdav/#put-io) + * put.io [:page_facing_up:](https://rclone.org/putio/) * QingStor [:page_facing_up:](https://rclone.org/qingstor/) * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/) * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway) diff --git a/backend/all/all.go b/backend/all/all.go index 2a5951579..6ccf6163c 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -25,6 +25,7 @@ import ( _ "github.com/rclone/rclone/backend/opendrive" _ "github.com/rclone/rclone/backend/pcloud" _ "github.com/rclone/rclone/backend/premiumizeme" + _ "github.com/rclone/rclone/backend/putio" _ "github.com/rclone/rclone/backend/qingstor" _ 
"github.com/rclone/rclone/backend/s3" _ "github.com/rclone/rclone/backend/sftp" diff --git a/backend/putio/fs.go b/backend/putio/fs.go new file mode 100644 index 000000000..31698c611 --- /dev/null +++ b/backend/putio/fs.go @@ -0,0 +1,691 @@ +package putio + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/putdotio/go-putio/putio" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/oauthutil" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/readers" +) + +// Fs represents a remote Putio server +type Fs struct { + name string // name of this remote + root string // the path we are working on + features *fs.Features // optional features + client *putio.Client // client for making API calls to Put.io + pacer *fs.Pacer // To pace the API calls + dirCache *dircache.DirCache // Map of directory path to directory id + oAuthClient *http.Client +} + +// ------------------------------------------------------------ + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String converts this Fs to a string +func (f *Fs) String() string { + return fmt.Sprintf("Putio root '%s'", f.root) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// shouldRetry returns a boolean as to whether this err deserves to be +// retried. 
It returns the err as a convenience +func shouldRetry(err error) (bool, error) { + if err == nil { + return false, nil + } + if fserrors.ShouldRetry(err) { + return true, err + } + if perr, ok := err.(*putio.ErrorResponse); ok { + if perr.Response.StatusCode == 429 || perr.Response.StatusCode >= 500 { + return true, err + } + } + return false, err +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(name, root string, m configmap.Mapper) (f fs.Fs, err error) { + // defer log.Trace(name, "root=%v", root)("f=%+v, err=%v", &f, &err) + oAuthClient, _, err := oauthutil.NewClient(name, m, putioConfig) + if err != nil { + return nil, errors.Wrap(err, "failed to configure putio") + } + p := &Fs{ + name: name, + root: root, + pacer: fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + client: putio.NewClient(oAuthClient), + oAuthClient: oAuthClient, + } + p.features = (&fs.Features{ + DuplicateFiles: true, + ReadMimeType: true, + CanHaveEmptyDirectories: true, + }).Fill(p) + p.dirCache = dircache.New(root, "0", p) + ctx := context.Background() + // Find the current root + err = p.dirCache.FindRoot(ctx, false) + if err != nil { + // Assume it is a file + newRoot, remote := dircache.SplitPath(root) + tempF := *p + tempF.dirCache = dircache.New(newRoot, "0", &tempF) + tempF.root = newRoot + // Make new Fs which is the parent + err = tempF.dirCache.FindRoot(ctx, false) + if err != nil { + // No root so return old f + return p, nil + } + _, err := tempF.NewObject(ctx, remote) + if err != nil { + // unable to list folder so return old f + return p, nil + } + // XXX: update the old f here instead of returning tempF, since + // `features` were already filled with functions having *f as a receiver. 
+ // See https://github.com/rclone/rclone/issues/2182 + p.dirCache = tempF.dirCache + p.root = tempF.root + return p, fs.ErrorIsFile + } + // fs.Debugf(p, "Root id: %s", p.dirCache.RootID()) + return p, nil +} + +func itoa(i int64) string { + return strconv.FormatInt(i, 10) +} + +func atoi(a string) int64 { + i, err := strconv.ParseInt(a, 10, 64) + if err != nil { + panic(err) + } + return i +} + +// CreateDir makes a directory with pathID as parent and name leaf +func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) { + // defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("newID=%v, err=%v", newID, &err) + parentID := atoi(pathID) + var entry putio.File + err = f.pacer.Call(func() (bool, error) { + // fs.Debugf(f, "creating folder. part: %s, parentID: %d", leaf, parentID) + entry, err = f.client.Files.CreateFolder(ctx, leaf, parentID) + return shouldRetry(err) + }) + return itoa(entry.ID), err +} + +// FindLeaf finds a directory of name leaf in the folder with ID pathID +func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) { + // defer log.Trace(f, "pathID=%v, leaf=%v", pathID, leaf)("pathIDOut=%v, found=%v, err=%v", pathIDOut, found, &err) + if pathID == "0" && leaf == "" { + // that's the root directory + return pathID, true, nil + } + fileID := atoi(pathID) + var children []putio.File + err = f.pacer.Call(func() (bool, error) { + // fs.Debugf(f, "listing file: %d", fileID) + children, _, err = f.client.Files.List(ctx, fileID) + return shouldRetry(err) + }) + if err != nil { + if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 { + err = nil + } + return + } + for _, child := range children { + if child.Name == leaf { + found = true + pathIDOut = itoa(child.ID) + if !child.IsDir() { + err = fs.ErrorNotAFile + } + return + } + } + return +} + +// List the objects and directories in dir into entries. 
The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + // defer log.Trace(f, "dir=%v", dir)("err=%v", &err) + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + return nil, err + } + directoryID, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + return nil, err + } + parentID := atoi(directoryID) + var children []putio.File + err = f.pacer.Call(func() (bool, error) { + // fs.Debugf(f, "listing files inside List: %d", parentID) + children, _, err = f.client.Files.List(ctx, parentID) + return shouldRetry(err) + }) + if err != nil { + return + } + for _, child := range children { + remote := path.Join(dir, child.Name) + // fs.Debugf(f, "child: %s", remote) + if child.IsDir() { + f.dirCache.Put(remote, itoa(child.ID)) + d := fs.NewDir(remote, child.UpdatedAt.Time) + entries = append(entries, d) + } else { + o, err := f.newObjectWithInfo(ctx, remote, child) + if err != nil { + return nil, err + } + entries = append(entries, o) + } + } + return +} + +// Put the object +// +// Copy the reader in to the new object which is returned +// +// The new object may have been created if an error is returned +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) { + // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err) + exisitingObj, err := f.NewObject(ctx, src.Remote()) + switch err { + case nil: + return exisitingObj, exisitingObj.Update(ctx, in, src, options...) + case fs.ErrorObjectNotFound: + // Not found so create it + return f.PutUnchecked(ctx, in, src, options...) 
+ default: + return nil, err + } +} + +// PutUnchecked uploads the object +// +// This will create a duplicate if we upload a new file without +// checking to see if there is one already - use Put() for that. +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (o fs.Object, err error) { + // defer log.Trace(f, "src=%+v", src)("o=%+v, err=%v", &o, &err) + size := src.Size() + remote := src.Remote() + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true) + if err != nil { + return nil, err + } + loc, err := f.createUpload(ctx, leaf, size, directoryID, src.ModTime(ctx)) + if err != nil { + return nil, err + } + fileID, err := f.sendUpload(ctx, loc, size, in) + if err != nil { + return nil, err + } + var entry putio.File + err = f.pacer.Call(func() (bool, error) { + // fs.Debugf(f, "getting file: %d", fileID) + entry, err = f.client.Files.Get(ctx, fileID) + return shouldRetry(err) + }) + if err != nil { + return nil, err + } + return f.newObjectWithInfo(ctx, remote, entry) +} + +func (f *Fs) createUpload(ctx context.Context, name string, size int64, parentID string, modTime time.Time) (location string, err error) { + // defer log.Trace(f, "name=%v, size=%v, parentID=%v, modTime=%v", name, size, parentID, modTime.String())("location=%v, err=%v", location, &err) + err = f.pacer.Call(func() (bool, error) { + req, err := http.NewRequest("POST", "https://upload.put.io/files/", nil) + if err != nil { + return false, err + } + req.Header.Set("tus-resumable", "1.0.0") + req.Header.Set("upload-length", strconv.FormatInt(size, 10)) + b64name := base64.StdEncoding.EncodeToString([]byte(name)) + b64true := base64.StdEncoding.EncodeToString([]byte("true")) + b64parentID := base64.StdEncoding.EncodeToString([]byte(parentID)) + b64modifiedAt := base64.StdEncoding.EncodeToString([]byte(modTime.Format(time.RFC3339))) + req.Header.Set("upload-metadata", fmt.Sprintf("name %s,no-torrent %s,parent_id %s,updated-at 
%s", b64name, b64true, b64parentID, b64modifiedAt)) + resp, err := f.oAuthClient.Do(req) + retry, err := shouldRetry(err) + if retry { + return true, err + } + if err != nil { + return false, err + } + if resp.StatusCode != 201 { + return false, fmt.Errorf("unexpected status code from upload create: %d", resp.StatusCode) + } + location = resp.Header.Get("location") + if location == "" { + return false, errors.New("empty location header from upload create") + } + return false, nil + }) + return +} + +func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.Reader) (fileID int64, err error) { + // defer log.Trace(f, "location=%v, size=%v", location, size)("fileID=%v, err=%v", fileID, &err) + if size == 0 { + err = f.pacer.Call(func() (bool, error) { + fs.Debugf(f, "Sending zero length chunk") + fileID, err = f.transferChunk(ctx, location, 0, bytes.NewReader([]byte{}), 0) + return shouldRetry(err) + }) + return + } + var start int64 + buf := make([]byte, defaultChunkSize) + for start < size { + reqSize := size - start + if reqSize >= int64(defaultChunkSize) { + reqSize = int64(defaultChunkSize) + } + chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, reqSize) + + // Transfer the chunk + err = f.pacer.Call(func() (bool, error) { + fs.Debugf(f, "Sending chunk. 
start: %d length: %d", start, reqSize) + // TODO get file offset and seek to the position + fileID, err = f.transferChunk(ctx, location, start, chunk, reqSize) + return shouldRetry(err) + }) + if err != nil { + return + } + + start += reqSize + } + return +} + +func (f *Fs) transferChunk(ctx context.Context, location string, start int64, chunk io.ReadSeeker, chunkSize int64) (fileID int64, err error) { + // defer log.Trace(f, "location=%v, start=%v, chunkSize=%v", location, start, chunkSize)("fileID=%v, err=%v", fileID, &err) + _, _ = chunk.Seek(0, io.SeekStart) + req, err := f.makeUploadPatchRequest(location, chunk, start, chunkSize) + if err != nil { + return 0, err + } + req = req.WithContext(ctx) + res, err := f.oAuthClient.Do(req) + if err != nil { + return 0, err + } + defer func() { + _ = res.Body.Close() + }() + if res.StatusCode != 204 { + return 0, fmt.Errorf("unexpected status code while transferring chunk: %d", res.StatusCode) + } + sfid := res.Header.Get("putio-file-id") + if sfid != "" { + fileID, err = strconv.ParseInt(sfid, 10, 64) + if err != nil { + return 0, err + } + } + return fileID, nil +} + +func (f *Fs) makeUploadPatchRequest(location string, in io.Reader, offset, length int64) (*http.Request, error) { + req, err := http.NewRequest("PATCH", location, in) + if err != nil { + return nil, err + } + req.Header.Set("tus-resumable", "1.0.0") + req.Header.Set("upload-offset", strconv.FormatInt(offset, 10)) + req.Header.Set("content-length", strconv.FormatInt(length, 10)) + req.Header.Set("content-type", "application/offset+octet-stream") + return req, nil +} + +// Mkdir creates the container if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { + // defer log.Trace(f, "dir=%v", dir)("err=%v", &err) + err = f.dirCache.FindRoot(ctx, true) + if err != nil { + return err + } + if dir != "" { + _, err = f.dirCache.FindDir(ctx, dir, true) + } + return err +} + +// Rmdir deletes the container +// +// Returns an error if 
it isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { + // defer log.Trace(f, "dir=%v", dir)("err=%v", &err) + + root := strings.Trim(path.Join(f.root, dir), "/") + + // can't remove root + if root == "" { + return errors.New("can't remove root directory") + } + + // check directory exists + directoryID, err := f.dirCache.FindDir(ctx, dir, false) + if err != nil { + return errors.Wrap(err, "Rmdir") + } + dirID := atoi(directoryID) + + // check directory empty + var children []putio.File + err = f.pacer.Call(func() (bool, error) { + // fs.Debugf(f, "listing files: %d", dirID) + children, _, err = f.client.Files.List(ctx, dirID) + return shouldRetry(err) + }) + if err != nil { + return errors.Wrap(err, "Rmdir") + } + if len(children) != 0 { + return errors.New("directory not empty") + } + + // remove it + err = f.pacer.Call(func() (bool, error) { + // fs.Debugf(f, "deleting file: %d", dirID) + err = f.client.Files.Delete(ctx, dirID) + return shouldRetry(err) + }) + f.dirCache.FlushDir(dir) + return err +} + +// Precision returns the precision +func (f *Fs) Precision() time.Duration { + return time.Second +} + +// Purge deletes all the files and the container +// +// Optional interface: Only implement this if you have a way of +// deleting all the files quicker than just running Remove() on the +// result of List() +func (f *Fs) Purge(ctx context.Context) (err error) { + // defer log.Trace(f, "")("err=%v", &err) + + if f.root == "" { + return errors.New("can't purge root directory") + } + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + return err + } + + rootID := atoi(f.dirCache.RootID()) + // Let putio delete the filesystem tree + err = f.pacer.Call(func() (bool, error) { + // fs.Debugf(f, "deleting file: %d", rootID) + err = f.client.Files.Delete(ctx, rootID) + return shouldRetry(err) + }) + f.dirCache.ResetRoot() + return err +} + +// Copy src to this remote using server side copy operations. 
+// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) { + // defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err) + srcObj, ok := src.(*Object) + if !ok { + return nil, fs.ErrorCantCopy + } + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true) + if err != nil { + return nil, err + } + err = f.pacer.Call(func() (bool, error) { + params := url.Values{} + params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10)) + params.Set("parent_id", directoryID) + params.Set("name", leaf) + req, err := f.client.NewRequest(ctx, "POST", "/v2/files/copy", strings.NewReader(params.Encode())) + if err != nil { + return false, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + // fs.Debugf(f, "copying file (%d) to parent_id: %s", srcObj.file.ID, directoryID) + _, err = f.client.Do(req, nil) + return shouldRetry(err) + }) + if err != nil { + return nil, err + } + return f.NewObject(ctx, remote) +} + +// Move src to this remote using server side move operations. 
+// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (o fs.Object, err error) { + // defer log.Trace(f, "src=%+v, remote=%v", src, remote)("o=%+v, err=%v", &o, &err) + srcObj, ok := src.(*Object) + if !ok { + return nil, fs.ErrorCantMove + } + leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true) + if err != nil { + return nil, err + } + err = f.pacer.Call(func() (bool, error) { + params := url.Values{} + params.Set("file_id", strconv.FormatInt(srcObj.file.ID, 10)) + params.Set("parent_id", directoryID) + params.Set("name", leaf) + req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode())) + if err != nil { + return false, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + // fs.Debugf(f, "moving file (%d) to parent_id: %s", srcObj.file.ID, directoryID) + _, err = f.client.Do(req, nil) + return shouldRetry(err) + }) + if err != nil { + return nil, err + } + return f.NewObject(ctx, remote) +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server side move operations. 
+// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) { + // defer log.Trace(f, "src=%+v, srcRemote=%v, dstRemote", src, srcRemote, dstRemote)("err=%v", &err) + srcFs, ok := src.(*Fs) + if !ok { + return fs.ErrorCantDirMove + } + srcPath := path.Join(srcFs.root, srcRemote) + dstPath := path.Join(f.root, dstRemote) + + // Refuse to move to or from the root + if srcPath == "" || dstPath == "" { + return errors.New("can't move root directory") + } + + // find the root src directory + err = srcFs.dirCache.FindRoot(ctx, false) + if err != nil { + return err + } + + // find the root dst directory + if dstRemote != "" { + err = f.dirCache.FindRoot(ctx, true) + if err != nil { + return err + } + } else { + if f.dirCache.FoundRoot() { + return fs.ErrorDirExists + } + } + + // Find ID of dst parent, creating subdirs if necessary + var leaf, dstDirectoryID string + findPath := dstRemote + if dstRemote == "" { + findPath = f.root + } + leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true) + if err != nil { + return err + } + + // Check destination does not exist + if dstRemote != "" { + _, err = f.dirCache.FindDir(ctx, dstRemote, false) + if err == fs.ErrorDirNotFound { + // OK + } else if err != nil { + return err + } else { + return fs.ErrorDirExists + } + } + + // Find ID of src + srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false) + if err != nil { + return err + } + + err = f.pacer.Call(func() (bool, error) { + params := url.Values{} + params.Set("file_id", srcID) + params.Set("parent_id", dstDirectoryID) + params.Set("name", leaf) + req, err := f.client.NewRequest(ctx, "POST", "/v2/files/move", strings.NewReader(params.Encode())) + if err != nil { + return false, err + } + req.Header.Set("Content-Type", 
"application/x-www-form-urlencoded") + // fs.Debugf(f, "moving file (%s) to parent_id: %s", srcID, dstDirectoryID) + _, err = f.client.Do(req, nil) + return shouldRetry(err) + }) + srcFs.dirCache.FlushDir(srcRemote) + return err +} + +// About gets quota information +func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { + // defer log.Trace(f, "")("usage=%+v, err=%v", usage, &err) + var ai putio.AccountInfo + err = f.pacer.Call(func() (bool, error) { + // fs.Debugf(f, "getting account info") + ai, err = f.client.Account.Info(ctx) + return shouldRetry(err) + }) + if err != nil { + return nil, errors.Wrap(err, "about failed") + } + return &fs.Usage{ + Total: fs.NewUsageValue(ai.Disk.Size), // quota of bytes that can be used + Used: fs.NewUsageValue(ai.Disk.Used), // bytes in use + Free: fs.NewUsageValue(ai.Disk.Avail), // bytes which can be uploaded before reaching the quota + }, nil +} + +// Hashes returns the supported hash sets. +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.CRC32) +} + +// DirCacheFlush resets the directory cache - used in testing as an +// optional interface +func (f *Fs) DirCacheFlush() { + // defer log.Trace(f, "")("") + f.dirCache.ResetRoot() +} + +// CleanUp the trash in the Fs +func (f *Fs) CleanUp(ctx context.Context) (err error) { + // defer log.Trace(f, "")("err=%v", &err) + return f.pacer.Call(func() (bool, error) { + req, err := f.client.NewRequest(ctx, "POST", "/v2/trash/empty", nil) + if err != nil { + return false, err + } + // fs.Debugf(f, "emptying trash") + _, err = f.client.Do(req, nil) + return shouldRetry(err) + }) +} diff --git a/backend/putio/object.go b/backend/putio/object.go new file mode 100644 index 000000000..2ffe24b37 --- /dev/null +++ b/backend/putio/object.go @@ -0,0 +1,276 @@ +package putio + +import ( + "context" + "io" + "net/http" + "net/url" + "path" + "strconv" + "time" + + "github.com/pkg/errors" + "github.com/putdotio/go-putio/putio" + "github.com/rclone/rclone/fs" + 
"github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/hash" +) + +// Object describes a Putio object +// +// Putio Objects always have full metadata +type Object struct { + fs *Fs // what this object is part of + file *putio.File + remote string // The remote path + modtime time.Time +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) { + // defer log.Trace(f, "remote=%v", remote)("o=%+v, err=%v", &o, &err) + obj := &Object{ + fs: f, + remote: remote, + } + err = obj.readEntryAndSetMetadata(ctx) + if err != nil { + return nil, err + } + return obj, err +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info putio.File) (o fs.Object, err error) { + // defer log.Trace(f, "remote=%v, info=+v", remote, &info)("o=%+v, err=%v", &o, &err) + obj := &Object{ + fs: f, + remote: remote, + } + err = obj.setMetadataFromEntry(info) + if err != nil { + return nil, err + } + return obj, err +} + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// Hash returns the dropbox special hash +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + if t != hash.CRC32 { + return "", hash.ErrUnsupported + } + err := o.readEntryAndSetMetadata(ctx) + if err != nil { + return "", errors.Wrap(err, "failed to read hash from metadata") + } + return o.file.CRC32, nil +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + if o.file == nil { + return 0 + } + return o.file.Size +} + +// ID returns the ID of the Object 
if known, or "" if not +func (o *Object) ID() string { + if o.file == nil { + return "" + } + return itoa(o.file.ID) +} + +// MimeType returns the content type of the Object if +// known, or "" if not +func (o *Object) MimeType(ctx context.Context) string { + err := o.readEntryAndSetMetadata(ctx) + if err != nil { + return "" + } + return o.file.ContentType +} + +// setMetadataFromEntry sets the fs data from a putio.File +// +// This isn't a complete set of metadata and has an inacurate date +func (o *Object) setMetadataFromEntry(info putio.File) error { + o.file = &info + o.modtime = info.UpdatedAt.Time + return nil +} + +// Reads the entry for a file from putio +func (o *Object) readEntry(ctx context.Context) (f *putio.File, err error) { + // defer log.Trace(o, "")("f=%+v, err=%v", f, &err) + leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false) + if err != nil { + if err == fs.ErrorDirNotFound { + return nil, fs.ErrorObjectNotFound + } + return nil, err + } + var resp struct { + File putio.File `json:"file"` + } + err = o.fs.pacer.Call(func() (bool, error) { + // fs.Debugf(o, "requesting child. 
directoryID: %s, name: %s", directoryID, leaf) + req, err := o.fs.client.NewRequest(ctx, "GET", "/v2/files/"+directoryID+"/child?name="+url.PathEscape(leaf), nil) + if err != nil { + return false, err + } + _, err = o.fs.client.Do(req, &resp) + if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode == 404 { + return false, fs.ErrorObjectNotFound + } + return shouldRetry(err) + }) + return &resp.File, err +} + +// Read entry if not set and set metadata from it +func (o *Object) readEntryAndSetMetadata(ctx context.Context) error { + if o.file != nil { + return nil + } + entry, err := o.readEntry(ctx) + if err != nil { + return err + } + return o.setMetadataFromEntry(*entry) +} + +// Returns the remote path for the object +func (o *Object) remotePath() string { + return path.Join(o.fs.root, o.remote) +} + +// ModTime returns the modification time of the object +// +// It attempts to read the objects mtime and if that isn't present the +// LastModified returned in the http headers +func (o *Object) ModTime(ctx context.Context) time.Time { + if o.modtime.IsZero() { + err := o.readEntryAndSetMetadata(ctx) + if err != nil { + fs.Debugf(o, "Failed to read metadata: %v", err) + return time.Now() + } + } + return o.modtime +} + +// SetModTime sets the modification time of the local fs object +// +// Commits the datastore +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) { + // defer log.Trace(o, "modTime=%v", modTime.String())("err=%v", &err) + req, err := o.fs.client.NewRequest(ctx, "POST", "/v2/files/touch?file_id="+strconv.FormatInt(o.file.ID, 10)+"&updated_at="+url.QueryEscape(modTime.Format(time.RFC3339)), nil) + if err != nil { + return err + } + // fs.Debugf(o, "setting modtime: %s", modTime.String()) + _, err = o.fs.client.Do(req, nil) + if err != nil { + return err + } + o.modtime = modTime + if o.file != nil { + o.file.UpdatedAt.Time = modTime + } + return nil +} + +// Storable returns whether this object is 
storable +func (o *Object) Storable() bool { + return true +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + // defer log.Trace(o, "")("err=%v", &err) + var storageURL string + err = o.fs.pacer.Call(func() (bool, error) { + storageURL, err = o.fs.client.Files.URL(ctx, o.file.ID, true) + return shouldRetry(err) + }) + if err != nil { + return + } + + var resp *http.Response + headers := fs.OpenOptionHeaders(options) + err = o.fs.pacer.Call(func() (bool, error) { + req, _ := http.NewRequest(http.MethodGet, storageURL, nil) + req.Header.Set("User-Agent", o.fs.client.UserAgent) + + // merge headers with extra headers + for header, value := range headers { + req.Header.Set(header, value) + } + // fs.Debugf(o, "opening file: id=%d", o.file.ID) + resp, err = http.DefaultClient.Do(req) + return shouldRetry(err) + }) + if perr, ok := err.(*putio.ErrorResponse); ok && perr.Response.StatusCode >= 400 && perr.Response.StatusCode <= 499 { + _ = resp.Body.Close() + return nil, fserrors.NoRetryError(err) + } + return resp.Body, err +} + +// Update the already existing object +// +// Copy the reader into the object updating modTime and size +// +// The new object may have been created if an error is returned +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + // defer log.Trace(o, "src=%+v", src)("err=%v", &err) + remote := o.remotePath() + if ignoredFiles.MatchString(remote) { + fs.Logf(o, "File name disallowed - not uploading") + return nil + } + err = o.Remove(ctx) + if err != nil { + return err + } + newObj, err := o.fs.PutUnchecked(ctx, in, src, options...) 
+ if err != nil { + return err + } + *o = *(newObj.(*Object)) + return err +} + +// Remove an object +func (o *Object) Remove(ctx context.Context) (err error) { + // defer log.Trace(o, "")("err=%v", &err) + return o.fs.pacer.Call(func() (bool, error) { + // fs.Debugf(o, "removing file: id=%d", o.file.ID) + err = o.fs.client.Files.Delete(ctx, o.file.ID) + return shouldRetry(err) + }) +} diff --git a/backend/putio/putio.go b/backend/putio/putio.go new file mode 100644 index 000000000..e56bae2bb --- /dev/null +++ b/backend/putio/putio.go @@ -0,0 +1,72 @@ +package putio + +import ( + "log" + "regexp" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/oauthutil" + "golang.org/x/oauth2" +) + +// Constants +const ( + rcloneClientID = "4131" + rcloneObscuredClientSecret = "cMwrjWVmrHZp3gf1ZpCrlyGAmPpB-YY5BbVnO1fj-G9evcd8" + minSleep = 10 * time.Millisecond + maxSleep = 2 * time.Second + decayConstant = 2 // bigger for slower decay, exponential + defaultChunkSize = 48 * fs.MebiByte +) + +var ( + // Description of how to auth for this app + putioConfig = &oauth2.Config{ + Scopes: []string{}, + Endpoint: oauth2.Endpoint{ + AuthURL: "https://api.put.io/v2/oauth2/authenticate", + TokenURL: "https://api.put.io/v2/oauth2/access_token", + }, + ClientID: rcloneClientID, + ClientSecret: obscure.MustReveal(rcloneObscuredClientSecret), + RedirectURL: oauthutil.RedirectLocalhostURL, + } + // A regexp matching path names for ignoring unnecessary files + ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r)$`) +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "putio", + Description: "Put.io", + NewFs: NewFs, + Config: func(name string, m configmap.Mapper) { + err := oauthutil.ConfigNoOffline("putio", name, m, putioConfig) + if err != nil { + log.Fatalf("Failed to 
configure token: %v", err) + } + }, + }) +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.PutUncheckeder = (*Fs)(nil) + _ fs.Abouter = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ dircache.DirCacher = (*Fs)(nil) + _ fs.DirCacheFlusher = (*Fs)(nil) + _ fs.CleanUpper = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.Object = (*Object)(nil) + _ fs.MimeTyper = (*Object)(nil) + _ fs.IDer = (*Object)(nil) +) diff --git a/backend/putio/putio_test.go b/backend/putio/putio_test.go new file mode 100644 index 000000000..5cad6dac7 --- /dev/null +++ b/backend/putio/putio_test.go @@ -0,0 +1,16 @@ +// Test Put.io filesystem interface +package putio + +import ( + "testing" + + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestPutio:", + NilObject: (*Object)(nil), + }) +} diff --git a/bin/make_manual.py b/bin/make_manual.py index e3f0368fb..a8d5df94e 100755 --- a/bin/make_manual.py +++ b/bin/make_manual.py @@ -48,6 +48,7 @@ docs = [ "swift.md", "pcloud.md", "premiumize.md", + "putio.md", "sftp.md", "union.md", "webdav.md", diff --git a/docs/content/about.md b/docs/content/about.md index 54376bc67..bc6811205 100644 --- a/docs/content/about.md +++ b/docs/content/about.md @@ -46,7 +46,7 @@ Rclone is a command line program to sync files and directories to and from: * {{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}} * {{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}} * {{< provider name="premiumize.me" home="https://premiumize.me/" config="/premiumizeme/" >}} -* {{< provider name="put.io" home="https://put.io/" config="/webdav/#put-io" >}} +* {{< provider name="put.io" home="https://put.io/" config="/putio/" >}} * {{< provider name="QingStor" home="https://www.qingcloud.com/products/storage" 
config="/qingstor/" >}} * {{< provider name="Rackspace Cloud Files" home="https://www.rackspace.com/cloud/files" config="/swift/" >}} * {{< provider name="rsync.net" home="https://rsync.net/products/rclone.html" config="/sftp/#rsync-net" >}} diff --git a/docs/content/docs.md b/docs/content/docs.md index 0887b0530..29620dce0 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -44,6 +44,7 @@ See the following for detailed instructions for * [OpenDrive](/opendrive/) * [Pcloud](/pcloud/) * [premiumize.me](/premiumizeme/) + * [put.io](/putio/) * [QingStor](/qingstor/) * [SFTP](/sftp/) * [Union](/union/) diff --git a/docs/content/overview.md b/docs/content/overview.md index 076ed2679..69eff7d91 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -38,6 +38,7 @@ Here is an overview of the major features of each cloud storage system. | Openstack Swift | MD5 | Yes | No | No | R/W | | pCloud | MD5, SHA1 | Yes | No | No | W | | premiumize.me | - | No | Yes | No | R | +| put.io | CRC-32 | Yes | No | Yes | R | | QingStor | MD5 | No | No | No | R/W | | SFTP | MD5, SHA1 ‡ | Yes | Depends | No | - | | WebDAV | MD5, SHA1 ††| Yes ††† | Depends | No | - | @@ -154,6 +155,7 @@ operations more efficient. 
| Openstack Swift | Yes † | Yes | No | No | No | Yes | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | No | | pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes | | premiumize.me | Yes | No | Yes | Yes | No | No | No | Yes | Yes | Yes | +| put.io | Yes | No | Yes | Yes | Yes | No | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes | | QingStor | No | Yes | No | No | No | Yes | No | No [#2178](https://github.com/rclone/rclone/issues/2178) | No | No | | SFTP | No | No | Yes | Yes | No | No | Yes | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes | | WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ‡ | No [#2178](https://github.com/rclone/rclone/issues/2178) | Yes | Yes | diff --git a/docs/content/putio.md b/docs/content/putio.md new file mode 100644 index 000000000..c14ea61c5 --- /dev/null +++ b/docs/content/putio.md @@ -0,0 +1,97 @@ +--- +title: "put.io" +description: "Rclone docs for put.io" +date: "2019-08-08" +--- + + put.io +--------------------------------- + +Paths are specified as `remote:path` + +put.io paths may be as deep as required, eg +`remote:directory/subdirectory`. + +The initial setup for put.io involves getting a token from put.io +which you need to do in your browser. `rclone config` walks you +through it. + +Here is an example of how to make a remote called `remote`. First run: + + rclone config + +This will guide you through an interactive setup process: + +``` +No remotes found - make a new one +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n +name> putio +Type of storage to configure. +Enter a string value. Press Enter for the default (""). +Choose a number from below, or type in your own value +[snip] +25 / Put.io + \ "putio" +[snip] +Storage> putio +** See help for putio backend at: https://rclone.org/putio/ ** + +Remote config +Use auto config? 
+ * Say Y if not sure + * Say N if you are working on a remote or headless machine +y) Yes +n) No +y/n> y +If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth +Log in and authorize rclone for access +Waiting for code... +Got code +-------------------- +[putio] +type = putio +token = {"access_token":"XXXXXXXX","expiry":"0001-01-01T00:00:00Z"} +-------------------- +y) Yes this is OK +e) Edit this remote +d) Delete this remote +y/e/d> y +Current remotes: + +Name Type +==== ==== +putio putio + +e) Edit existing remote +n) New remote +d) Delete remote +r) Rename remote +c) Copy remote +s) Set configuration password +q) Quit config +e/n/d/r/c/s/q> q +``` + +Note that rclone runs a webserver on your local machine to collect the +token as returned from put.io if you use auto config mode. This only +runs from the moment it opens your browser to the moment you get back +the verification code. This is on `http://127.0.0.1:53682/` and it +may require you to unblock it temporarily if you are running a host +firewall, or use manual mode. + +You can then use it like this, + +List directories in top level of your put.io + + rclone lsd remote: + +List all the files in your put.io + + rclone ls remote: + +To copy a local directory to a put.io directory called backup + + rclone copy /home/source remote:backup diff --git a/docs/content/webdav.md b/docs/content/webdav.md index f2d1f6df2..514cdd74d 100644 --- a/docs/content/webdav.md +++ b/docs/content/webdav.md @@ -189,31 +189,6 @@ Owncloud does. This [may be fixed](https://github.com/nextcloud/nextcloud-snap/issues/365) in the future. -### Put.io ### - -put.io can be accessed in a read only way using webdav. - -Configure the `url` as `https://webdav.put.io` and use your normal -account username and password for `user` and `pass`. Set the `vendor` -to `other`.
- -Your config file should end up looking like this: - -``` -[putio] -type = webdav -url = https://webdav.put.io -vendor = other -user = YourUserName -pass = encryptedpassword -``` - -If you are using `put.io` with `rclone mount` then use the -`--read-only` flag to signal to the OS that it can't write to the -mount. - -For more help see [the put.io webdav docs](http://help.put.io/apps-and-integrations/ftp-and-webdav). - ### Sharepoint ### Rclone can be used with Sharepoint provided by OneDrive for Business diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index 63369f471..2af58c29e 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -80,6 +80,7 @@
  • Openstack Swift
  • pCloud
  • premiumize.me
  • +
  • put.io
  • SFTP
  • Union (merge backends)
  • WebDAV
  • diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml index 890a776c5..f5c653670 100644 --- a/fstest/test_all/config.yaml +++ b/fstest/test_all/config.yaml @@ -125,3 +125,7 @@ backends: - backend: "premiumizeme" remote: "TestPremiumizeMe:" fastlist: false + - backend: "putio" + remote: "TestPutio:" + subdir: false + fastlist: false