From d4df3f2154f6de90f9501147f7bdba049a4b2935 Mon Sep 17 00:00:00 2001 From: Nick Craig-Wood Date: Sat, 30 Jan 2016 18:08:44 +0000 Subject: [PATCH] acd: Download files >= 9GB with their tempLink direct from s3 This fixes the problem downloading files > 10GB. Fixes #204 Fixes #313 --- amazonclouddrive/amazonclouddrive.go | 34 ++++++++++++++++++++-------- docs/content/amazonclouddrive.md | 16 +++++++++++++ 2 files changed, 40 insertions(+), 10 deletions(-) diff --git a/amazonclouddrive/amazonclouddrive.go b/amazonclouddrive/amazonclouddrive.go index 934098502..dc8dfb0a7 100644 --- a/amazonclouddrive/amazonclouddrive.go +++ b/amazonclouddrive/amazonclouddrive.go @@ -27,6 +27,7 @@ import ( "github.com/ncw/rclone/fs" "github.com/ncw/rclone/oauthutil" "github.com/ncw/rclone/pacer" + "github.com/spf13/pflag" "golang.org/x/oauth2" ) @@ -44,6 +45,8 @@ const ( // Globals var ( + // Flags + tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink // Description of how to auth for this app acdConfig = &oauth2.Config{ Scopes: []string{"clouddrive:read_all", "clouddrive:write"}, @@ -76,15 +79,17 @@ func init() { Help: "Amazon Application Client Secret - leave blank normally.", }}, }) + pflag.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.") } // Fs represents a remote acd server type Fs struct { - name string // name of this remote - c *acd.Client // the connection to the acd server - root string // the path we are working on - dirCache *dircache.DirCache // Map of directory path to directory id - pacer *pacer.Pacer // pacer for API calls + name string // name of this remote + c *acd.Client // the connection to the acd server + noAuthClient *http.Client // unauthenticated http client + root string // the path we are working on + dirCache *dircache.DirCache // Map of directory path to directory id + pacer *pacer.Pacer // pacer for API calls } // Object describes a acd object @@ 
-146,10 +151,11 @@ func NewFs(name, root string) (fs.Fs, error) { c := acd.NewClient(oAuthClient) c.UserAgent = fs.UserAgent f := &Fs{ - name: name, - root: root, - c: c, - pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer), + name: name, + root: root, + c: c, + pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer), + noAuthClient: fs.Config.Client(), } // Update endpoints @@ -741,10 +747,18 @@ func (o *Object) Storable() bool { // Open an object for read func (o *Object) Open() (in io.ReadCloser, err error) { + bigObject := o.Size() >= int64(tempLinkThreshold) + if bigObject { + fs.Debug(o, "Downloading large object via tempLink") + } file := acd.File{Node: o.info} var resp *http.Response err = o.fs.pacer.Call(func() (bool, error) { + if !bigObject { + in, resp, err = file.Open() + } else { + in, resp, err = file.OpenTempURL(o.fs.noAuthClient) + } - in, resp, err = file.Open() return shouldRetry(resp, err) }) return in, err diff --git a/docs/content/amazonclouddrive.md b/docs/content/amazonclouddrive.md index ba74d4a99..10b2a00e0 100644 --- a/docs/content/amazonclouddrive.md +++ b/docs/content/amazonclouddrive.md @@ -96,6 +96,22 @@ don't provide an API to permanently delete files, nor to empty the trash, so you will have to do that with one of Amazon's apps or via the Amazon cloud drive website. +### Specific options ### + +Here are the command line options specific to this cloud storage +system. + +#### --acd-templink-threshold=SIZE #### + +Files this size or more will be downloaded via their `tempLink`. This +is to work around a problem with Amazon Cloud Drive which blocks +downloads of files bigger than about 10GB. The default for this is +9GB which shouldn't need to be changed. + +To download files above this threshold, rclone requests a `tempLink` +which downloads the file through a temporary URL directly from the +underlying S3 storage. 
+ ### Limitations ### Note that Amazon cloud drive is case insensitive so you can't have a