2015-09-22 19:47:16 +02:00
// Package amazonclouddrive provides an interface to the Amazon Cloud
// Drive object storage system.
2015-09-03 00:37:42 +02:00
package amazonclouddrive
/*
FIXME make searching for directory in id and file in id more efficient
- use the name : search parameter - remember the escaping rules
- use Folder GetNode and GetFile
FIXME make the default for no files and no dirs be ( FILE & FOLDER ) so
we ignore assets completely !
*/
import (
"fmt"
"io"
"log"
2015-09-10 00:23:37 +02:00
"net/http"
2015-09-03 00:37:42 +02:00
"regexp"
"strings"
2016-08-08 20:05:06 +02:00
"sync/atomic"
2015-09-03 00:37:42 +02:00
"time"
"github.com/ncw/go-acd"
"github.com/ncw/rclone/dircache"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
2015-09-11 20:18:41 +02:00
"github.com/ncw/rclone/pacer"
2016-10-26 19:42:41 +02:00
"github.com/ncw/rclone/rest"
2016-06-12 16:06:02 +02:00
"github.com/pkg/errors"
2016-01-30 19:08:44 +01:00
"github.com/spf13/pflag"
2015-09-03 00:37:42 +02:00
"golang.org/x/oauth2"
)
const (
2016-02-28 20:57:19 +01:00
rcloneClientID = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
2016-08-14 13:04:43 +02:00
rcloneEncryptedClientSecret = "ZP12wYlGw198FtmqfOxyNAGXU3fwVcQdmt--ba1d00wJnUs0LOzvVyXVDbqhbcUqnr5Vd1QejwWmiv1Ep7UJG1kUQeuBP5n9goXWd5MrAf0"
2016-02-28 20:57:19 +01:00
folderKind = "FOLDER"
fileKind = "FILE"
assetKind = "ASSET"
statusAvailable = "AVAILABLE"
timeFormat = time . RFC3339 // 2014-03-07T22:31:12.173Z
minSleep = 20 * time . Millisecond
warnFileSize = 50 << 30 // Display warning for files larger than this size
2015-09-03 00:37:42 +02:00
)
// Globals
var (
2016-01-30 19:08:44 +01:00
// Flags
tempLinkThreshold = fs . SizeSuffix ( 9 << 30 ) // Download files bigger than this via the tempLink
2016-10-17 17:18:56 +02:00
uploadWaitLimit = pflag . DurationP ( "acd-upload-wait-limit" , "" , 60 * time . Second , "Don't wait for completed uploads to appear if they took less than this time." )
2016-08-06 00:42:49 +02:00
uploadWaitTime = pflag . DurationP ( "acd-upload-wait-time" , "" , 2 * 60 * time . Second , "Time to wait after a failed complete upload to see if it appears." )
2016-10-17 17:18:56 +02:00
uploadWaitPerGB = pflag . DurationP ( "acd-upload-wait-per-gb" , "" , 30 * time . Second , "Additional time per GB to wait after a failed complete upload to see if it appears." )
2015-09-03 00:37:42 +02:00
// Description of how to auth for this app
acdConfig = & oauth2 . Config {
Scopes : [ ] string { "clouddrive:read_all" , "clouddrive:write" } ,
Endpoint : oauth2 . Endpoint {
AuthURL : "https://www.amazon.com/ap/oa" ,
TokenURL : "https://api.amazon.com/auth/o2/token" ,
} ,
ClientID : rcloneClientID ,
2016-08-14 13:04:43 +02:00
ClientSecret : fs . MustReveal ( rcloneEncryptedClientSecret ) ,
2015-09-12 15:17:39 +02:00
RedirectURL : oauthutil . RedirectURL ,
2015-09-03 00:37:42 +02:00
}
)
// Register with Fs
func init ( ) {
2016-02-18 12:35:25 +01:00
fs . Register ( & fs . RegInfo {
2016-02-15 19:11:53 +01:00
Name : "amazon cloud drive" ,
2016-07-11 13:42:44 +02:00
Description : "Amazon Drive" ,
2016-02-15 19:11:53 +01:00
NewFs : NewFs ,
2015-09-03 00:37:42 +02:00
Config : func ( name string ) {
2016-01-04 16:13:36 +01:00
err := oauthutil . Config ( "amazon cloud drive" , name , acdConfig )
2015-09-03 00:37:42 +02:00
if err != nil {
log . Fatalf ( "Failed to configure token: %v" , err )
}
} ,
Options : [ ] fs . Option { {
2016-01-07 16:20:32 +01:00
Name : fs . ConfigClientID ,
2015-10-03 15:23:12 +02:00
Help : "Amazon Application Client Id - leave blank normally." ,
2015-09-03 00:37:42 +02:00
} , {
2016-01-07 16:20:32 +01:00
Name : fs . ConfigClientSecret ,
2015-10-03 15:23:12 +02:00
Help : "Amazon Application Client Secret - leave blank normally." ,
2015-09-03 00:37:42 +02:00
} } ,
} )
2016-01-30 19:08:44 +01:00
pflag . VarP ( & tempLinkThreshold , "acd-templink-threshold" , "" , "Files >= this size will be downloaded via their tempLink." )
2015-09-03 00:37:42 +02:00
}
2015-11-07 12:14:46 +01:00
// Fs represents a remote acd server
type Fs struct {
2016-05-23 19:03:22 +02:00
name string // name of this remote
c * acd . Client // the connection to the acd server
noAuthClient * http . Client // unauthenticated http client
root string // the path we are working on
dirCache * dircache . DirCache // Map of directory path to directory id
pacer * pacer . Pacer // pacer for API calls
ts * oauthutil . TokenSource // token source for oauth
2016-08-08 20:05:06 +02:00
uploads int32 // number of uploads in progress - atomic access required
2015-09-03 00:37:42 +02:00
}
2015-11-07 12:14:46 +01:00
// Object describes a acd object
2015-09-03 00:37:42 +02:00
//
// Will definitely have info but maybe not meta
2015-11-07 12:14:46 +01:00
type Object struct {
fs * Fs // what this object is part of
2015-09-03 00:37:42 +02:00
remote string // The remote path
info * acd . Node // Info from the acd object if known
}
// ------------------------------------------------------------
2015-09-22 19:47:16 +02:00
// Name of the remote (as passed into NewFs)
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Name ( ) string {
2015-09-03 00:37:42 +02:00
return f . name
}
2015-09-22 19:47:16 +02:00
// Root of the remote (as passed into NewFs)
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Root ( ) string {
2015-09-03 00:37:42 +02:00
return f . root
}
2015-11-07 12:14:46 +01:00
// String converts this Fs to a string
func ( f * Fs ) String ( ) string {
2016-07-11 13:42:44 +02:00
return fmt . Sprintf ( "amazon drive root '%s'" , f . root )
2015-09-03 00:37:42 +02:00
}
// matcher splits an acd path into its first segment and the remainder.
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
// parsePath parses an acd 'url' by stripping any leading and
// trailing slashes.
func parsePath(path string) (root string) {
	return strings.Trim(path, "/")
}
2015-09-14 22:00:44 +02:00
// retryErrorCodes lists the HTTP status codes which we will retry.
var retryErrorCodes = []int{
	400, // Bad request (seen in "Next token is expired")
	401, // Unauthorized (seen in "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable
	504, // Gateway Time-out
}
2015-09-11 20:18:41 +02:00
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
2016-05-23 19:03:22 +02:00
func ( f * Fs ) shouldRetry ( resp * http . Response , err error ) ( bool , error ) {
2016-05-28 16:45:39 +02:00
if resp != nil {
if resp . StatusCode == 401 {
f . ts . Invalidate ( )
fs . Log ( f , "401 error received - invalidating token" )
return true , err
}
// Work around receiving this error sporadically on authentication
//
// HTTP code 403: "403 Forbidden", reponse body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
if resp . StatusCode == 403 && strings . Contains ( err . Error ( ) , "Authorization header requires" ) {
fs . Log ( f , "403 \"Authorization header requires...\" error received - retry" )
return true , err
}
2016-05-23 19:03:22 +02:00
}
2015-10-14 18:37:53 +02:00
return fs . ShouldRetry ( err ) || fs . ShouldRetryHTTP ( resp , retryErrorCodes ) , err
2015-09-11 20:18:41 +02:00
}
2015-11-07 12:14:46 +01:00
// NewFs constructs an Fs from the path, container:path
2015-09-03 00:37:42 +02:00
func NewFs ( name , root string ) ( fs . Fs , error ) {
root = parsePath ( root )
2016-05-23 19:03:22 +02:00
oAuthClient , ts , err := oauthutil . NewClient ( name , acdConfig )
2015-09-03 00:37:42 +02:00
if err != nil {
2016-07-11 13:42:44 +02:00
log . Fatalf ( "Failed to configure Amazon Drive: %v" , err )
2015-09-03 00:37:42 +02:00
}
c := acd . NewClient ( oAuthClient )
2015-11-07 12:14:46 +01:00
f := & Fs {
2016-01-30 19:08:44 +01:00
name : name ,
root : root ,
c : c ,
pacer : pacer . New ( ) . SetMinSleep ( minSleep ) . SetPacer ( pacer . AmazonCloudDrivePacer ) ,
noAuthClient : fs . Config . Client ( ) ,
2016-05-23 19:03:22 +02:00
ts : ts ,
2015-09-03 00:37:42 +02:00
}
// Update endpoints
2015-09-11 20:18:41 +02:00
var resp * http . Response
err = f . pacer . Call ( func ( ) ( bool , error ) {
_ , resp , err = f . c . Account . GetEndpoints ( )
2016-05-23 19:03:22 +02:00
return f . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
if err != nil {
2016-06-12 16:06:02 +02:00
return nil , errors . Wrap ( err , "failed to get endpoints" )
2015-09-03 00:37:42 +02:00
}
// Get rootID
2016-08-08 20:05:06 +02:00
rootInfo , err := f . getRootInfo ( )
2015-09-03 00:37:42 +02:00
if err != nil || rootInfo . Id == nil {
2016-06-12 16:06:02 +02:00
return nil , errors . Wrap ( err , "failed to get root" )
2015-09-03 00:37:42 +02:00
}
2016-08-08 20:05:06 +02:00
// Renew the token in the background
go f . renewToken ( )
2015-09-03 00:37:42 +02:00
f . dirCache = dircache . New ( root , * rootInfo . Id , f )
// Find the current root
err = f . dirCache . FindRoot ( false )
if err != nil {
// Assume it is a file
newRoot , remote := dircache . SplitPath ( root )
newF := * f
newF . dirCache = dircache . New ( newRoot , * rootInfo . Id , & newF )
newF . root = newRoot
// Make new Fs which is the parent
err = newF . dirCache . FindRoot ( false )
if err != nil {
// No root so return old f
return f , nil
}
2016-06-25 22:23:20 +02:00
_ , err := newF . newObjectWithInfo ( remote , nil )
if err != nil {
if err == fs . ErrorObjectNotFound {
// File doesn't exist so return old f
return f , nil
}
return nil , err
2015-09-03 00:37:42 +02:00
}
2016-06-21 19:01:53 +02:00
// return an error with an fs which points to the parent
return & newF , fs . ErrorIsFile
2015-09-03 00:37:42 +02:00
}
return f , nil
}
2016-08-08 20:05:06 +02:00
// getRootInfo fetches the root folder info from the server.
func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		rootInfo, resp, err = f.c.Nodes.GetRoot()
		return f.shouldRetry(resp, err)
	})
	return rootInfo, err
}
// renewToken renews the token whenever it expires - runs in the
// background.
//
// Useful when there are lots of uploads in progress and the token
// doesn't get renewed.  Amazon seem to cancel your uploads if you
// don't renew your token for 2hrs.
func (f *Fs) renewToken() {
	expiry := f.ts.OnExpiry()
	for {
		<-expiry
		uploads := atomic.LoadInt32(&f.uploads)
		if uploads == 0 {
			fs.Debug(f, "Token expired but no uploads in progress - doing nothing")
			continue
		}
		fs.Debug(f, "Token expired - %d uploads in progress - refreshing", uploads)
		// Do a transaction to force a token refresh
		if _, err := f.getRootInfo(); err != nil {
			fs.ErrorLog(f, "Token refresh failed: %v", err)
		} else {
			fs.Debug(f, "Token refresh successful")
		}
	}
}
// startUpload notes that an upload is in progress.
func (f *Fs) startUpload() {
	atomic.AddInt32(&f.uploads, 1)
}
// stopUpload notes that an upload has finished.
func (f *Fs) stopUpload() {
	atomic.AddInt32(&f.uploads, -1)
}
2016-06-25 22:58:34 +02:00
// Return an Object from a path
2015-09-03 00:37:42 +02:00
//
2016-06-25 22:23:20 +02:00
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func ( f * Fs ) newObjectWithInfo ( remote string , info * acd . Node ) ( fs . Object , error ) {
2015-11-07 12:14:46 +01:00
o := & Object {
fs : f ,
2015-09-03 00:37:42 +02:00
remote : remote ,
}
if info != nil {
// Set info but not meta
o . info = info
} else {
err := o . readMetaData ( ) // reads info and meta, returning an error
if err != nil {
2016-06-25 22:23:20 +02:00
return nil , err
2015-09-03 00:37:42 +02:00
}
}
2016-06-25 22:23:20 +02:00
return o , nil
2015-09-03 00:37:42 +02:00
}
2016-06-25 22:23:20 +02:00
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func ( f * Fs ) NewObject ( remote string ) ( fs . Object , error ) {
2016-06-25 22:58:34 +02:00
return f . newObjectWithInfo ( remote , nil )
2015-09-03 00:37:42 +02:00
}
2015-09-22 19:47:16 +02:00
// FindLeaf finds a directory of name leaf in the folder with ID pathID
2015-11-07 12:14:46 +01:00
func ( f * Fs ) FindLeaf ( pathID , leaf string ) ( pathIDOut string , found bool , err error ) {
2015-09-22 19:47:16 +02:00
//fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
folder := acd . FolderFromId ( pathID , f . c . Nodes )
2015-09-11 20:18:41 +02:00
var resp * http . Response
var subFolder * acd . Folder
err = f . pacer . Call ( func ( ) ( bool , error ) {
subFolder , resp , err = folder . GetFolder ( leaf )
2016-05-23 19:03:22 +02:00
return f . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
if err != nil {
if err == acd . ErrorNodeNotFound {
//fs.Debug(f, "...Not found")
return "" , false , nil
}
//fs.Debug(f, "...Error %v", err)
return "" , false , err
}
if subFolder . Status != nil && * subFolder . Status != statusAvailable {
2016-01-20 17:32:15 +01:00
fs . Debug ( f , "Ignoring folder %q in state %q" , leaf , * subFolder . Status )
2015-09-03 00:37:42 +02:00
time . Sleep ( 1 * time . Second ) // FIXME wait for problem to go away!
return "" , false , nil
}
//fs.Debug(f, "...Found(%q, %v)", *subFolder.Id, leaf)
return * subFolder . Id , true , nil
}
2015-09-22 19:47:16 +02:00
// CreateDir makes a directory with pathID as parent and name leaf
2015-11-07 12:14:46 +01:00
func ( f * Fs ) CreateDir ( pathID , leaf string ) ( newID string , err error ) {
2015-09-22 19:47:16 +02:00
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
folder := acd . FolderFromId ( pathID , f . c . Nodes )
2015-09-11 20:18:41 +02:00
var resp * http . Response
var info * acd . Folder
err = f . pacer . Call ( func ( ) ( bool , error ) {
info , resp , err = folder . CreateFolder ( leaf )
2016-05-23 19:03:22 +02:00
return f . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
if err != nil {
2015-09-11 20:18:41 +02:00
//fmt.Printf("...Error %v\n", err)
2015-09-03 00:37:42 +02:00
return "" , err
}
2015-09-11 20:18:41 +02:00
//fmt.Printf("...Id %q\n", *info.Id)
2015-09-03 00:37:42 +02:00
return * info . Id , nil
}
// listAllFn is a user supplied function called by listAll on each
// node found in a directory listing.
//
// It should return true to finish processing early.
type listAllFn func(*acd.Node) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
2015-11-07 12:14:46 +01:00
func ( f * Fs ) listAll ( dirID string , title string , directoriesOnly bool , filesOnly bool , fn listAllFn ) ( found bool , err error ) {
2015-09-22 19:47:16 +02:00
query := "parents:" + dirID
2015-09-03 00:37:42 +02:00
if directoriesOnly {
query += " AND kind:" + folderKind
} else if filesOnly {
query += " AND kind:" + fileKind
} else {
// FIXME none of these work
//query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
//query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
}
opts := acd . NodeListOptions {
Filters : query ,
}
var nodes [ ] * acd . Node
2016-05-14 18:15:42 +02:00
var out [ ] * acd . Node
2015-09-10 00:23:37 +02:00
//var resp *http.Response
2015-09-03 00:37:42 +02:00
for {
2015-09-11 20:18:41 +02:00
var resp * http . Response
2016-05-14 18:15:42 +02:00
err = f . pacer . CallNoRetry ( func ( ) ( bool , error ) {
2015-09-11 20:18:41 +02:00
nodes , resp , err = f . c . Nodes . GetNodes ( & opts )
2016-05-23 19:03:22 +02:00
return f . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
if err != nil {
2016-04-21 21:06:21 +02:00
return false , err
2015-09-03 00:37:42 +02:00
}
if nodes == nil {
break
}
for _ , node := range nodes {
if node . Name != nil && node . Id != nil && node . Kind != nil && node . Status != nil {
// Ignore nodes if not AVAILABLE
if * node . Status != statusAvailable {
continue
}
2016-05-14 18:15:42 +02:00
// Store the nodes up in case we have to retry the listing
out = append ( out , node )
2015-09-03 00:37:42 +02:00
}
}
}
2016-05-14 18:15:42 +02:00
// Send the nodes now
for _ , node := range out {
if fn ( node ) {
found = true
break
}
}
2015-09-03 00:37:42 +02:00
return
}
2016-04-21 21:06:21 +02:00
// ListDir reads the directory specified by the job into out, returning any more jobs
func ( f * Fs ) ListDir ( out fs . ListOpts , job dircache . ListDirJob ) ( jobs [ ] dircache . ListDirJob , err error ) {
fs . Debug ( f , "Reading %q" , job . Path )
2016-05-14 18:15:42 +02:00
maxTries := fs . Config . LowLevelRetries
for tries := 1 ; tries <= maxTries ; tries ++ {
_ , err = f . listAll ( job . DirID , "" , false , false , func ( node * acd . Node ) bool {
remote := job . Path + * node . Name
switch * node . Kind {
case folderKind :
if out . IncludeDirectory ( remote ) {
dir := & fs . Dir {
Name : remote ,
Bytes : - 1 ,
Count : - 1 ,
}
dir . When , _ = time . Parse ( timeFormat , * node . ModifiedDate ) // FIXME
if out . AddDir ( dir ) {
return true
}
if job . Depth > 0 {
jobs = append ( jobs , dircache . ListDirJob { DirID : * node . Id , Path : remote + "/" , Depth : job . Depth - 1 } )
}
2015-09-12 21:59:14 +02:00
}
2016-05-14 18:15:42 +02:00
case fileKind :
2016-06-25 22:23:20 +02:00
o , err := f . newObjectWithInfo ( remote , node )
if err != nil {
out . SetError ( err )
return true
}
if out . Add ( o ) {
return true
2016-04-21 21:06:21 +02:00
}
2016-05-14 18:15:42 +02:00
default :
// ignore ASSET etc
2015-09-03 00:37:42 +02:00
}
2016-05-14 18:15:42 +02:00
return false
} )
if fs . IsRetryError ( err ) {
fs . Debug ( f , "Directory listing error for %q: %v - low level retry %d/%d" , job . Path , err , tries , maxTries )
continue
2015-09-03 00:37:42 +02:00
}
2016-05-14 18:15:42 +02:00
if err != nil {
return nil , err
}
break
}
2016-04-21 21:06:21 +02:00
fs . Debug ( f , "Finished reading %q" , job . Path )
return jobs , err
2015-09-03 00:37:42 +02:00
}
2016-04-21 21:06:21 +02:00
// List walks the path returning iles and directories into out
2016-04-23 22:46:52 +02:00
func ( f * Fs ) List ( out fs . ListOpts , dir string ) {
f . dirCache . List ( f , out , dir )
2015-09-03 00:37:42 +02:00
}
2016-08-06 00:42:49 +02:00
// checkUpload checks to see if an error occurred after the file was
// completely uploaded.
//
// If it was then it waits for a while to see if the file really
// exists and is the right size and returns an updated info.
//
// If the file wasn't found or was the wrong size then it returns the
// original error.
//
// This is a workaround for Amazon sometimes returning
//
// * 408 REQUEST_TIMEOUT
// * 504 GATEWAY_TIMEOUT
// * 500 Internal server error
//
// At the end of large uploads. The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
2016-10-17 17:18:56 +02:00
func ( f * Fs ) checkUpload ( resp * http . Response , in io . Reader , src fs . ObjectInfo , inInfo * acd . File , inErr error , uploadTime time . Duration ) ( fixedError bool , info * acd . File , err error ) {
2016-08-06 00:42:49 +02:00
// Return if no error - all is well
if inErr == nil {
return false , inInfo , inErr
}
2016-09-12 18:47:01 +02:00
// If not one of the errors we can fix return
2016-10-17 17:18:56 +02:00
// if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
// return false, inInfo, inErr
// }
// check to see if we read to the end
2016-08-06 00:42:49 +02:00
buf := make ( [ ] byte , 1 )
n , err := in . Read ( buf )
if ! ( n == 0 && err == io . EOF ) {
2016-10-17 17:18:56 +02:00
fs . Debug ( src , "Upload error detected but didn't finish upload: %v" , inErr )
2016-08-06 00:42:49 +02:00
return false , inInfo , inErr
}
2016-10-17 17:18:56 +02:00
// Only wait for items which have been in transit for > uploadWaitLimit
if uploadTime < * uploadWaitLimit {
fs . Debug ( src , "Upload error detected but not waiting since it only took %v to upload: %v" , uploadTime , inErr )
return false , inInfo , inErr
}
// Time we should wait for the upload
uploadWaitPerByte := float64 ( * uploadWaitPerGB ) / 1024 / 1024 / 1024
timeToWait := time . Duration ( uploadWaitPerByte * float64 ( src . Size ( ) ) ) + * uploadWaitTime
const sleepTime = 5 * time . Second // sleep between tries
retries := int ( ( timeToWait + sleepTime - 1 ) / sleepTime ) // number of retries, rounded up
2016-08-06 00:42:49 +02:00
fs . Debug ( src , "Error detected after finished upload - waiting to see if object was uploaded correctly: %v" , inErr )
remote := src . Remote ( )
for i := 1 ; i <= retries ; i ++ {
o , err := f . NewObject ( remote )
if err == fs . ErrorObjectNotFound {
fs . Debug ( src , "Object not found - waiting (%d/%d)" , i , retries )
} else if err != nil {
fs . Debug ( src , "Object returned error - waiting (%d/%d): %v" , i , retries , err )
} else {
if src . Size ( ) == o . Size ( ) {
2016-10-17 17:18:56 +02:00
fs . Debug ( src , "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error" , src . Size ( ) , i , retries , sleepTime * time . Duration ( i - 1 ) )
2016-08-06 00:42:49 +02:00
info = & acd . File {
Node : o . ( * Object ) . info ,
}
return true , info , nil
}
fs . Debug ( src , "Object found but wrong size %d vs %d - waiting (%d/%d)" , src . Size ( ) , o . Size ( ) , i , retries )
}
time . Sleep ( sleepTime )
}
2016-10-17 17:18:56 +02:00
fs . Debug ( src , "Giving up waiting for object - returning original error: %v" , inErr )
2016-08-06 00:42:49 +02:00
return false , inInfo , inErr
}
2015-09-03 00:37:42 +02:00
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
2016-02-18 12:35:25 +01:00
func ( f * Fs ) Put ( in io . Reader , src fs . ObjectInfo ) ( fs . Object , error ) {
remote := src . Remote ( )
size := src . Size ( )
2015-11-07 12:14:46 +01:00
// Temporary Object under construction
o := & Object {
fs : f ,
2015-09-03 00:37:42 +02:00
remote : remote ,
}
2016-06-12 16:06:27 +02:00
// Check if object already exists
err := o . readMetaData ( )
switch err {
case nil :
return o , o . Update ( in , src )
2016-06-25 22:23:20 +02:00
case fs . ErrorObjectNotFound :
2016-06-12 16:06:27 +02:00
// Not found so create it
default :
return nil , err
}
// If not create it
2015-09-03 00:37:42 +02:00
leaf , directoryID , err := f . dirCache . FindPath ( remote , true )
if err != nil {
return nil , err
}
2016-01-04 13:23:33 +01:00
if size > warnFileSize {
fs . Debug ( f , "Warning: file %q may fail because it is too big. Use --max-size=%dGB to skip large files." , remote , warnFileSize >> 30 )
}
2015-11-07 12:14:46 +01:00
folder := acd . FolderFromId ( directoryID , o . fs . c . Nodes )
2015-09-10 00:23:37 +02:00
var info * acd . File
var resp * http . Response
2015-09-11 20:18:41 +02:00
err = f . pacer . CallNoRetry ( func ( ) ( bool , error ) {
2016-10-17 17:18:56 +02:00
start := time . Now ( )
2016-08-08 20:05:06 +02:00
f . startUpload ( )
2016-02-18 12:35:25 +01:00
if src . Size ( ) != 0 {
2015-09-11 20:18:41 +02:00
info , resp , err = folder . Put ( in , leaf )
} else {
info , resp , err = folder . PutSized ( in , size , leaf )
}
2016-08-08 20:05:06 +02:00
f . stopUpload ( )
2016-08-06 00:42:49 +02:00
var ok bool
2016-10-17 17:18:56 +02:00
ok , info , err = f . checkUpload ( resp , in , src , info , err , time . Since ( start ) )
2016-08-06 00:42:49 +02:00
if ok {
return false , nil
}
2016-05-23 19:03:22 +02:00
return f . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
if err != nil {
return nil , err
}
o . info = info . Node
return o , nil
}
// Mkdir creates the container if it doesn't exist
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Mkdir ( ) error {
2015-09-03 00:37:42 +02:00
return f . dirCache . FindRoot ( true )
}
// purgeCheck remotes the root directory, if check is set then it
// refuses to do so if it has anything in
2015-11-07 12:14:46 +01:00
func ( f * Fs ) purgeCheck ( check bool ) error {
2015-09-03 00:37:42 +02:00
if f . root == "" {
2016-06-12 16:06:02 +02:00
return errors . New ( "can't purge root directory" )
2015-09-03 00:37:42 +02:00
}
dc := f . dirCache
err := dc . FindRoot ( false )
if err != nil {
return err
}
rootID := dc . RootID ( )
if check {
// check directory is empty
empty := true
2015-10-04 23:08:31 +02:00
_ , err = f . listAll ( rootID , "" , false , false , func ( node * acd . Node ) bool {
2015-09-03 00:37:42 +02:00
switch * node . Kind {
case folderKind :
empty = false
return true
case fileKind :
empty = false
return true
default :
fs . Debug ( "Found ASSET %s" , * node . Id )
}
return false
} )
if err != nil {
return err
}
if ! empty {
2016-06-12 16:06:02 +02:00
return errors . New ( "directory not empty" )
2015-09-03 00:37:42 +02:00
}
}
node := acd . NodeFromId ( rootID , f . c . Nodes )
2015-09-10 00:23:37 +02:00
var resp * http . Response
2015-09-11 20:18:41 +02:00
err = f . pacer . Call ( func ( ) ( bool , error ) {
resp , err = node . Trash ( )
2016-05-23 19:03:22 +02:00
return f . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
if err != nil {
return err
}
f . dirCache . ResetRoot ( )
if err != nil {
return err
}
return nil
}
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Rmdir ( ) error {
2015-09-03 00:37:42 +02:00
return f . purgeCheck ( true )
}
2015-09-22 19:47:16 +02:00
// Precision return the precision of this Fs
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Precision ( ) time . Duration {
2015-09-03 00:37:42 +02:00
return fs . ModTimeNotSupported
}
2016-01-11 13:39:33 +01:00
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
	return fs.HashSet(fs.HashMD5)
}
2015-09-03 00:37:42 +02:00
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
2015-11-07 12:14:46 +01:00
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// srcObj, ok := src.(*Object)
2015-09-03 00:37:42 +02:00
// if !ok {
// fs.Debug(src, "Can't copy - not same remote type")
// return nil, fs.ErrorCantCopy
// }
2015-11-07 12:14:46 +01:00
// srcFs := srcObj.fs
2015-09-03 00:37:42 +02:00
// _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
// if err != nil {
// return nil, err
// }
2016-06-25 22:58:34 +02:00
// return f.NewObject(remote), nil
2015-09-03 00:37:42 +02:00
//}
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Purge ( ) error {
2015-09-03 00:37:42 +02:00
return f . purgeCheck ( false )
}
// ------------------------------------------------------------
2015-09-22 19:47:16 +02:00
// Fs returns the parent Fs
2016-02-18 12:35:25 +01:00
func ( o * Object ) Fs ( ) fs . Info {
2015-11-07 12:14:46 +01:00
return o . fs
2015-09-03 00:37:42 +02:00
}
// Return a string version
2015-11-07 12:14:46 +01:00
func ( o * Object ) String ( ) string {
2015-09-03 00:37:42 +02:00
if o == nil {
return "<nil>"
}
return o . remote
}
2015-09-22 19:47:16 +02:00
// Remote returns the remote path
2015-11-07 12:14:46 +01:00
func ( o * Object ) Remote ( ) string {
2015-09-03 00:37:42 +02:00
return o . remote
}
2016-01-11 13:39:33 +01:00
// Hash returns the Md5sum of an object returning a lowercase hex string
func ( o * Object ) Hash ( t fs . HashType ) ( string , error ) {
if t != fs . HashMD5 {
return "" , fs . ErrHashUnsupported
}
2015-09-03 00:37:42 +02:00
if o . info . ContentProperties . Md5 != nil {
return * o . info . ContentProperties . Md5 , nil
}
return "" , nil
}
// Size returns the size of an object in bytes
2015-11-07 12:14:46 +01:00
func ( o * Object ) Size ( ) int64 {
2015-09-03 00:37:42 +02:00
return int64 ( * o . info . ContentProperties . Size )
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
2016-06-25 22:23:20 +02:00
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
2015-11-07 12:14:46 +01:00
func ( o * Object ) readMetaData ( ) ( err error ) {
2015-09-03 00:37:42 +02:00
if o . info != nil {
return nil
}
2015-11-07 12:14:46 +01:00
leaf , directoryID , err := o . fs . dirCache . FindPath ( o . remote , false )
2015-09-03 00:37:42 +02:00
if err != nil {
2016-06-25 22:23:20 +02:00
if err == fs . ErrorDirNotFound {
return fs . ErrorObjectNotFound
}
2015-09-03 00:37:42 +02:00
return err
}
2015-11-07 12:14:46 +01:00
folder := acd . FolderFromId ( directoryID , o . fs . c . Nodes )
2015-09-11 20:18:41 +02:00
var resp * http . Response
var info * acd . File
2015-11-07 12:14:46 +01:00
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2015-09-11 20:18:41 +02:00
info , resp , err = folder . GetFile ( leaf )
2016-05-23 19:03:22 +02:00
return o . fs . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
if err != nil {
2016-06-25 22:23:20 +02:00
if err == acd . ErrorNodeNotFound {
return fs . ErrorObjectNotFound
}
2015-09-03 00:37:42 +02:00
return err
}
o . info = info . Node
return nil
}
// ModTime returns the modification time of the object
//
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
2015-11-07 12:14:46 +01:00
func ( o * Object ) ModTime ( ) time . Time {
2015-09-03 00:37:42 +02:00
err := o . readMetaData ( )
if err != nil {
2016-06-18 10:32:14 +02:00
fs . Log ( o , "Failed to read metadata: %v" , err )
2015-09-03 00:37:42 +02:00
return time . Now ( )
}
modTime , err := time . Parse ( timeFormat , * o . info . ModifiedDate )
if err != nil {
2016-06-18 10:32:14 +02:00
fs . Log ( o , "Failed to read mtime from object: %v" , err )
2015-09-03 00:37:42 +02:00
return time . Now ( )
}
return modTime
}
2015-09-22 19:47:16 +02:00
// SetModTime sets the modification time of the local fs object
2016-03-22 16:07:10 +01:00
func ( o * Object ) SetModTime ( modTime time . Time ) error {
2015-09-03 00:37:42 +02:00
// FIXME not implemented
2016-03-22 16:07:10 +01:00
return fs . ErrorCantSetModTime
2015-09-03 00:37:42 +02:00
}
2015-09-22 19:47:16 +02:00
// Storable returns a boolean showing whether this object storable
2015-11-07 12:14:46 +01:00
func ( o * Object ) Storable ( ) bool {
2015-09-03 00:37:42 +02:00
return true
}
// Open an object for read
2016-09-10 12:29:57 +02:00
func ( o * Object ) Open ( options ... fs . OpenOption ) ( in io . ReadCloser , err error ) {
2016-01-30 19:08:44 +01:00
bigObject := o . Size ( ) >= int64 ( tempLinkThreshold )
if bigObject {
fs . Debug ( o , "Dowloading large object via tempLink" )
}
2015-09-03 00:37:42 +02:00
file := acd . File { Node : o . info }
2015-09-10 00:23:37 +02:00
var resp * http . Response
2016-09-10 12:29:57 +02:00
headers := fs . OpenOptionHeaders ( options )
2015-11-07 12:14:46 +01:00
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2016-01-30 19:08:44 +01:00
if ! bigObject {
2016-09-10 12:29:57 +02:00
in , resp , err = file . OpenHeaders ( headers )
2016-01-30 19:08:44 +01:00
} else {
2016-10-26 19:42:41 +02:00
in , resp , err = file . OpenTempURLHeaders ( rest . ClientWithHeaderReset ( o . fs . noAuthClient , headers ) , headers )
2016-01-30 19:08:44 +01:00
}
2016-05-23 19:03:22 +02:00
return o . fs . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
return in , err
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
2016-02-18 12:35:25 +01:00
func ( o * Object ) Update ( in io . Reader , src fs . ObjectInfo ) error {
size := src . Size ( )
2015-09-03 00:37:42 +02:00
file := acd . File { Node : o . info }
2015-09-10 00:23:37 +02:00
var info * acd . File
var resp * http . Response
var err error
2015-11-07 12:14:46 +01:00
err = o . fs . pacer . CallNoRetry ( func ( ) ( bool , error ) {
2016-10-17 17:18:56 +02:00
start := time . Now ( )
2016-08-08 20:05:06 +02:00
o . fs . startUpload ( )
2015-09-11 20:18:41 +02:00
if size != 0 {
info , resp , err = file . OverwriteSized ( in , size )
} else {
info , resp , err = file . Overwrite ( in )
}
2016-08-08 20:05:06 +02:00
o . fs . stopUpload ( )
2016-08-06 00:42:49 +02:00
var ok bool
2016-10-17 17:18:56 +02:00
ok , info , err = o . fs . checkUpload ( resp , in , src , info , err , time . Since ( start ) )
2016-08-06 00:42:49 +02:00
if ok {
return false , nil
}
2016-05-23 19:03:22 +02:00
return o . fs . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
if err != nil {
return err
}
o . info = info . Node
return nil
}
// Remove an object
2015-11-07 12:14:46 +01:00
func ( o * Object ) Remove ( ) error {
2015-09-11 20:18:41 +02:00
var resp * http . Response
var err error
2015-11-07 12:14:46 +01:00
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
2015-09-11 20:18:41 +02:00
resp , err = o . info . Trash ( )
2016-05-23 19:03:22 +02:00
return o . fs . shouldRetry ( resp , err )
2015-09-11 20:18:41 +02:00
} )
2015-09-03 00:37:42 +02:00
return err
}
2016-09-21 23:13:24 +02:00
// MimeType of an Object if known, "" otherwise.
func (o *Object) MimeType() string {
	if ct := o.info.ContentProperties.ContentType; ct != nil {
		return *ct
	}
	return ""
}
2015-09-03 00:37:42 +02:00
// Check the interfaces are satisfied
var (
2015-11-07 12:14:46 +01:00
_ fs . Fs = ( * Fs ) ( nil )
_ fs . Purger = ( * Fs ) ( nil )
// _ fs.Copier = (*Fs)(nil)
// _ fs.Mover = (*Fs)(nil)
// _ fs.DirMover = (*Fs)(nil)
2016-09-21 23:13:24 +02:00
_ fs . Object = ( * Object ) ( nil )
_ fs . MimeTyper = & Object { }
2015-09-03 00:37:42 +02:00
)