diff --git a/amazonclouddrive/amazonclouddrive.go b/amazonclouddrive/amazonclouddrive.go
index 85edcacd0..27baa25e6 100644
--- a/amazonclouddrive/amazonclouddrive.go
+++ b/amazonclouddrive/amazonclouddrive.go
@@ -77,8 +77,8 @@ func init() {
 	})
 }
 
-// FsAcd represents a remote acd server
-type FsAcd struct {
+// Fs represents a remote acd server
+type Fs struct {
 	name string // name of this remote
 	c *acd.Client // the connection to the acd server
 	root string // the path we are working on
@@ -86,11 +86,11 @@ type FsAcd struct {
 	pacer *pacer.Pacer // pacer for API calls
 }
 
-// FsObjectAcd describes a acd object
+// Object describes a acd object
 //
 // Will definitely have info but maybe not meta
-type FsObjectAcd struct {
-	acd *FsAcd // what this object is part of
+type Object struct {
+	fs *Fs // what this object is part of
 	remote string // The remote path
 	info *acd.Node // Info from the acd object if known
 }
@@ -98,17 +98,17 @@ type FsObjectAcd struct {
 // ------------------------------------------------------------
 
 // Name of the remote (as passed into NewFs)
-func (f *FsAcd) Name() string {
+func (f *Fs) Name() string {
 	return f.name
 }
 
 // Root of the remote (as passed into NewFs)
-func (f *FsAcd) Root() string {
+func (f *Fs) Root() string {
 	return f.root
 }
 
-// String converts this FsAcd to a string
-func (f *FsAcd) String() string {
+// String converts this Fs to a string
+func (f *Fs) String() string {
 	return fmt.Sprintf("Amazon cloud drive root '%s'", f.root)
 }
 
@@ -135,7 +135,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
 	return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
 
-// NewFs constructs an FsAcd from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string) (fs.Fs, error) {
 	root = parsePath(root)
 	oAuthClient, err := oauthutil.NewClient(name, acdConfig)
@@ -145,7 +145,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	c := acd.NewClient(oAuthClient)
 	c.UserAgent = fs.UserAgent
 
-	f := &FsAcd{
+	f := &Fs{
 		name: name,
 		root: root,
 		c: c,
@@ -202,9 +202,9 @@ func NewFs(name, root string) (fs.Fs, error) {
 // Return an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsAcd) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
-	o := &FsObjectAcd{
-		acd: f,
+func (f *Fs) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
+	o := &Object{
+		fs: f,
 		remote: remote,
 	}
 	if info != nil {
@@ -223,12 +223,12 @@ func (f *FsAcd) newFsObjectWithInfo(remote string, info *acd.Node) fs.Object {
 // NewFsObject returns an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsAcd) NewFsObject(remote string) fs.Object {
+func (f *Fs) NewFsObject(remote string) fs.Object {
 	return f.newFsObjectWithInfo(remote, nil)
 }
 
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *FsAcd) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
 	//fs.Debug(f, "FindLeaf(%q, %q)", pathID, leaf)
 	folder := acd.FolderFromId(pathID, f.c.Nodes)
 	var resp *http.Response
@@ -255,7 +255,7 @@ func (f *FsAcd) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err
 }
 
 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *FsAcd) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
 	//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
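The renames above follow the Go convention that the package name, not the type name, carries the context. A minimal sketch of how the types read from a caller's side after this change; the import alias and path are assumptions for illustration, not part of the diff:

package main

import (
	"fmt"

	acd "github.com/ncw/rclone/amazonclouddrive" // alias and path assumed for illustration
)

// describe reads naturally as acd.Fs / acd.Object instead of the stuttering
// amazonclouddrive.FsAcd / amazonclouddrive.FsObjectAcd.
func describe(f *acd.Fs, o *acd.Object) {
	fmt.Printf("remote %v contains %v\n", f, o)
}

func main() {}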
folder := acd.FolderFromId(pathID, f.c.Nodes) var resp *http.Response @@ -283,7 +283,7 @@ type listAllFn func(*acd.Node) bool // Lists the directory required calling the user function on each item found // // If the user fn ever returns true then it early exits with found = true -func (f *FsAcd) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { +func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { query := "parents:" + dirID if directoriesOnly { query += " AND kind:" + folderKind @@ -336,7 +336,7 @@ OUTER: // // This fetches the minimum amount of stuff but does more API calls // which makes it slow -func (f *FsAcd) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error { +func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error { var subError error // Make the API request var wg sync.WaitGroup @@ -377,7 +377,7 @@ func (f *FsAcd) listDirRecursive(dirID string, path string, out fs.ObjectsChan) } // List walks the path returning a channel of FsObjects -func (f *FsAcd) List() fs.ObjectsChan { +func (f *Fs) List() fs.ObjectsChan { out := make(fs.ObjectsChan, fs.Config.Checkers) go func() { defer close(out) @@ -397,7 +397,7 @@ func (f *FsAcd) List() fs.ObjectsChan { } // ListDir lists the directories -func (f *FsAcd) ListDir() fs.DirChan { +func (f *Fs) ListDir() fs.DirChan { out := make(fs.DirChan, fs.Config.Checkers) go func() { defer close(out) @@ -430,17 +430,17 @@ func (f *FsAcd) ListDir() fs.DirChan { // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *FsAcd) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { - // Temporary FsObject under construction - o := &FsObjectAcd{ - acd: f, +func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { + // Temporary Object under construction + o := &Object{ + fs: f, remote: remote, } leaf, directoryID, err := f.dirCache.FindPath(remote, true) if err != nil { return nil, err } - folder := acd.FolderFromId(directoryID, o.acd.c.Nodes) + folder := acd.FolderFromId(directoryID, o.fs.c.Nodes) var info *acd.File var resp *http.Response err = f.pacer.CallNoRetry(func() (bool, error) { @@ -459,13 +459,13 @@ func (f *FsAcd) Put(in io.Reader, remote string, modTime time.Time, size int64) } // Mkdir creates the container if it doesn't exist -func (f *FsAcd) Mkdir() error { +func (f *Fs) Mkdir() error { return f.dirCache.FindRoot(true) } // purgeCheck remotes the root directory, if check is set then it // refuses to do so if it has anything in -func (f *FsAcd) purgeCheck(check bool) error { +func (f *Fs) purgeCheck(check bool) error { if f.root == "" { return fmt.Errorf("Can't purge root directory") } @@ -520,12 +520,12 @@ func (f *FsAcd) purgeCheck(check bool) error { // Rmdir deletes the root folder // // Returns an error if it isn't empty -func (f *FsAcd) Rmdir() error { +func (f *Fs) Rmdir() error { return f.purgeCheck(true) } // Precision return the precision of this Fs -func (f *FsAcd) Precision() time.Duration { +func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } @@ -538,13 +538,13 @@ func (f *FsAcd) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -//func (f *FsAcd) Copy(src fs.Object, remote string) (fs.Object, 
error) { -// srcObj, ok := src.(*FsObjectAcd) +//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { +// srcObj, ok := src.(*Object) // if !ok { // fs.Debug(src, "Can't copy - not same remote type") // return nil, fs.ErrorCantCopy // } -// srcFs := srcObj.acd +// srcFs := srcObj.fs // _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil) // if err != nil { // return nil, err @@ -557,19 +557,19 @@ func (f *FsAcd) Precision() time.Duration { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *FsAcd) Purge() error { +func (f *Fs) Purge() error { return f.purgeCheck(false) } // ------------------------------------------------------------ // Fs returns the parent Fs -func (o *FsObjectAcd) Fs() fs.Fs { - return o.acd +func (o *Object) Fs() fs.Fs { + return o.fs } // Return a string version -func (o *FsObjectAcd) String() string { +func (o *Object) String() string { if o == nil { return "" } @@ -577,12 +577,12 @@ func (o *FsObjectAcd) String() string { } // Remote returns the remote path -func (o *FsObjectAcd) Remote() string { +func (o *Object) Remote() string { return o.remote } // Md5sum returns the Md5sum of an object returning a lowercase hex string -func (o *FsObjectAcd) Md5sum() (string, error) { +func (o *Object) Md5sum() (string, error) { if o.info.ContentProperties.Md5 != nil { return *o.info.ContentProperties.Md5, nil } @@ -590,25 +590,25 @@ func (o *FsObjectAcd) Md5sum() (string, error) { } // Size returns the size of an object in bytes -func (o *FsObjectAcd) Size() int64 { +func (o *Object) Size() int64 { return int64(*o.info.ContentProperties.Size) } // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info -func (o *FsObjectAcd) readMetaData() (err error) { +func (o *Object) readMetaData() (err error) { if o.info != nil { return nil } - leaf, directoryID, err := o.acd.dirCache.FindPath(o.remote, false) + leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false) if err != nil { return err } - folder := acd.FolderFromId(directoryID, o.acd.c.Nodes) + folder := acd.FolderFromId(directoryID, o.fs.c.Nodes) var resp *http.Response var info *acd.File - err = o.acd.pacer.Call(func() (bool, error) { + err = o.fs.pacer.Call(func() (bool, error) { info, resp, err = folder.GetFile(leaf) return shouldRetry(resp, err) }) @@ -625,7 +625,7 @@ func (o *FsObjectAcd) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *FsObjectAcd) ModTime() time.Time { +func (o *Object) ModTime() time.Time { err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %s", err) @@ -640,21 +640,21 @@ func (o *FsObjectAcd) ModTime() time.Time { } // SetModTime sets the modification time of the local fs object -func (o *FsObjectAcd) SetModTime(modTime time.Time) { +func (o *Object) SetModTime(modTime time.Time) { // FIXME not implemented return } // Storable returns a boolean showing whether this object storable -func (o *FsObjectAcd) Storable() bool { +func (o *Object) Storable() bool { return true } // Open an object for read -func (o *FsObjectAcd) Open() (in io.ReadCloser, err error) { +func (o *Object) Open() (in io.ReadCloser, err error) { file := acd.File{Node: o.info} var resp *http.Response - err = o.acd.pacer.Call(func() (bool, error) { + err = o.fs.pacer.Call(func() (bool, 
error) { in, resp, err = file.Open() return shouldRetry(resp, err) }) @@ -664,12 +664,12 @@ func (o *FsObjectAcd) Open() (in io.ReadCloser, err error) { // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned -func (o *FsObjectAcd) Update(in io.Reader, modTime time.Time, size int64) error { +func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error { file := acd.File{Node: o.info} var info *acd.File var resp *http.Response var err error - err = o.acd.pacer.CallNoRetry(func() (bool, error) { + err = o.fs.pacer.CallNoRetry(func() (bool, error) { if size != 0 { info, resp, err = file.OverwriteSized(in, size) } else { @@ -685,10 +685,10 @@ func (o *FsObjectAcd) Update(in io.Reader, modTime time.Time, size int64) error } // Remove an object -func (o *FsObjectAcd) Remove() error { +func (o *Object) Remove() error { var resp *http.Response var err error - err = o.acd.pacer.Call(func() (bool, error) { + err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.info.Trash() return shouldRetry(resp, err) }) @@ -697,10 +697,10 @@ func (o *FsObjectAcd) Remove() error { // Check the interfaces are satisfied var ( - _ fs.Fs = (*FsAcd)(nil) - _ fs.Purger = (*FsAcd)(nil) - // _ fs.Copier = (*FsAcd)(nil) - // _ fs.Mover = (*FsAcd)(nil) - // _ fs.DirMover = (*FsAcd)(nil) - _ fs.Object = (*FsObjectAcd)(nil) + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + // _ fs.Copier = (*Fs)(nil) + // _ fs.Mover = (*Fs)(nil) + // _ fs.DirMover = (*Fs)(nil) + _ fs.Object = (*Object)(nil) ) diff --git a/amazonclouddrive/amazonclouddrive_test.go b/amazonclouddrive/amazonclouddrive_test.go index b3c45a051..3357fde1d 100644 --- a/amazonclouddrive/amazonclouddrive_test.go +++ b/amazonclouddrive/amazonclouddrive_test.go @@ -13,7 +13,7 @@ import ( ) func init() { - fstests.NilObject = fs.Object((*amazonclouddrive.FsObjectAcd)(nil)) + fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil)) fstests.RemoteName = "TestAmazonCloudDrive:" } diff --git a/drive/drive.go b/drive/drive.go index 1cf55a4b1..59a379aa4 100644 --- a/drive/drive.go +++ b/drive/drive.go @@ -82,8 +82,8 @@ func init() { pflag.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. 
Must a power of 2 >= 256k.") } -// FsDrive represents a remote drive server -type FsDrive struct { +// Fs represents a remote drive server +type Fs struct { name string // name of this remote svc *drive.Service // the connection to the drive server root string // the path we are working on @@ -93,31 +93,31 @@ type FsDrive struct { pacer *pacer.Pacer // To pace the API calls } -// FsObjectDrive describes a drive object -type FsObjectDrive struct { - drive *FsDrive // what this object is part of - remote string // The remote path - id string // Drive Id of this object - url string // Download URL of this object - md5sum string // md5sum of the object - bytes int64 // size of the object - modifiedDate string // RFC3339 time it was last modified +// Object describes a drive object +type Object struct { + fs *Fs // what this object is part of + remote string // The remote path + id string // Drive Id of this object + url string // Download URL of this object + md5sum string // md5sum of the object + bytes int64 // size of the object + modifiedDate string // RFC3339 time it was last modified } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) -func (f *FsDrive) Name() string { +func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) -func (f *FsDrive) Root() string { +func (f *Fs) Root() string { return f.root } -// String converts this FsDrive to a string -func (f *FsDrive) String() string { +// String converts this Fs to a string +func (f *Fs) String() string { return fmt.Sprintf("Google drive root '%s'", f.root) } @@ -161,7 +161,7 @@ type listAllFn func(*drive.File) bool // If the user fn ever returns true then it early exits with found = true // // Search params: https://developers.google.com/drive/search-parameters -func (f *FsDrive) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { +func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) { query := fmt.Sprintf("trashed=false") if dirID != "" { query += fmt.Sprintf(" and '%s' in parents", dirID) @@ -216,7 +216,7 @@ func isPowerOfTwo(x int64) bool { } } -// NewFs contstructs an FsDrive from the path, container:path +// NewFs contstructs an Fs from the path, container:path func NewFs(name, path string) (fs.Fs, error) { if !isPowerOfTwo(int64(chunkSize)) { return nil, fmt.Errorf("drive: chunk size %v isn't a power of two", chunkSize) @@ -235,7 +235,7 @@ func NewFs(name, path string) (fs.Fs, error) { return nil, err } - f := &FsDrive{ + f := &Fs{ name: name, root: root, pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant), @@ -286,9 +286,9 @@ func NewFs(name, path string) (fs.Fs, error) { } // Return an FsObject from a path -func (f *FsDrive) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Object, error) { - fs := &FsObjectDrive{ - drive: f, +func (f *Fs) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Object, error) { + fs := &Object{ + fs: f, remote: remote, } if info != nil { @@ -306,7 +306,7 @@ func (f *FsDrive) newFsObjectWithInfoErr(remote string, info *drive.File) (fs.Ob // Return an FsObject from a path // // May return nil if an error occurred -func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) fs.Object { +func (f *Fs) newFsObjectWithInfo(remote string, info *drive.File) fs.Object { fs, _ := f.newFsObjectWithInfoErr(remote, 
info) // Errors have already been logged return fs @@ -315,12 +315,12 @@ func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) fs.Object // NewFsObject returns an FsObject from a path // // May return nil if an error occurred -func (f *FsDrive) NewFsObject(remote string) fs.Object { +func (f *Fs) NewFsObject(remote string) fs.Object { return f.newFsObjectWithInfo(remote, nil) } // FindLeaf finds a directory of name leaf in the folder with ID pathID -func (f *FsDrive) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) { +func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) { // Find the leaf in pathID found, err = f.listAll(pathID, leaf, true, false, func(item *drive.File) bool { if item.Title == leaf { @@ -333,7 +333,7 @@ func (f *FsDrive) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, e } // CreateDir makes a directory with pathID as parent and name leaf -func (f *FsDrive) CreateDir(pathID, leaf string) (newID string, err error) { +func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) { // fmt.Println("Making", path) // Define the metadata for the directory we are going to create. createInfo := &drive.File{ @@ -359,7 +359,7 @@ func (f *FsDrive) CreateDir(pathID, leaf string) (newID string, err error) { // // This fetches the minimum amount of stuff but does more API calls // which makes it slow -func (f *FsDrive) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error { +func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error { var subError error // Make the API request var wg sync.WaitGroup @@ -407,7 +407,7 @@ func (f *FsDrive) listDirRecursive(dirID string, path string, out fs.ObjectsChan // // This is fast in terms of number of API calls, but slow in terms of // fetching more data than it needs -func (f *FsDrive) listDirFull(dirID string, path string, out fs.ObjectsChan) error { +func (f *Fs) listDirFull(dirID string, path string, out fs.ObjectsChan) error { // Orphans waiting for their parent orphans := make(map[string][]*drive.File) @@ -469,7 +469,7 @@ func (f *FsDrive) listDirFull(dirID string, path string, out fs.ObjectsChan) err } // List walks the path returning a channel of FsObjects -func (f *FsDrive) List() fs.ObjectsChan { +func (f *Fs) List() fs.ObjectsChan { out := make(fs.ObjectsChan, fs.Config.Checkers) go func() { defer close(out) @@ -493,7 +493,7 @@ func (f *FsDrive) List() fs.ObjectsChan { } // ListDir walks the path returning a channel of directories -func (f *FsDrive) ListDir() fs.DirChan { +func (f *Fs) ListDir() fs.DirChan { out := make(fs.DirChan, fs.Config.Checkers) go func() { defer close(out) @@ -522,13 +522,13 @@ func (f *FsDrive) ListDir() fs.DirChan { } // Creates a drive.File info from the parameters passed in and a half -// finished FsObjectDrive which must have setMetaData called on it +// finished Object which must have setMetaData called on it // // Used to create new objects -func (f *FsDrive) createFileInfo(remote string, modTime time.Time, size int64) (*FsObjectDrive, *drive.File, error) { - // Temporary FsObject under construction - o := &FsObjectDrive{ - drive: f, +func (f *Fs) createFileInfo(remote string, modTime time.Time, size int64) (*Object, *drive.File, error) { + // Temporary Object under construction + o := &Object{ + fs: f, remote: remote, bytes: size, } @@ -558,7 +558,7 @@ func (f *FsDrive) createFileInfo(remote string, modTime time.Time, size int64) ( // Copy the reader in to the new object which 
is returned // // The new object may have been created if an error is returned -func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { +func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { o, createInfo, err := f.createFileInfo(remote, modTime, size) if err != nil { return nil, err @@ -587,14 +587,14 @@ func (f *FsDrive) Put(in io.Reader, remote string, modTime time.Time, size int64 } // Mkdir creates the container if it doesn't exist -func (f *FsDrive) Mkdir() error { +func (f *Fs) Mkdir() error { return f.dirCache.FindRoot(true) } // Rmdir deletes the container // // Returns an error if it isn't empty -func (f *FsDrive) Rmdir() error { +func (f *Fs) Rmdir() error { err := f.dirCache.FindRoot(false) if err != nil { return err @@ -629,7 +629,7 @@ func (f *FsDrive) Rmdir() error { } // Precision of the object storage system -func (f *FsDrive) Precision() time.Duration { +func (f *Fs) Precision() time.Duration { return time.Millisecond } @@ -642,8 +642,8 @@ func (f *FsDrive) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *FsDrive) Copy(src fs.Object, remote string) (fs.Object, error) { - srcObj, ok := src.(*FsObjectDrive) +func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) if !ok { fs.Debug(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy @@ -655,8 +655,8 @@ func (f *FsDrive) Copy(src fs.Object, remote string) (fs.Object, error) { } var info *drive.File - err = o.drive.pacer.Call(func() (bool, error) { - info, err = o.drive.svc.Files.Copy(srcObj.id, createInfo).Do() + err = o.fs.pacer.Call(func() (bool, error) { + info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Do() return shouldRetry(err) }) if err != nil { @@ -672,7 +672,7 @@ func (f *FsDrive) Copy(src fs.Object, remote string) (fs.Object, error) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *FsDrive) Purge() error { +func (f *Fs) Purge() error { if f.root == "" { return fmt.Errorf("Can't purge root directory") } @@ -704,8 +704,8 @@ func (f *FsDrive) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *FsDrive) Move(src fs.Object, remote string) (fs.Object, error) { - srcObj, ok := src.(*FsObjectDrive) +func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) if !ok { fs.Debug(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove @@ -735,8 +735,8 @@ func (f *FsDrive) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *FsDrive) DirMove(src fs.Fs) error { - srcFs, ok := src.(*FsDrive) +func (f *Fs) DirMove(src fs.Fs) error { + srcFs, ok := src.(*Fs) if !ok { fs.Debug(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove @@ -771,12 +771,12 @@ func (f *FsDrive) DirMove(src fs.Fs) error { // ------------------------------------------------------------ // Fs returns the parent Fs -func (o *FsObjectDrive) Fs() fs.Fs { - return o.drive +func (o *Object) Fs() fs.Fs { + return o.fs } // Return a string version -func (o *FsObjectDrive) String() string { +func (o *Object) String() 
string { if o == nil { return "" } @@ -784,22 +784,22 @@ func (o *FsObjectDrive) String() string { } // Remote returns the remote path -func (o *FsObjectDrive) Remote() string { +func (o *Object) Remote() string { return o.remote } // Md5sum returns the Md5sum of an object returning a lowercase hex string -func (o *FsObjectDrive) Md5sum() (string, error) { +func (o *Object) Md5sum() (string, error) { return o.md5sum, nil } // Size returns the size of an object in bytes -func (o *FsObjectDrive) Size() int64 { +func (o *Object) Size() int64 { return o.bytes } // setMetaData sets the fs data from a drive.File -func (o *FsObjectDrive) setMetaData(info *drive.File) { +func (o *Object) setMetaData(info *drive.File) { o.id = info.Id o.url = info.DownloadUrl o.md5sum = strings.ToLower(info.Md5Checksum) @@ -808,17 +808,17 @@ func (o *FsObjectDrive) setMetaData(info *drive.File) { } // readMetaData gets the info if it hasn't already been fetched -func (o *FsObjectDrive) readMetaData() (err error) { +func (o *Object) readMetaData() (err error) { if o.id != "" { return nil } - leaf, directoryID, err := o.drive.dirCache.FindPath(o.remote, false) + leaf, directoryID, err := o.fs.dirCache.FindPath(o.remote, false) if err != nil { return err } - found, err := o.drive.listAll(directoryID, leaf, false, true, func(item *drive.File) bool { + found, err := o.fs.listAll(directoryID, leaf, false, true, func(item *drive.File) bool { if item.Title == leaf { o.setMetaData(item) return true @@ -840,7 +840,7 @@ func (o *FsObjectDrive) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *FsObjectDrive) ModTime() time.Time { +func (o *Object) ModTime() time.Time { err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %s", err) @@ -855,7 +855,7 @@ func (o *FsObjectDrive) ModTime() time.Time { } // SetModTime sets the modification time of the drive fs object -func (o *FsObjectDrive) SetModTime(modTime time.Time) { +func (o *Object) SetModTime(modTime time.Time) { err := o.readMetaData() if err != nil { fs.Stats.Error() @@ -868,8 +868,8 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) { } // Set modified date var info *drive.File - err = o.drive.pacer.Call(func() (bool, error) { - info, err = o.drive.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do() + err = o.fs.pacer.Call(func() (bool, error) { + info, err = o.fs.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Do() return shouldRetry(err) }) if err != nil { @@ -882,12 +882,12 @@ func (o *FsObjectDrive) SetModTime(modTime time.Time) { } // Storable returns a boolean as to whether this object is storable -func (o *FsObjectDrive) Storable() bool { +func (o *Object) Storable() bool { return true } // Open an object for read -func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) { +func (o *Object) Open() (in io.ReadCloser, err error) { if o.url == "" { return nil, fmt.Errorf("Forbidden to download - check sharing permission") } @@ -897,8 +897,8 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) { } req.Header.Set("User-Agent", fs.UserAgent) var res *http.Response - err = o.drive.pacer.Call(func() (bool, error) { - res, err = o.drive.client.Do(req) + err = o.fs.pacer.Call(func() (bool, error) { + res, err = o.fs.client.Do(req) return shouldRetry(err) }) if err != nil { @@ -916,7 +916,7 @@ func (o *FsObjectDrive) Open() (in io.ReadCloser, err error) { // Copy the reader into the object updating modTime 
and size // // The new object may have been created if an error is returned -func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) error { +func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error { updateInfo := &drive.File{ Id: o.id, ModifiedDate: modTime.Format(timeFormatOut), @@ -927,8 +927,8 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro var info *drive.File if size == 0 || size < int64(driveUploadCutoff) { // Don't retry, return a retry error instead - err = o.drive.pacer.CallNoRetry(func() (bool, error) { - info, err = o.drive.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do() + err = o.fs.pacer.CallNoRetry(func() (bool, error) { + info, err = o.fs.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in).Do() return shouldRetry(err) }) if err != nil { @@ -936,7 +936,7 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro } } else { // Upload the file in chunks - info, err = o.drive.Upload(in, size, fs.MimeType(o), updateInfo, o.remote) + info, err = o.fs.Upload(in, size, fs.MimeType(o), updateInfo, o.remote) if err != nil { return err } @@ -946,13 +946,13 @@ func (o *FsObjectDrive) Update(in io.Reader, modTime time.Time, size int64) erro } // Remove an object -func (o *FsObjectDrive) Remove() error { +func (o *Object) Remove() error { var err error - err = o.drive.pacer.Call(func() (bool, error) { + err = o.fs.pacer.Call(func() (bool, error) { if *driveUseTrash { - _, err = o.drive.svc.Files.Trash(o.id).Do() + _, err = o.fs.svc.Files.Trash(o.id).Do() } else { - err = o.drive.svc.Files.Delete(o.id).Do() + err = o.fs.svc.Files.Delete(o.id).Do() } return shouldRetry(err) }) @@ -961,10 +961,10 @@ func (o *FsObjectDrive) Remove() error { // Check the interfaces are satisfied var ( - _ fs.Fs = (*FsDrive)(nil) - _ fs.Purger = (*FsDrive)(nil) - _ fs.Copier = (*FsDrive)(nil) - _ fs.Mover = (*FsDrive)(nil) - _ fs.DirMover = (*FsDrive)(nil) - _ fs.Object = (*FsObjectDrive)(nil) + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.Object = (*Object)(nil) ) diff --git a/drive/drive_test.go b/drive/drive_test.go index 5c5e5e484..94af9b933 100644 --- a/drive/drive_test.go +++ b/drive/drive_test.go @@ -13,7 +13,7 @@ import ( ) func init() { - fstests.NilObject = fs.Object((*drive.FsObjectDrive)(nil)) + fstests.NilObject = fs.Object((*drive.Object)(nil)) fstests.RemoteName = "TestDrive:" } diff --git a/drive/upload.go b/drive/upload.go index ec1aebfe8..272a878be 100644 --- a/drive/upload.go +++ b/drive/upload.go @@ -36,7 +36,7 @@ const ( // resumableUpload is used by the generated APIs to provide resumable uploads. // It is not used by developers directly. type resumableUpload struct { - f *FsDrive + f *Fs remote string // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". 
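The o.fs.pacer.Call and o.fs.pacer.CallNoRetry conversions above all share one shape: the closure makes a single API attempt and reports (retry, err). A self-contained sketch of that control flow, deliberately simplified and not rclone's actual pacer package (which also paces calls adaptively):

package main

import (
	"errors"
	"fmt"
	"time"
)

// call retries fn until it reports retry == false or attempts run out.
// This mirrors the shape of pacer.Call in the diff: the closure does one
// API request and decides whether the error is worth retrying.
func call(attempts int, sleep time.Duration, fn func() (bool, error)) error {
	var err error
	for i := 0; i < attempts; i++ {
		var retry bool
		retry, err = fn()
		if !retry {
			return err
		}
		time.Sleep(sleep)
	}
	return err
}

func main() {
	tries := 0
	err := call(3, 10*time.Millisecond, func() (bool, error) {
		tries++
		if tries < 3 {
			return true, errors.New("rate limited") // pretend the API throttled us
		}
		return false, nil // success, stop retrying
	})
	fmt.Println(tries, err) // 3 <nil>
}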
URI string @@ -51,7 +51,7 @@ type resumableUpload struct { } // Upload the io.Reader in of size bytes with contentType and info -func (f *FsDrive) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) { +func (f *Fs) Upload(in io.Reader, size int64, contentType string, info *drive.File, remote string) (*drive.File, error) { fileID := info.Id var body io.Reader body, err := googleapi.WithoutDataWrapper.JSONReader(info) diff --git a/dropbox/dropbox.go b/dropbox/dropbox.go index 9adaeb2fe..9fd64ac02 100644 --- a/dropbox/dropbox.go +++ b/dropbox/dropbox.go @@ -92,8 +92,8 @@ func configHelper(name string) { } } -// FsDropbox represents a remote dropbox server -type FsDropbox struct { +// Fs represents a remote dropbox server +type Fs struct { name string // name of this remote db *dropbox.Dropbox // the connection to the dropbox server root string // the path we are working on @@ -101,29 +101,29 @@ type FsDropbox struct { slashRootSlash string // root with "/" prefix and postfix, lowercase } -// FsObjectDropbox describes a dropbox object -type FsObjectDropbox struct { - dropbox *FsDropbox // what this object is part of - remote string // The remote path - bytes int64 // size of the object - modTime time.Time // time it was last modified - hasMetadata bool // metadata is valid +// Object describes a dropbox object +type Object struct { + fs *Fs // what this object is part of + remote string // The remote path + bytes int64 // size of the object + modTime time.Time // time it was last modified + hasMetadata bool // metadata is valid } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) -func (f *FsDropbox) Name() string { +func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) -func (f *FsDropbox) Root() string { +func (f *Fs) Root() string { return f.root } -// String converts this FsDropbox to a string -func (f *FsDropbox) String() string { +// String converts this Fs to a string +func (f *Fs) String() string { return fmt.Sprintf("Dropbox root '%s'", f.root) } @@ -144,7 +144,7 @@ func newDropbox(name string) (*dropbox.Dropbox, error) { return db, err } -// NewFs contstructs an FsDropbox from the path, container:path +// NewFs contstructs an Fs from the path, container:path func NewFs(name, root string) (fs.Fs, error) { if uploadChunkSize > maxUploadChunkSize { return nil, fmt.Errorf("Chunk size too big, must be < %v", maxUploadChunkSize) @@ -153,7 +153,7 @@ func NewFs(name, root string) (fs.Fs, error) { if err != nil { return nil, err } - f := &FsDropbox{ + f := &Fs{ name: name, db: db, } @@ -186,7 +186,7 @@ func NewFs(name, root string) (fs.Fs, error) { } // Sets root in f -func (f *FsDropbox) setRoot(root string) { +func (f *Fs) setRoot(root string) { f.root = strings.Trim(root, "/") lowerCaseRoot := strings.ToLower(f.root) @@ -200,10 +200,10 @@ func (f *FsDropbox) setRoot(root string) { // Return an FsObject from a path // // May return nil if an error occurred -func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object { - o := &FsObjectDropbox{ - dropbox: f, - remote: remote, +func (f *Fs) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.Object { + o := &Object{ + fs: f, + remote: remote, } if info != nil { o.setMetadataFromEntry(info) @@ -220,12 +220,12 @@ func (f *FsDropbox) newFsObjectWithInfo(remote string, info *dropbox.Entry) fs.O // NewFsObject returns an FsObject from a path // // May return nil if an 
error occurred -func (f *FsDropbox) NewFsObject(remote string) fs.Object { +func (f *Fs) NewFsObject(remote string) fs.Object { return f.newFsObjectWithInfo(remote, nil) } // Strips the root off path and returns it -func (f *FsDropbox) stripRoot(path string) *string { +func (f *Fs) stripRoot(path string) *string { lowercase := strings.ToLower(path) if !strings.HasPrefix(lowercase, f.slashRootSlash) { @@ -239,7 +239,7 @@ func (f *FsDropbox) stripRoot(path string) *string { } // Walk the root returning a channel of FsObjects -func (f *FsDropbox) list(out fs.ObjectsChan) { +func (f *Fs) list(out fs.ObjectsChan) { // Track path component case, it could be different for entries coming from DropBox API // See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us // and https://github.com/ncw/rclone/issues/53 @@ -318,7 +318,7 @@ func (f *FsDropbox) list(out fs.ObjectsChan) { } // List walks the path returning a channel of FsObjects -func (f *FsDropbox) List() fs.ObjectsChan { +func (f *Fs) List() fs.ObjectsChan { out := make(fs.ObjectsChan, fs.Config.Checkers) go func() { defer close(out) @@ -328,7 +328,7 @@ func (f *FsDropbox) List() fs.ObjectsChan { } // ListDir walks the path returning a channel of FsObjects -func (f *FsDropbox) ListDir() fs.DirChan { +func (f *Fs) ListDir() fs.DirChan { out := make(fs.DirChan, fs.Config.Checkers) go func() { defer close(out) @@ -379,14 +379,17 @@ func (rc *readCloser) Close() error { // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *FsDropbox) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { - // Temporary FsObject under construction - o := &FsObjectDropbox{dropbox: f, remote: remote} +func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { + // Temporary Object under construction + o := &Object{ + fs: f, + remote: remote, + } return o, o.Update(in, modTime, size) } // Mkdir creates the container if it doesn't exist -func (f *FsDropbox) Mkdir() error { +func (f *Fs) Mkdir() error { entry, err := f.db.Metadata(f.slashRoot, false, false, "", "", metadataLimit) if err == nil { if entry.IsDir { @@ -401,7 +404,7 @@ func (f *FsDropbox) Mkdir() error { // Rmdir deletes the container // // Returns an error if it isn't empty -func (f *FsDropbox) Rmdir() error { +func (f *Fs) Rmdir() error { entry, err := f.db.Metadata(f.slashRoot, true, false, "", "", 16) if err != nil { return err @@ -413,7 +416,7 @@ func (f *FsDropbox) Rmdir() error { } // Precision returns the precision -func (f *FsDropbox) Precision() time.Duration { +func (f *Fs) Precision() time.Duration { return fs.ModTimeNotSupported } @@ -426,15 +429,18 @@ func (f *FsDropbox) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) { - srcObj, ok := src.(*FsObjectDropbox) +func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) if !ok { fs.Debug(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } - // Temporary FsObject under construction - dstObj := &FsObjectDropbox{dropbox: f, remote: remote} + // Temporary Object under construction + dstObj := &Object{ + fs: f, + remote: remote, + } srcPath := srcObj.remotePath() dstPath := 
dstObj.remotePath() @@ -451,7 +457,7 @@ func (f *FsDropbox) Copy(src fs.Object, remote string) (fs.Object, error) { // Optional interface: Only implement this if you have a way of // deleting all the files quicker than just running Remove() on the // result of List() -func (f *FsDropbox) Purge() error { +func (f *Fs) Purge() error { // Let dropbox delete the filesystem tree _, err := f.db.Delete(f.slashRoot) return err @@ -466,15 +472,18 @@ func (f *FsDropbox) Purge() error { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantMove -func (f *FsDropbox) Move(src fs.Object, remote string) (fs.Object, error) { - srcObj, ok := src.(*FsObjectDropbox) +func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) if !ok { fs.Debug(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } - // Temporary FsObject under construction - dstObj := &FsObjectDropbox{dropbox: f, remote: remote} + // Temporary Object under construction + dstObj := &Object{ + fs: f, + remote: remote, + } srcPath := srcObj.remotePath() dstPath := dstObj.remotePath() @@ -493,8 +502,8 @@ func (f *FsDropbox) Move(src fs.Object, remote string) (fs.Object, error) { // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists -func (f *FsDropbox) DirMove(src fs.Fs) error { - srcFs, ok := src.(*FsDropbox) +func (f *Fs) DirMove(src fs.Fs) error { + srcFs, ok := src.(*Fs) if !ok { fs.Debug(srcFs, "Can't move directory - not same remote type") return fs.ErrorCantDirMove @@ -517,12 +526,12 @@ func (f *FsDropbox) DirMove(src fs.Fs) error { // ------------------------------------------------------------ // Fs returns the parent Fs -func (o *FsObjectDropbox) Fs() fs.Fs { - return o.dropbox +func (o *Object) Fs() fs.Fs { + return o.fs } // Return a string version -func (o *FsObjectDropbox) String() string { +func (o *Object) String() string { if o == nil { return "" } @@ -530,32 +539,32 @@ func (o *FsObjectDropbox) String() string { } // Remote returns the remote path -func (o *FsObjectDropbox) Remote() string { +func (o *Object) Remote() string { return o.remote } // Md5sum returns the Md5sum of an object returning a lowercase hex string -func (o *FsObjectDropbox) Md5sum() (string, error) { +func (o *Object) Md5sum() (string, error) { return "", nil } // Size returns the size of an object in bytes -func (o *FsObjectDropbox) Size() int64 { +func (o *Object) Size() int64 { return o.bytes } // setMetadataFromEntry sets the fs data from a dropbox.Entry // // This isn't a complete set of metadata and has an inacurate date -func (o *FsObjectDropbox) setMetadataFromEntry(info *dropbox.Entry) { +func (o *Object) setMetadataFromEntry(info *dropbox.Entry) { o.bytes = info.Bytes o.modTime = time.Time(info.ClientMtime) o.hasMetadata = true } // Reads the entry from dropbox -func (o *FsObjectDropbox) readEntry() (*dropbox.Entry, error) { - entry, err := o.dropbox.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit) +func (o *Object) readEntry() (*dropbox.Entry, error) { + entry, err := o.fs.db.Metadata(o.remotePath(), false, false, "", "", metadataLimit) if err != nil { fs.Debug(o, "Error reading file: %s", err) return nil, fmt.Errorf("Error reading file: %s", err) @@ -564,7 +573,7 @@ func (o *FsObjectDropbox) readEntry() (*dropbox.Entry, error) { } // Read entry if not set and set metadata from it -func (o *FsObjectDropbox) readEntryAndSetMetadata() error { +func (o *Object) 
readEntryAndSetMetadata() error { // Last resort set time from client if !o.modTime.IsZero() { return nil @@ -578,8 +587,8 @@ func (o *FsObjectDropbox) readEntryAndSetMetadata() error { } // Returns the remote path for the object -func (o *FsObjectDropbox) remotePath() string { - return o.dropbox.slashRootSlash + o.remote +func (o *Object) remotePath() string { + return o.fs.slashRootSlash + o.remote } // Returns the key for the metadata database for a given path @@ -592,12 +601,12 @@ func metadataKey(path string) string { } // Returns the key for the metadata database -func (o *FsObjectDropbox) metadataKey() string { +func (o *Object) metadataKey() string { return metadataKey(o.remotePath()) } // readMetaData gets the info if it hasn't already been fetched -func (o *FsObjectDropbox) readMetaData() (err error) { +func (o *Object) readMetaData() (err error) { if o.hasMetadata { return nil } @@ -609,7 +618,7 @@ func (o *FsObjectDropbox) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *FsObjectDropbox) ModTime() time.Time { +func (o *Object) ModTime() time.Time { err := o.readMetaData() if err != nil { fs.Log(o, "Failed to read metadata: %s", err) @@ -621,19 +630,19 @@ func (o *FsObjectDropbox) ModTime() time.Time { // SetModTime sets the modification time of the local fs object // // Commits the datastore -func (o *FsObjectDropbox) SetModTime(modTime time.Time) { +func (o *Object) SetModTime(modTime time.Time) { // FIXME not implemented return } // Storable returns whether this object is storable -func (o *FsObjectDropbox) Storable() bool { +func (o *Object) Storable() bool { return true } // Open an object for read -func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) { - in, _, err = o.dropbox.db.Download(o.remotePath(), "", 0) +func (o *Object) Open() (in io.ReadCloser, err error) { + in, _, err = o.fs.db.Download(o.remotePath(), "", 0) return } @@ -642,13 +651,13 @@ func (o *FsObjectDropbox) Open() (in io.ReadCloser, err error) { // Copy the reader into the object updating modTime and size // // The new object may have been created if an error is returned -func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) error { +func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error { remote := o.remotePath() if ignoredFiles.MatchString(remote) { fs.ErrorLog(o, "File name disallowed - not uploading") return nil } - entry, err := o.dropbox.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "") + entry, err := o.fs.db.UploadByChunk(ioutil.NopCloser(in), int(uploadChunkSize), remote, true, "") if err != nil { return fmt.Errorf("Upload failed: %s", err) } @@ -657,17 +666,17 @@ func (o *FsObjectDropbox) Update(in io.Reader, modTime time.Time, size int64) er } // Remove an object -func (o *FsObjectDropbox) Remove() error { - _, err := o.dropbox.db.Delete(o.remotePath()) +func (o *Object) Remove() error { + _, err := o.fs.db.Delete(o.remotePath()) return err } // Check the interfaces are satisfied var ( - _ fs.Fs = (*FsDropbox)(nil) - _ fs.Copier = (*FsDropbox)(nil) - _ fs.Purger = (*FsDropbox)(nil) - _ fs.Mover = (*FsDropbox)(nil) - _ fs.DirMover = (*FsDropbox)(nil) - _ fs.Object = (*FsObjectDropbox)(nil) + _ fs.Fs = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.Object = (*Object)(nil) ) diff --git a/dropbox/dropbox_test.go 
b/dropbox/dropbox_test.go index 677c92ee8..66b05672e 100644 --- a/dropbox/dropbox_test.go +++ b/dropbox/dropbox_test.go @@ -13,7 +13,7 @@ import ( ) func init() { - fstests.NilObject = fs.Object((*dropbox.FsObjectDropbox)(nil)) + fstests.NilObject = fs.Object((*dropbox.Object)(nil)) fstests.RemoteName = "TestDropbox:" } diff --git a/fstest/fstests/gen_tests.go b/fstest/fstests/gen_tests.go index de137a4fb..a58070ae7 100644 --- a/fstest/fstests/gen_tests.go +++ b/fstest/fstests/gen_tests.go @@ -45,7 +45,6 @@ type Data struct { FsName string UpperFsName string TestName string - ObjectName string Fns []string } @@ -65,7 +64,7 @@ import ( ) func init() { - fstests.NilObject = fs.Object((*{{ .FsName }}.{{ .ObjectName }})(nil)) + fstests.NilObject = fs.Object((*{{ .FsName }}.Object)(nil)) fstests.RemoteName = "{{ .TestName }}" } @@ -75,7 +74,7 @@ func init() { ` // Generate test file piping it through gofmt -func generateTestProgram(t *template.Template, fns []string, Fsname, ObjectName string) { +func generateTestProgram(t *template.Template, fns []string, Fsname string) { fsname := strings.ToLower(Fsname) TestName := "Test" + Fsname + ":" outfile := "../../" + fsname + "/" + fsname + "_test.go" @@ -89,7 +88,6 @@ func generateTestProgram(t *template.Template, fns []string, Fsname, ObjectName FsName: fsname, UpperFsName: Fsname, TestName: TestName, - ObjectName: ObjectName, Fns: fns, } @@ -126,13 +124,13 @@ func generateTestProgram(t *template.Template, fns []string, Fsname, ObjectName func main() { fns := findTestFunctions() t := template.Must(template.New("main").Parse(testProgram)) - generateTestProgram(t, fns, "Local", "FsObjectLocal") - generateTestProgram(t, fns, "Swift", "FsObjectSwift") - generateTestProgram(t, fns, "S3", "FsObjectS3") - generateTestProgram(t, fns, "Drive", "FsObjectDrive") - generateTestProgram(t, fns, "GoogleCloudStorage", "FsObjectStorage") - generateTestProgram(t, fns, "Dropbox", "FsObjectDropbox") - generateTestProgram(t, fns, "AmazonCloudDrive", "FsObjectAcd") - generateTestProgram(t, fns, "OneDrive", "Object") + generateTestProgram(t, fns, "Local") + generateTestProgram(t, fns, "Swift") + generateTestProgram(t, fns, "S3") + generateTestProgram(t, fns, "Drive") + generateTestProgram(t, fns, "GoogleCloudStorage") + generateTestProgram(t, fns, "Dropbox") + generateTestProgram(t, fns, "AmazonCloudDrive") + generateTestProgram(t, fns, "OneDrive") log.Printf("Done") } diff --git a/googlecloudstorage/googlecloudstorage.go b/googlecloudstorage/googlecloudstorage.go index 4829f2f23..86a054ee0 100644 --- a/googlecloudstorage/googlecloudstorage.go +++ b/googlecloudstorage/googlecloudstorage.go @@ -118,8 +118,8 @@ func init() { }) } -// FsStorage represents a remote storage server -type FsStorage struct { +// Fs represents a remote storage server +type Fs struct { name string // name of this remote svc *storage.Service // the connection to the storage server client *http.Client // authorized client @@ -130,35 +130,35 @@ type FsStorage struct { bucketAcl string // used when creating new buckets } -// FsObjectStorage describes a storage object +// Object describes a storage object // // Will definitely have info but maybe not meta -type FsObjectStorage struct { - storage *FsStorage // what this object is part of - remote string // The remote path - url string // download path - md5sum string // The MD5Sum of the object - bytes int64 // Bytes in the object - modTime time.Time // Modified time of the object +type Object struct { + fs *Fs // what this object is part of + remote 
string // The remote path + url string // download path + md5sum string // The MD5Sum of the object + bytes int64 // Bytes in the object + modTime time.Time // Modified time of the object } // ------------------------------------------------------------ // Name of the remote (as passed into NewFs) -func (f *FsStorage) Name() string { +func (f *Fs) Name() string { return f.name } // Root of the remote (as passed into NewFs) -func (f *FsStorage) Root() string { +func (f *Fs) Root() string { if f.root == "" { return f.bucket } return f.bucket + "/" + f.root } -// String converts this FsStorage to a string -func (f *FsStorage) String() string { +// String converts this Fs to a string +func (f *Fs) String() string { if f.root == "" { return fmt.Sprintf("Storage bucket %s", f.bucket) } @@ -180,7 +180,7 @@ func parsePath(path string) (bucket, directory string, err error) { return } -// NewFs contstructs an FsStorage from the path, bucket:path +// NewFs contstructs an Fs from the path, bucket:path func NewFs(name, root string) (fs.Fs, error) { oAuthClient, err := oauthutil.NewClient(name, storageConfig) if err != nil { @@ -192,7 +192,7 @@ func NewFs(name, root string) (fs.Fs, error) { return nil, err } - f := &FsStorage{ + f := &Fs{ name: name, bucket: bucket, root: directory, @@ -237,10 +237,10 @@ func NewFs(name, root string) (fs.Fs, error) { // Return an FsObject from a path // // May return nil if an error occurred -func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object { - o := &FsObjectStorage{ - storage: f, - remote: remote, +func (f *Fs) newFsObjectWithInfo(remote string, info *storage.Object) fs.Object { + o := &Object{ + fs: f, + remote: remote, } if info != nil { o.setMetaData(info) @@ -257,14 +257,14 @@ func (f *FsStorage) newFsObjectWithInfo(remote string, info *storage.Object) fs. 
// NewFsObject returns an FsObject from a path // // May return nil if an error occurred -func (f *FsStorage) NewFsObject(remote string) fs.Object { +func (f *Fs) NewFsObject(remote string) fs.Object { return f.newFsObjectWithInfo(remote, nil) } // list the objects into the function supplied // // If directories is set it only sends directories -func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) { +func (f *Fs) list(directories bool, fn func(string, *storage.Object)) { list := f.svc.Objects.List(f.bucket).Prefix(f.root).MaxResults(listChunks) if directories { list = list.Delimiter("/") @@ -303,7 +303,7 @@ func (f *FsStorage) list(directories bool, fn func(string, *storage.Object)) { } // List walks the path returning a channel of FsObjects -func (f *FsStorage) List() fs.ObjectsChan { +func (f *Fs) List() fs.ObjectsChan { out := make(fs.ObjectsChan, fs.Config.Checkers) if f.bucket == "" { // Return no objects at top level list @@ -325,7 +325,7 @@ func (f *FsStorage) List() fs.ObjectsChan { } // ListDir lists the buckets -func (f *FsStorage) ListDir() fs.DirChan { +func (f *Fs) ListDir() fs.DirChan { out := make(fs.DirChan, fs.Config.Checkers) if f.bucket == "" { // List the buckets @@ -379,14 +379,17 @@ func (f *FsStorage) ListDir() fs.DirChan { // Copy the reader in to the new object which is returned // // The new object may have been created if an error is returned -func (f *FsStorage) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { - // Temporary FsObject under construction - o := &FsObjectStorage{storage: f, remote: remote} +func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) { + // Temporary Object under construction + o := &Object{ + fs: f, + remote: remote, + } return o, o.Update(in, modTime, size) } // Mkdir creates the bucket if it doesn't exist -func (f *FsStorage) Mkdir() error { +func (f *Fs) Mkdir() error { _, err := f.svc.Buckets.Get(f.bucket).Do() if err == nil { // Bucket already exists @@ -408,12 +411,12 @@ func (f *FsStorage) Mkdir() error { // // Returns an error if it isn't empty: Error 409: The bucket you tried // to delete was not empty. 
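Across every backend in this change the object's back-pointer field gets the same name, so method bodies read identically (o.fs.svc, o.fs.pacer, o.fs.bucket, and so on). A self-contained sketch of that parent-pointer layout, using illustrative stand-in types rather than the real fs.Fs and fs.Object interfaces:

package main

import "fmt"

// Fs and Object are stand-ins for a backend's types after this change.
type Fs struct {
	name string
	root string
}

// Object keeps a back-pointer to the Fs it belongs to, so accessors and
// API calls can all be written as o.fs.<something>.
type Object struct {
	fs     *Fs
	remote string
}

// Fs returns the parent Fs, matching the accessor each backend defines.
func (o *Object) Fs() *Fs { return o.fs }

// Remote returns the remote path.
func (o *Object) Remote() string { return o.remote }

func main() {
	f := &Fs{name: "gcs", root: "bucket/path"}
	o := &Object{fs: f, remote: "file.txt"}
	fmt.Println(o.Fs().name, o.Remote()) // gcs file.txt
}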
-func (f *FsStorage) Rmdir() error { +func (f *Fs) Rmdir() error { return f.svc.Buckets.Delete(f.bucket).Do() } // Precision returns the precision -func (f *FsStorage) Precision() time.Duration { +func (f *Fs) Precision() time.Duration { return time.Nanosecond } @@ -426,18 +429,21 @@ func (f *FsStorage) Precision() time.Duration { // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantCopy -func (f *FsStorage) Copy(src fs.Object, remote string) (fs.Object, error) { - srcObj, ok := src.(*FsObjectStorage) +func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) if !ok { fs.Debug(src, "Can't copy - not same remote type") return nil, fs.ErrorCantCopy } - // Temporary FsObject under construction - dstObj := &FsObjectStorage{storage: f, remote: remote} + // Temporary Object under construction + dstObj := &Object{ + fs: f, + remote: remote, + } - srcBucket := srcObj.storage.bucket - srcObject := srcObj.storage.root + srcObj.remote + srcBucket := srcObj.fs.bucket + srcObject := srcObj.fs.root + srcObj.remote dstBucket := f.bucket dstObject := f.root + remote newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do() @@ -452,12 +458,12 @@ func (f *FsStorage) Copy(src fs.Object, remote string) (fs.Object, error) { // ------------------------------------------------------------ // Fs returns the parent Fs -func (o *FsObjectStorage) Fs() fs.Fs { - return o.storage +func (o *Object) Fs() fs.Fs { + return o.fs } // Return a string version -func (o *FsObjectStorage) String() string { +func (o *Object) String() string { if o == nil { return "" } @@ -465,22 +471,22 @@ func (o *FsObjectStorage) String() string { } // Remote returns the remote path -func (o *FsObjectStorage) Remote() string { +func (o *Object) Remote() string { return o.remote } // Md5sum returns the Md5sum of an object returning a lowercase hex string -func (o *FsObjectStorage) Md5sum() (string, error) { +func (o *Object) Md5sum() (string, error) { return o.md5sum, nil } // Size returns the size of an object in bytes -func (o *FsObjectStorage) Size() int64 { +func (o *Object) Size() int64 { return o.bytes } // setMetaData sets the fs data from a storage.Object -func (o *FsObjectStorage) setMetaData(info *storage.Object) { +func (o *Object) setMetaData(info *storage.Object) { o.url = info.MediaLink o.bytes = int64(info.Size) @@ -515,11 +521,11 @@ func (o *FsObjectStorage) setMetaData(info *storage.Object) { // readMetaData gets the metadata if it hasn't already been fetched // // it also sets the info -func (o *FsObjectStorage) readMetaData() (err error) { +func (o *Object) readMetaData() (err error) { if !o.modTime.IsZero() { return nil } - object, err := o.storage.svc.Objects.Get(o.storage.bucket, o.storage.root+o.remote).Do() + object, err := o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do() if err != nil { fs.Debug(o, "Failed to read info: %s", err) return err @@ -532,7 +538,7 @@ func (o *FsObjectStorage) readMetaData() (err error) { // // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers -func (o *FsObjectStorage) ModTime() time.Time { +func (o *Object) ModTime() time.Time { err := o.readMetaData() if err != nil { // fs.Log(o, "Failed to read metadata: %s", err) @@ -549,14 +555,14 @@ func metadataFromModTime(modTime time.Time) map[string]string { } // SetModTime sets the modification time of the local fs object -func (o *FsObjectStorage) 
SetModTime(modTime time.Time) { +func (o *Object) SetModTime(modTime time.Time) { // This only adds metadata so will perserve other metadata object := storage.Object{ - Bucket: o.storage.bucket, - Name: o.storage.root + o.remote, + Bucket: o.fs.bucket, + Name: o.fs.root + o.remote, Metadata: metadataFromModTime(modTime), } - newObject, err := o.storage.svc.Objects.Patch(o.storage.bucket, o.storage.root+o.remote, &object).Do() + newObject, err := o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do() if err != nil { fs.Stats.Error() fs.ErrorLog(o, "Failed to update remote mtime: %s", err) @@ -565,12 +571,12 @@ func (o *FsObjectStorage) SetModTime(modTime time.Time) { } // Storable returns a boolean as to whether this object is storable -func (o *FsObjectStorage) Storable() bool { +func (o *Object) Storable() bool { return true } // Open an object for read -func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) { +func (o *Object) Open() (in io.ReadCloser, err error) { // This is slightly complicated by Go here insisting on // decoding the %2F in URLs into / which is legal in http, but // unfortunately not what the storage server wants. @@ -586,7 +592,7 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) { // alter any hex-escaped characters googleapi.SetOpaque(req.URL) req.Header.Set("User-Agent", fs.UserAgent) - res, err := o.storage.client.Do(req) + res, err := o.fs.client.Do(req) if err != nil { return nil, err } @@ -600,16 +606,16 @@ func (o *FsObjectStorage) Open() (in io.ReadCloser, err error) { // Update the object with the contents of the io.Reader, modTime and size // // The new object may have been created if an error is returned -func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) error { +func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error { object := storage.Object{ - Bucket: o.storage.bucket, - Name: o.storage.root + o.remote, + Bucket: o.fs.bucket, + Name: o.fs.root + o.remote, ContentType: fs.MimeType(o), Size: uint64(size), Updated: modTime.Format(timeFormatOut), // Doesn't get set Metadata: metadataFromModTime(modTime), } - newObject, err := o.storage.svc.Objects.Insert(o.storage.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.storage.objectAcl).Do() + newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in).Name(object.Name).PredefinedAcl(o.fs.objectAcl).Do() if err != nil { return err } @@ -619,11 +625,13 @@ func (o *FsObjectStorage) Update(in io.Reader, modTime time.Time, size int64) er } // Remove an object -func (o *FsObjectStorage) Remove() error { - return o.storage.svc.Objects.Delete(o.storage.bucket, o.storage.root+o.remote).Do() +func (o *Object) Remove() error { + return o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do() } // Check the interfaces are satisfied -var _ fs.Fs = &FsStorage{} -var _ fs.Copier = &FsStorage{} -var _ fs.Object = &FsObjectStorage{} +var ( + _ fs.Fs = &Fs{} + _ fs.Copier = &Fs{} + _ fs.Object = &Object{} +) diff --git a/googlecloudstorage/googlecloudstorage_test.go b/googlecloudstorage/googlecloudstorage_test.go index 6fb3a29c2..2d0f45e59 100644 --- a/googlecloudstorage/googlecloudstorage_test.go +++ b/googlecloudstorage/googlecloudstorage_test.go @@ -13,7 +13,7 @@ import ( ) func init() { - fstests.NilObject = fs.Object((*googlecloudstorage.FsObjectStorage)(nil)) + fstests.NilObject = fs.Object((*googlecloudstorage.Object)(nil)) fstests.RemoteName = "TestGoogleCloudStorage:" } diff --git a/local/local.go 
diff --git a/local/local.go b/local/local.go
index 56d31d130..7fc1b10fb 100644
--- a/local/local.go
+++ b/local/local.go
@@ -28,8 +28,8 @@ func init() {
 	})
 }
 
-// FsLocal represents a local filesystem rooted at root
-type FsLocal struct {
+// Fs represents a local filesystem rooted at root
+type Fs struct {
 	name        string              // the name of the remote
 	root        string              // The root directory
 	precisionOk sync.Once           // Whether we need to read the precision
@@ -37,9 +37,9 @@ type FsLocal struct {
 	warned      map[string]struct{} // whether we have warned about this string
 }
 
-// FsObjectLocal represents a local filesystem object
-type FsObjectLocal struct {
-	local  *FsLocal    // The Fs this object is part of
+// Object represents a local filesystem object
+type Object struct {
+	fs     *Fs         // The Fs this object is part of
 	remote string      // The remote path
 	path   string      // The local path
 	info   os.FileInfo // Interface for file info (always present)
@@ -48,11 +48,11 @@ type FsObjectLocal struct {
 
 // ------------------------------------------------------------
 
-// NewFs constructs an FsLocal from the path
+// NewFs constructs an Fs from the path
 func NewFs(name, root string) (fs.Fs, error) {
 	var err error
-	f := &FsLocal{
+	f := &Fs{
 		name:   name,
 		warned: make(map[string]struct{}),
 	}
@@ -72,31 +72,35 @@ func NewFs(name, root string) (fs.Fs, error) {
 }
 
 // Name of the remote (as passed into NewFs)
-func (f *FsLocal) Name() string {
+func (f *Fs) Name() string {
 	return f.name
 }
 
 // Root of the remote (as passed into NewFs)
-func (f *FsLocal) Root() string {
+func (f *Fs) Root() string {
 	return f.root
 }
 
-// String converts this FsLocal to a string
-func (f *FsLocal) String() string {
+// String converts this Fs to a string
+func (f *Fs) String() string {
 	return fmt.Sprintf("Local file system at %s", f.root)
 }
 
-// newFsObject makes a half completed FsObjectLocal
-func (f *FsLocal) newFsObject(remote string) *FsObjectLocal {
+// newFsObject makes a half completed Object
+func (f *Fs) newFsObject(remote string) *Object {
 	remote = filepath.ToSlash(remote)
 	dstPath := filterPath(filepath.Join(f.root, f.cleanUtf8(remote)))
-	return &FsObjectLocal{local: f, remote: remote, path: dstPath}
+	return &Object{
+		fs:     f,
+		remote: remote,
+		path:   dstPath,
+	}
 }
 
 // Return an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
+func (f *Fs) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object {
 	o := f.newFsObject(remote)
 	if info != nil {
 		o.info = info
@@ -113,14 +117,14 @@ func (f *FsLocal) newFsObjectWithInfo(remote string, info os.FileInfo) fs.Object
 // NewFsObject returns an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsLocal) NewFsObject(remote string) fs.Object {
+func (f *Fs) NewFsObject(remote string) fs.Object {
 	return f.newFsObjectWithInfo(remote, nil)
 }
 
 // List the path returning a channel of FsObjects
 //
 // Ignores everything which isn't Storable, eg links etc
-func (f *FsLocal) List() fs.ObjectsChan {
+func (f *Fs) List() fs.ObjectsChan {
 	out := make(fs.ObjectsChan, fs.Config.Checkers)
 	go func() {
 		err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error {
@@ -158,7 +162,7 @@ func (f *FsLocal) List() fs.ObjectsChan {
 // CleanUtf8 makes string a valid UTF-8 string
 //
 // Any invalid UTF-8 characters will be replaced with utf8.RuneError
-func (f *FsLocal) cleanUtf8(name string) string {
+func (f *Fs) cleanUtf8(name string) string {
 	if !utf8.ValidString(name) {
 		if _, ok := f.warned[name]; !ok {
 			fs.Debug(f, "Replacing invalid UTF-8 characters in %q", name)
@@ -173,7 +177,7 @@ func (f *FsLocal) cleanUtf8(name string) string {
 }
 
 // ListDir walks the path returning a channel of FsObjects
-func (f *FsLocal) ListDir() fs.DirChan {
+func (f *Fs) ListDir() fs.DirChan {
 	out := make(fs.DirChan, fs.Config.Checkers)
 	go func() {
 		defer close(out)
@@ -216,7 +220,7 @@ func (f *FsLocal) ListDir() fs.DirChan {
 }
 
 // Put the FsObject to the local filesystem
-func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
+func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
 	// Temporary FsObject under construction - info filled in by Update()
 	o := f.newFsObject(remote)
 	err := o.Update(in, modTime, size)
@@ -227,7 +231,7 @@ func (f *FsLocal) Put(in io.Reader, remote string, modTime time.Time, size int64
 }
 
 // Mkdir creates the directory if it doesn't exist
-func (f *FsLocal) Mkdir() error {
+func (f *Fs) Mkdir() error {
 	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
 	return os.MkdirAll(f.root, 0777)
 }
@@ -235,12 +239,12 @@ func (f *FsLocal) Mkdir() error {
 // Rmdir removes the directory
 //
 // If it isn't empty it will return an error
-func (f *FsLocal) Rmdir() error {
+func (f *Fs) Rmdir() error {
 	return os.Remove(f.root)
 }
 
 // Precision of the file system
-func (f *FsLocal) Precision() (precision time.Duration) {
+func (f *Fs) Precision() (precision time.Duration) {
 	f.precisionOk.Do(func() {
 		f.precision = f.readPrecision()
 	})
@@ -248,7 +252,7 @@ func (f *FsLocal) Precision() (precision time.Duration) {
 }
 
 // Read the precision
-func (f *FsLocal) readPrecision() (precision time.Duration) {
+func (f *Fs) readPrecision() (precision time.Duration) {
 	// Default precision of 1s
 	precision = time.Second
 
@@ -304,7 +308,7 @@ func (f *FsLocal) readPrecision() (precision time.Duration) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *FsLocal) Purge() error {
+func (f *Fs) Purge() error {
 	fi, err := os.Lstat(f.root)
 	if err != nil {
 		return err
@@ -324,8 +328,8 @@ func (f *FsLocal) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *FsLocal) Move(src fs.Object, remote string) (fs.Object, error) {
-	srcObj, ok := src.(*FsObjectLocal)
+func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debug(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
@@ -374,8 +378,8 @@ func (f *FsLocal) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *FsLocal) DirMove(src fs.Fs) error {
-	srcFs, ok := src.(*FsLocal)
+func (f *Fs) DirMove(src fs.Fs) error {
+	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debug(srcFs, "Can't move directory - not same remote type")
 		return fs.ErrorCantDirMove
@@ -403,12 +407,12 @@ func (f *FsLocal) DirMove(src fs.Fs) error {
 
 // ------------------------------------------------------------
 
 // Fs returns the parent Fs
-func (o *FsObjectLocal) Fs() fs.Fs {
-	return o.local
+func (o *Object) Fs() fs.Fs {
+	return o.fs
 }
 
 // Return a string version
-func (o *FsObjectLocal) String() string {
+func (o *Object) String() string {
 	if o == nil {
 		return ""
 	}
@@ -416,12 +420,12 @@ func (o *FsObjectLocal) String() string {
 }
 
 // Remote returns the remote path
-func (o *FsObjectLocal) Remote() string {
-	return o.local.cleanUtf8(o.remote)
+func (o *Object) Remote() string {
+	return o.fs.cleanUtf8(o.remote)
 }
 
 // Md5sum calculates the Md5sum of a file returning a lowercase hex string
-func (o *FsObjectLocal) Md5sum() (string, error) {
+func (o *Object) Md5sum() (string, error) {
 	if o.md5sum != "" {
 		return o.md5sum, nil
 	}
@@ -449,17 +453,17 @@ func (o *FsObjectLocal) Md5sum() (string, error) {
 }
 
 // Size returns the size of an object in bytes
-func (o *FsObjectLocal) Size() int64 {
+func (o *Object) Size() int64 {
 	return o.info.Size()
 }
 
 // ModTime returns the modification time of the object
-func (o *FsObjectLocal) ModTime() time.Time {
+func (o *Object) ModTime() time.Time {
 	return o.info.ModTime()
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *FsObjectLocal) SetModTime(modTime time.Time) {
+func (o *Object) SetModTime(modTime time.Time) {
 	err := os.Chtimes(o.path, modTime, modTime)
 	if err != nil {
 		fs.Debug(o, "Failed to set mtime on file: %s", err)
@@ -474,7 +478,7 @@ func (o *FsObjectLocal) SetModTime(modTime time.Time) {
 }
 
 // Storable returns a boolean showing if this object is storable
-func (o *FsObjectLocal) Storable() bool {
+func (o *Object) Storable() bool {
 	mode := o.info.Mode()
 	if mode&(os.ModeSymlink|os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
 		fs.Debug(o, "Can't transfer non file/directory")
@@ -489,9 +493,9 @@ func (o *FsObjectLocal) Storable() bool {
 // localOpenFile wraps an io.ReadCloser and updates the md5sum of the
 // object that is read
 type localOpenFile struct {
-	o    *FsObjectLocal // object that is open
-	in   io.ReadCloser  // handle we are wrapping
-	hash hash.Hash      // currently accumulating MD5
+	o    *Object       // object that is open
+	in   io.ReadCloser // handle we are wrapping
+	hash hash.Hash     // currently accumulating MD5
 }
 
 // Read bytes from the object - see io.Reader
@@ -516,7 +520,7 @@ func (file *localOpenFile) Close() (err error) {
 }
 
 // Open an object for read
-func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
+func (o *Object) Open() (in io.ReadCloser, err error) {
 	in, err = os.Open(o.path)
 	if err != nil {
 		return
@@ -531,13 +535,13 @@ func (o *FsObjectLocal) Open() (in io.ReadCloser, err error) {
 }
 
 // mkdirAll makes all the directories needed to store the object
-func (o *FsObjectLocal) mkdirAll() error {
+func (o *Object) mkdirAll() error {
 	dir, _ := getDirFile(o.path)
 	return os.MkdirAll(dir, 0777)
 }
 
 // Update the object from in with modTime and size
-func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) error {
+func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
 	err := o.mkdirAll()
 	if err != nil {
 		return err
@@ -572,14 +576,14 @@ func (o *FsObjectLocal) Update(in io.Reader, modTime time.Time, size int64) erro
 }
 
 // Stat a FsObject into info
-func (o *FsObjectLocal) lstat() error {
+func (o *Object) lstat() error {
 	info, err := os.Lstat(o.path)
 	o.info = info
 	return err
 }
 
 // Remove an object
-func (o *FsObjectLocal) Remove() error {
+func (o *Object) Remove() error {
 	return os.Remove(o.path)
 }
 
@@ -642,7 +646,7 @@ func uncPath(s string) string {
 }
 
 // cleanWindowsName will clean invalid Windows characters
-func cleanWindowsName(f *FsLocal, name string) string {
+func cleanWindowsName(f *Fs, name string) string {
 	original := name
 	var name2 string
 	if strings.HasPrefix(name, `\\?\`) {
@@ -679,8 +683,10 @@ func cleanWindowsName(f *FsLocal, name string) string {
 }
 
 // Check the interfaces are satisfied
-var _ fs.Fs = &FsLocal{}
-var _ fs.Purger = &FsLocal{}
-var _ fs.Mover = &FsLocal{}
-var _ fs.DirMover = &FsLocal{}
-var _ fs.Object = &FsObjectLocal{}
+var (
+	_ fs.Fs       = &Fs{}
+	_ fs.Purger   = &Fs{}
+	_ fs.Mover    = &Fs{}
+	_ fs.DirMover = &Fs{}
+	_ fs.Object   = &Object{}
+)
diff --git a/local/local_test.go b/local/local_test.go
index 82c98a3e1..68f176f90 100644
--- a/local/local_test.go
+++ b/local/local_test.go
@@ -13,7 +13,7 @@ import (
 )
 
 func init() {
-	fstests.NilObject = fs.Object((*local.FsObjectLocal)(nil))
+	fstests.NilObject = fs.Object((*local.Object)(nil))
 	fstests.RemoteName = ""
 }
diff --git a/local/tests_test.go b/local/tests_test.go
index 7b2c6f8d1..9a2fe9176 100644
--- a/local/tests_test.go
+++ b/local/tests_test.go
@@ -58,7 +58,7 @@ var utf8Tests = [][2]string{
 }
 
 func TestCleanUtf8(t *testing.T) {
-	f := &FsLocal{}
+	f := &Fs{}
 	f.warned = make(map[string]struct{})
 	for _, test := range utf8Tests {
 		got := f.cleanUtf8(test[0])
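One practical effect of the renames above: Go callers always qualify exported identifiers with the package name, so local.Fs and s3.Object read better than the stuttering local.FsLocal and s3.FsObjectS3, while the generic fs.Fs and fs.Object interfaces stay unchanged. A hypothetical caller of the local backend, assuming only the NewFs signature shown in this patch and an assumed import path for illustration:

package main

import (
	"log"

	"github.com/ncw/rclone/local" // import path assumed for illustration
)

func main() {
	// NewFs returns the generic fs.Fs interface, so the concrete type name
	// (local.Fs) rarely appears at call sites after the rename.
	f, err := local.NewFs("mylocal", "/tmp/data")
	if err != nil {
		log.Fatal(err)
	}

	// List streams objects over a channel, as shown in the diff above.
	for o := range f.List() {
		log.Printf("%s (%d bytes)", o.Remote(), o.Size())
	}
}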
diff --git a/s3/s3.go b/s3/s3.go
index b236b934e..fc97c7de5 100644
--- a/s3/s3.go
+++ b/s3/s3.go
@@ -128,8 +128,8 @@ const (
 	maxRetries = 10 // number of retries to make of operations
 )
 
-// FsS3 represents a remote s3 server
-type FsS3 struct {
+// Fs represents a remote s3 server
+type Fs struct {
 	name               string           // the name of the remote
 	c                  *s3.S3           // the connection to the s3 server
 	ses                *session.Session // the s3 session
@@ -139,13 +139,13 @@ type FsS3 struct {
 	locationConstraint string           // location constraint of new buckets
 }
 
-// FsObjectS3 describes a s3 object
-type FsObjectS3 struct {
+// Object describes a s3 object
+type Object struct {
 	// Will definitely have everything but meta which may be nil
 	//
 	// List will read everything but meta - to fill that in need to call
 	// readMetaData
-	s3     *FsS3  // what this object is part of
+	fs     *Fs    // what this object is part of
 	remote string // The remote path
 	etag   string // md5sum of the object
 	bytes  int64  // size of the object
@@ -156,20 +156,20 @@ type FsObjectS3 struct {
 
 // ------------------------------------------------------------
 
 // Name of the remote (as passed into NewFs)
-func (f *FsS3) Name() string {
+func (f *Fs) Name() string {
 	return f.name
 }
 
 // Root of the remote (as passed into NewFs)
-func (f *FsS3) Root() string {
+func (f *Fs) Root() string {
 	if f.root == "" {
 		return f.bucket
 	}
 	return f.bucket + "/" + f.root
 }
 
-// String converts this FsS3 to a string
-func (f *FsS3) String() string {
+// String converts this Fs to a string
+func (f *Fs) String() string {
 	if f.root == "" {
 		return fmt.Sprintf("S3 bucket %s", f.bucket)
 	}
@@ -247,7 +247,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 	return c, ses, nil
 }
 
-// NewFs contstructs an FsS3 from the path, bucket:path
+// NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string) (fs.Fs, error) {
 	bucket, directory, err := s3ParsePath(root)
 	if err != nil {
@@ -257,7 +257,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	f := &FsS3{
+	f := &Fs{
 		name:   name,
 		c:      c,
 		bucket: bucket,
@@ -294,9 +294,9 @@ func NewFs(name, root string) (fs.Fs, error) {
 // Return an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
-	o := &FsObjectS3{
-		s3:     f,
+func (f *Fs) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
+	o := &Object{
+		fs:     f,
 		remote: remote,
 	}
 	if info != nil {
@@ -322,14 +322,14 @@ func (f *FsS3) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
 // NewFsObject returns an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsS3) NewFsObject(remote string) fs.Object {
+func (f *Fs) NewFsObject(remote string) fs.Object {
 	return f.newFsObjectWithInfo(remote, nil)
 }
 
 // list the objects into the function supplied
 //
 // If directories is set it only sends directories
-func (f *FsS3) list(directories bool, fn func(string, *s3.Object)) {
+func (f *Fs) list(directories bool, fn func(string, *s3.Object)) {
 	maxKeys := int64(listChunkSize)
 	delimiter := ""
 	if directories {
@@ -394,7 +394,7 @@ func (f *FsS3) list(directories bool, fn func(string, *s3.Object)) {
 }
 
 // List walks the path returning a channel of FsObjects
-func (f *FsS3) List() fs.ObjectsChan {
+func (f *Fs) List() fs.ObjectsChan {
 	out := make(fs.ObjectsChan, fs.Config.Checkers)
 	if f.bucket == "" {
 		// Return no objects at top level list
@@ -415,7 +415,7 @@ func (f *FsS3) List() fs.ObjectsChan {
 }
 
 // ListDir lists the buckets
-func (f *FsS3) ListDir() fs.DirChan {
+func (f *Fs) ListDir() fs.DirChan {
 	out := make(fs.DirChan, fs.Config.Checkers)
 	if f.bucket == "" {
 		// List the buckets
@@ -458,14 +458,17 @@ func (f *FsS3) ListDir() fs.DirChan {
 }
 
 // Put the FsObject into the bucket
-func (f *FsS3) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
-	// Temporary FsObject under construction
-	fs := &FsObjectS3{s3: f, remote: remote}
+func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
+	// Temporary Object under construction
+	fs := &Object{
+		fs:     f,
+		remote: remote,
+	}
 	return fs, fs.Update(in, modTime, size)
 }
 
 // Mkdir creates the bucket if it doesn't exist
-func (f *FsS3) Mkdir() error {
+func (f *Fs) Mkdir() error {
 	req := s3.CreateBucketInput{
 		Bucket: &f.bucket,
 		ACL:    &f.perm,
@@ -487,7 +490,7 @@ func (f *FsS3) Mkdir() error {
 // Rmdir deletes the bucket
 //
 // Returns an error if it isn't empty
-func (f *FsS3) Rmdir() error {
+func (f *Fs) Rmdir() error {
 	req := s3.DeleteBucketInput{
 		Bucket: &f.bucket,
 	}
@@ -496,7 +499,7 @@ func (f *FsS3) Rmdir() error {
 }
 
 // Precision of the remote
-func (f *FsS3) Precision() time.Duration {
+func (f *Fs) Precision() time.Duration {
 	return time.Nanosecond
 }
 
@@ -509,13 +512,13 @@ func (f *FsS3) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *FsS3) Copy(src fs.Object, remote string) (fs.Object, error) {
-	srcObj, ok := src.(*FsObjectS3)
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debug(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	srcFs := srcObj.s3
+	srcFs := srcObj.fs
 	key := f.root + remote
 	source := srcFs.bucket + "/" + srcFs.root + srcObj.remote
 	req := s3.CopyObjectInput{
@@ -534,12 +537,12 @@ func (f *FsS3) Copy(src fs.Object, remote string) (fs.Object, error) {
 
 // ------------------------------------------------------------
 
 // Fs returns the parent Fs
-func (o *FsObjectS3) Fs() fs.Fs {
-	return o.s3
+func (o *Object) Fs() fs.Fs {
+	return o.fs
 }
 
 // Return a string version
-func (o *FsObjectS3) String() string {
+func (o *Object) String() string {
 	if o == nil {
 		return ""
 	}
@@ -547,14 +550,14 @@ func (o *FsObjectS3) String() string {
 }
 
 // Remote returns the remote path
-func (o *FsObjectS3) Remote() string {
+func (o *Object) Remote() string {
 	return o.remote
 }
 
 var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
 // Md5sum returns the Md5sum of an object returning a lowercase hex string
-func (o *FsObjectS3) Md5sum() (string, error) {
+func (o *Object) Md5sum() (string, error) {
 	etag := strings.Trim(strings.ToLower(o.etag), `"`)
 	// Check the etag is a valid md5sum
 	if !matchMd5.MatchString(etag) {
@@ -565,23 +568,23 @@ func (o *FsObjectS3) Md5sum() (string, error) {
 }
 
 // Size returns the size of an object in bytes
-func (o *FsObjectS3) Size() int64 {
+func (o *Object) Size() int64 {
 	return o.bytes
 }
 
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *FsObjectS3) readMetaData() (err error) {
+func (o *Object) readMetaData() (err error) {
 	if o.meta != nil {
 		return nil
 	}
-	key := o.s3.root + o.remote
+	key := o.fs.root + o.remote
 	req := s3.HeadObjectInput{
-		Bucket: &o.s3.bucket,
+		Bucket: &o.fs.bucket,
 		Key:    &key,
 	}
-	resp, err := o.s3.c.HeadObject(&req)
+	resp, err := o.fs.c.HeadObject(&req)
 	if err != nil {
 		fs.Debug(o, "Failed to read info: %s", err)
 		return err
@@ -608,7 +611,7 @@ func (o *FsObjectS3) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *FsObjectS3) ModTime() time.Time {
+func (o *Object) ModTime() time.Time {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Log(o, "Failed to read metadata: %s", err)
@@ -629,7 +632,7 @@ func (o *FsObjectS3) ModTime() time.Time {
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *FsObjectS3) SetModTime(modTime time.Time) {
+func (o *Object) SetModTime(modTime time.Time) {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Stats.Error()
@@ -639,18 +642,18 @@ func (o *FsObjectS3) SetModTime(modTime time.Time) {
 	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
 
 	// Copy the object to itself to update the metadata
-	key := o.s3.root + o.remote
-	sourceKey := o.s3.bucket + "/" + key
+	key := o.fs.root + o.remote
+	sourceKey := o.fs.bucket + "/" + key
 	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
 	req := s3.CopyObjectInput{
-		Bucket:            &o.s3.bucket,
-		ACL:               &o.s3.perm,
+		Bucket:            &o.fs.bucket,
+		ACL:               &o.fs.perm,
 		Key:               &key,
 		CopySource:        &sourceKey,
 		Metadata:          o.meta,
 		MetadataDirective: &directive,
 	}
-	_, err = o.s3.c.CopyObject(&req)
+	_, err = o.fs.c.CopyObject(&req)
 	if err != nil {
 		fs.Stats.Error()
 		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
@@ -658,18 +661,18 @@ func (o *FsObjectS3) SetModTime(modTime time.Time) {
 }
 
 // Storable raturns a boolean indicating if this object is storable
-func (o *FsObjectS3) Storable() bool {
+func (o *Object) Storable() bool {
 	return true
 }
 
 // Open an object for read
-func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
-	key := o.s3.root + o.remote
+func (o *Object) Open() (in io.ReadCloser, err error) {
+	key := o.fs.root + o.remote
 	req := s3.GetObjectInput{
-		Bucket: &o.s3.bucket,
+		Bucket: &o.fs.bucket,
 		Key:    &key,
 	}
-	resp, err := o.s3.c.GetObject(&req)
+	resp, err := o.fs.c.GetObject(&req)
 	if err != nil {
 		return nil, err
 	}
@@ -677,11 +680,11 @@ func (o *FsObjectS3) Open() (in io.ReadCloser, err error) {
 }
 
 // Update the Object from in with modTime and size
-func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
-	uploader := s3manager.NewUploader(o.s3.ses, func(u *s3manager.Uploader) {
+func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
+	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
 		u.Concurrency = 2
 		u.LeavePartsOnError = false
-		u.S3 = o.s3.c
+		u.S3 = o.fs.c
 	})
 
 	// Set the mtime in the meta data
@@ -692,10 +695,10 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
 	// Guess the content type
 	contentType := fs.MimeType(o)
 
-	key := o.s3.root + o.remote
+	key := o.fs.root + o.remote
 	req := s3manager.UploadInput{
-		Bucket:      &o.s3.bucket,
-		ACL:         &o.s3.perm,
+		Bucket:      &o.fs.bucket,
+		ACL:         &o.fs.perm,
 		Key:         &key,
 		Body:        in,
 		ContentType: &contentType,
@@ -714,17 +717,19 @@ func (o *FsObjectS3) Update(in io.Reader, modTime time.Time, size int64) error {
 }
 
 // Remove an object
-func (o *FsObjectS3) Remove() error {
-	key := o.s3.root + o.remote
+func (o *Object) Remove() error {
+	key := o.fs.root + o.remote
 	req := s3.DeleteObjectInput{
-		Bucket: &o.s3.bucket,
+		Bucket: &o.fs.bucket,
 		Key:    &key,
 	}
-	_, err := o.s3.c.DeleteObject(&req)
+	_, err := o.fs.c.DeleteObject(&req)
 	return err
 }
 
 // Check the interfaces are satisfied
-var _ fs.Fs = &FsS3{}
-var _ fs.Copier = &FsS3{}
-var _ fs.Object = &FsObjectS3{}
+var (
+	_ fs.Fs     = &Fs{}
+	_ fs.Copier = &Fs{}
+	_ fs.Object = &Object{}
+)
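The Md5sum implementation above only trusts the S3 ETag when it looks like a plain MD5; multipart uploads generally produce ETags of the form <hex>-<parts>, which are not the object's MD5. A small standalone sketch of that guard, reusing the same regexp as the diff:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// matchMd5 mirrors the check in the diff: exactly 32 lowercase hex digits.
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// etagToMd5 returns the ETag as an MD5 hex string if it is one, else "".
func etagToMd5(etag string) string {
	etag = strings.Trim(strings.ToLower(etag), `"`)
	if !matchMd5.MatchString(etag) {
		return "" // e.g. a multipart ETag such as "...-2" is not an MD5
	}
	return etag
}

func main() {
	fmt.Println(etagToMd5(`"9E107D9D372BB6826BD81D3542A419D6"`))   // plain MD5
	fmt.Println(etagToMd5(`"9e107d9d372bb6826bd81d3542a419d6-2"`)) // multipart, prints empty
}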
diff --git a/s3/s3_test.go b/s3/s3_test.go
index 4d928ec40..f73440e03 100644
--- a/s3/s3_test.go
+++ b/s3/s3_test.go
@@ -13,7 +13,7 @@ import (
 )
 
 func init() {
-	fstests.NilObject = fs.Object((*s3.FsObjectS3)(nil))
+	fstests.NilObject = fs.Object((*s3.Object)(nil))
 	fstests.RemoteName = "TestS3:"
 }
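Both Copy methods in this patch (s3 above, swift below) start with a type assertion so that a server-side copy is only attempted when the source object really belongs to the same kind of remote; otherwise they return fs.ErrorCantCopy and the caller falls back to a streamed copy. A stripped-down sketch of that guard with hypothetical types:

package main

import (
	"errors"
	"fmt"
)

// Obj and OtherObj stand in for object types from two different backends.
type Obj struct{ name string }
type OtherObj struct{ name string }

var errCantCopy = errors.New("can't copy - not same remote type")

// serverSideCopy succeeds only when src is really an *Obj; anything else is
// rejected so the caller can fall back to a generic download/upload copy.
func serverSideCopy(src interface{}) error {
	srcObj, ok := src.(*Obj)
	if !ok {
		return errCantCopy
	}
	fmt.Println("server-side copy of", srcObj.name)
	return nil
}

func main() {
	fmt.Println(serverSideCopy(&Obj{name: "a.txt"}))      // <nil>
	fmt.Println(serverSideCopy(&OtherObj{name: "b.txt"})) // can't copy - not same remote type
}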
diff --git a/swift/swift.go b/swift/swift.go
index 6c4d55852..e5d377ae5 100644
--- a/swift/swift.go
+++ b/swift/swift.go
@@ -65,8 +65,8 @@ func init() {
 	pflag.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
 }
 
-// FsSwift represents a remote swift server
-type FsSwift struct {
+// Fs represents a remote swift server
+type Fs struct {
 	name      string           // name of this remote
 	c         swift.Connection // the connection to the swift server
 	container string           // the container we are working on
@@ -74,11 +74,11 @@ type FsSwift struct {
 	root      string           // the path we are working on if any
 }
 
-// FsObjectSwift describes a swift object
+// Object describes a swift object
 //
 // Will definitely have info but maybe not meta
-type FsObjectSwift struct {
-	swift   *FsSwift       // what this object is part of
+type Object struct {
+	fs      *Fs            // what this object is part of
 	remote  string         // The remote path
 	info    swift.Object   // Info from the swift object if known
 	headers *swift.Headers // The object headers if known
@@ -87,20 +87,20 @@ type FsObjectSwift struct {
 
 // ------------------------------------------------------------
 
 // Name of the remote (as passed into NewFs)
-func (f *FsSwift) Name() string {
+func (f *Fs) Name() string {
 	return f.name
 }
 
 // Root of the remote (as passed into NewFs)
-func (f *FsSwift) Root() string {
+func (f *Fs) Root() string {
 	if f.root == "" {
 		return f.container
 	}
 	return f.container + "/" + f.root
 }
 
-// String converts this FsSwift to a string
-func (f *FsSwift) String() string {
+// String converts this Fs to a string
+func (f *Fs) String() string {
 	if f.root == "" {
 		return fmt.Sprintf("Swift container %s", f.container)
 	}
@@ -154,7 +154,7 @@ func swiftConnection(name string) (*swift.Connection, error) {
 	return c, nil
 }
 
-// NewFs contstructs an FsSwift from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string) (fs.Fs, error) {
 	container, directory, err := parsePath(root)
 	if err != nil {
@@ -164,7 +164,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	f := &FsSwift{
+	f := &Fs{
 		name:      name,
 		c:         *c,
 		container: container,
@@ -194,9 +194,9 @@ func NewFs(name, root string) (fs.Fs, error) {
 // Return an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsSwift) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
-	o := &FsObjectSwift{
-		swift:  f,
+func (f *Fs) newFsObjectWithInfo(remote string, info *swift.Object) fs.Object {
+	o := &Object{
+		fs:     f,
 		remote: remote,
 	}
 	if info != nil {
@@ -215,7 +215,7 @@ func (f *FsSwift) newFsObjectWithInfo(remote string, info *swift.Object) fs.Obje
 // NewFsObject returns an FsObject from a path
 //
 // May return nil if an error occurred
-func (f *FsSwift) NewFsObject(remote string) fs.Object {
+func (f *Fs) NewFsObject(remote string) fs.Object {
 	return f.newFsObjectWithInfo(remote, nil)
 }
 
@@ -226,7 +226,7 @@ type listFn func(string, *swift.Object) error
 // the container and root supplied
 //
 // If directories is set it only sends directories
-func (f *FsSwift) listContainerRoot(container, root string, directories bool, fn listFn) error {
+func (f *Fs) listContainerRoot(container, root string, directories bool, fn listFn) error {
 	// Options for ObjectsWalk
 	opts := swift.ObjectsOpts{
 		Prefix: root,
@@ -266,7 +266,7 @@ func (f *FsSwift) listContainerRoot(container, root string, directories bool, fn
 // list the objects into the function supplied
 //
 // If directories is set it only sends directories
-func (f *FsSwift) list(directories bool, fn listFn) {
+func (f *Fs) list(directories bool, fn listFn) {
 	err := f.listContainerRoot(f.container, f.root, directories, fn)
 	if err != nil {
 		fs.Stats.Error()
@@ -275,7 +275,7 @@ func (f *FsSwift) list(directories bool, fn listFn) {
 }
 
 // List walks the path returning a channel of FsObjects
-func (f *FsSwift) List() fs.ObjectsChan {
+func (f *Fs) List() fs.ObjectsChan {
 	out := make(fs.ObjectsChan, fs.Config.Checkers)
 	if f.container == "" {
 		// Return no objects at top level list
@@ -290,7 +290,7 @@ func (f *FsSwift) List() fs.ObjectsChan {
 			if o := f.newFsObjectWithInfo(remote, object); o != nil {
 				// Do full metadata read on 0 size objects which might be manifest files
 				if o.Size() == 0 {
-					err := o.(*FsObjectSwift).readMetaData()
+					err := o.(*Object).readMetaData()
 					if err != nil {
 						fs.Debug(o, "Failed to read metadata: %v", err)
 					}
@@ -305,7 +305,7 @@ func (f *FsSwift) List() fs.ObjectsChan {
 }
 
 // ListDir lists the containers
-func (f *FsSwift) ListDir() fs.DirChan {
+func (f *Fs) ListDir() fs.DirChan {
 	out := make(fs.DirChan, fs.Config.Checkers)
 	if f.container == "" {
 		// List the containers
@@ -347,26 +347,29 @@ func (f *FsSwift) ListDir() fs.DirChan {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *FsSwift) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
-	// Temporary FsObject under construction
-	fs := &FsObjectSwift{swift: f, remote: remote}
+func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
+	// Temporary Object under construction
+	fs := &Object{
+		fs:     f,
+		remote: remote,
+	}
 	return fs, fs.Update(in, modTime, size)
 }
 
 // Mkdir creates the container if it doesn't exist
-func (f *FsSwift) Mkdir() error {
+func (f *Fs) Mkdir() error {
 	return f.c.ContainerCreate(f.container, nil)
 }
 
 // Rmdir deletes the container
 //
 // Returns an error if it isn't empty
-func (f *FsSwift) Rmdir() error {
+func (f *Fs) Rmdir() error {
 	return f.c.ContainerDelete(f.container)
 }
 
 // Precision of the remote
-func (f *FsSwift) Precision() time.Duration {
+func (f *Fs) Precision() time.Duration {
 	return time.Nanosecond
 }
 
@@ -379,13 +382,13 @@ func (f *FsSwift) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *FsSwift) Copy(src fs.Object, remote string) (fs.Object, error) {
-	srcObj, ok := src.(*FsObjectSwift)
+func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debug(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	srcFs := srcObj.swift
+	srcFs := srcObj.fs
 	_, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
 	if err != nil {
 		return nil, err
@@ -396,12 +399,12 @@ func (f *FsSwift) Copy(src fs.Object, remote string) (fs.Object, error) {
 
 // ------------------------------------------------------------
 
 // Fs returns the parent Fs
-func (o *FsObjectSwift) Fs() fs.Fs {
-	return o.swift
+func (o *Object) Fs() fs.Fs {
+	return o.fs
 }
 
 // Return a string version
-func (o *FsObjectSwift) String() string {
+func (o *Object) String() string {
 	if o == nil {
 		return ""
 	}
@@ -409,12 +412,12 @@ func (o *FsObjectSwift) String() string {
 }
 
 // Remote returns the remote path
-func (o *FsObjectSwift) Remote() string {
+func (o *Object) Remote() string {
 	return o.remote
 }
 
 // Md5sum returns the Md5sum of an object returning a lowercase hex string
-func (o *FsObjectSwift) Md5sum() (string, error) {
+func (o *Object) Md5sum() (string, error) {
 	isManifest, err := o.isManifestFile()
 	if err != nil {
 		return "", err
@@ -427,7 +430,7 @@ func (o *FsObjectSwift) Md5sum() (string, error) {
 }
 
 // isManifestFile checks for manifest header
-func (o *FsObjectSwift) isManifestFile() (bool, error) {
+func (o *Object) isManifestFile() (bool, error) {
 	err := o.readMetaData()
 	if err != nil {
 		if err == swift.ObjectNotFound {
@@ -440,18 +443,18 @@ func (o *FsObjectSwift) isManifestFile() (bool, error) {
 }
 
 // Size returns the size of an object in bytes
-func (o *FsObjectSwift) Size() int64 {
+func (o *Object) Size() int64 {
 	return o.info.Bytes
 }
 
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *FsObjectSwift) readMetaData() (err error) {
+func (o *Object) readMetaData() (err error) {
 	if o.headers != nil {
 		return nil
 	}
-	info, h, err := o.swift.c.Object(o.swift.container, o.swift.root+o.remote)
+	info, h, err := o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
 	if err != nil {
 		return err
 	}
@@ -465,7 +468,7 @@ func (o *FsObjectSwift) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *FsObjectSwift) ModTime() time.Time {
+func (o *Object) ModTime() time.Time {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Debug(o, "Failed to read metadata: %s", err)
@@ -480,7 +483,7 @@ func (o *FsObjectSwift) ModTime() time.Time {
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *FsObjectSwift) SetModTime(modTime time.Time) {
+func (o *Object) SetModTime(modTime time.Time) {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Stats.Error()
@@ -493,7 +496,7 @@ func (o *FsObjectSwift) SetModTime(modTime time.Time) {
 	for k, v := range newHeaders {
 		(*o.headers)[k] = v
 	}
-	err = o.swift.c.ObjectUpdate(o.swift.container, o.swift.root+o.remote, newHeaders)
+	err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
 	if err != nil {
 		fs.Stats.Error()
 		fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
@@ -501,13 +504,13 @@ func (o *FsObjectSwift) SetModTime(modTime time.Time) {
 }
 
 // Storable returns if this object is storable
-func (o *FsObjectSwift) Storable() bool {
+func (o *Object) Storable() bool {
 	return true
 }
 
 // Open an object for read
-func (o *FsObjectSwift) Open() (in io.ReadCloser, err error) {
-	in, _, err = o.swift.c.ObjectOpen(o.swift.container, o.swift.root+o.remote, true, nil)
+func (o *Object) Open() (in io.ReadCloser, err error) {
+	in, _, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, true, nil)
 	return
 }
 
@@ -522,33 +525,33 @@ func min(x, y int64) int64 {
 // removeSegments removes any old segments from o
 //
 // if except is passed in then segments with that prefix won't be deleted
-func (o *FsObjectSwift) removeSegments(except string) error {
-	segmentsRoot := o.swift.root + o.remote + "/"
-	err := o.swift.listContainerRoot(o.swift.segmentsContainer, segmentsRoot, false, func(remote string, object *swift.Object) error {
+func (o *Object) removeSegments(except string) error {
+	segmentsRoot := o.fs.root + o.remote + "/"
+	err := o.fs.listContainerRoot(o.fs.segmentsContainer, segmentsRoot, false, func(remote string, object *swift.Object) error {
 		if except != "" && strings.HasPrefix(remote, except) {
-			// fs.Debug(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.swift.segmentsContainer)
+			// fs.Debug(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
 			return nil
 		}
 		segmentPath := segmentsRoot + remote
-		fs.Debug(o, "Removing segment file %q in container %q", segmentPath, o.swift.segmentsContainer)
-		return o.swift.c.ObjectDelete(o.swift.segmentsContainer, segmentPath)
+		fs.Debug(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
+		return o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
 	})
 	if err != nil {
 		return err
 	}
 	// remove the segments container if empty, ignore errors
-	err = o.swift.c.ContainerDelete(o.swift.segmentsContainer)
+	err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
 	if err == nil {
-		fs.Debug(o, "Removed empty container %q", o.swift.segmentsContainer)
+		fs.Debug(o, "Removed empty container %q", o.fs.segmentsContainer)
 	}
 	return nil
 }
 
 // updateChunks updates the existing object using chunks to a separate
 // container. It returns a string which prefixes current segments.
-func (o *FsObjectSwift) updateChunks(in io.Reader, headers swift.Headers, size int64) (string, error) {
+func (o *Object) updateChunks(in io.Reader, headers swift.Headers, size int64) (string, error) {
 	// Create the segmentsContainer if it doesn't exist
-	err := o.swift.c.ContainerCreate(o.swift.segmentsContainer, nil)
+	err := o.fs.c.ContainerCreate(o.fs.segmentsContainer, nil)
 	if err != nil {
 		return "", err
 	}
@@ -556,14 +559,14 @@ func (o *FsObjectSwift) updateChunks(in io.Reader, headers swift.Headers, size i
 	left := size
 	i := 0
 	uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
-	segmentsPath := fmt.Sprintf("%s%s/%s", o.swift.root, o.remote, uniquePrefix)
+	segmentsPath := fmt.Sprintf("%s%s/%s", o.fs.root, o.remote, uniquePrefix)
 	for left > 0 {
 		n := min(left, int64(chunkSize))
 		headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
 		segmentReader := io.LimitReader(in, n)
 		segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
-		fs.Debug(o, "Uploading segment file %q into %q", segmentPath, o.swift.segmentsContainer)
-		_, err := o.swift.c.ObjectPut(o.swift.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
+		fs.Debug(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
+		_, err := o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
 		if err != nil {
 			return "", err
 		}
@@ -571,18 +574,18 @@ func (o *FsObjectSwift) updateChunks(in io.Reader, headers swift.Headers, size i
 		i++
 	}
 	// Upload the manifest
-	headers["X-Object-Manifest"] = fmt.Sprintf("%s/%s", o.swift.segmentsContainer, segmentsPath)
+	headers["X-Object-Manifest"] = fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath)
 	headers["Content-Length"] = "0" // set Content-Length as we know it
 	emptyReader := bytes.NewReader(nil)
-	manifestName := o.swift.root + o.remote
-	_, err = o.swift.c.ObjectPut(o.swift.container, manifestName, emptyReader, true, "", "", headers)
+	manifestName := o.fs.root + o.remote
+	_, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", "", headers)
 	return uniquePrefix + "/", err
 }
 
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) error {
+func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
 	// Note whether this has a manifest before starting
 	isManifest, err := o.isManifestFile()
 	if err != nil {
@@ -601,7 +604,7 @@ func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) erro
 		}
 	} else {
 		headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length as we know it
-		_, err := o.swift.c.ObjectPut(o.swift.container, o.swift.root+o.remote, in, true, "", "", headers)
+		_, err := o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", "", headers)
 		if err != nil {
 			return err
 		}
@@ -621,13 +624,13 @@ func (o *FsObjectSwift) Update(in io.Reader, modTime time.Time, size int64) erro
 }
 
 // Remove an object
-func (o *FsObjectSwift) Remove() error {
+func (o *Object) Remove() error {
 	isManifestFile, err := o.isManifestFile()
 	if err != nil {
 		return err
 	}
 	// Remove file/manifest first
-	err = o.swift.c.ObjectDelete(o.swift.container, o.swift.root+o.remote)
+	err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
 	if err != nil {
 		return err
 	}
@@ -642,6 +645,8 @@ func (o *FsObjectSwift) Remove() error {
 }
 // Check the interfaces are satisfied
-var _ fs.Fs = &FsSwift{}
-var _ fs.Copier = &FsSwift{}
-var _ fs.Object = &FsObjectSwift{}
+var (
+	_ fs.Fs     = &Fs{}
+	_ fs.Copier = &Fs{}
+	_ fs.Object = &Object{}
+)
diff --git a/swift/swift_test.go b/swift/swift_test.go
index 739b86c24..5351449ca 100644
--- a/swift/swift_test.go
+++ b/swift/swift_test.go
@@ -13,7 +13,7 @@ import (
 )
 
 func init() {
-	fstests.NilObject = fs.Object((*swift.FsObjectSwift)(nil))
+	fstests.NilObject = fs.Object((*swift.Object)(nil))
 	fstests.RemoteName = "TestSwift:"
 }
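The swift updateChunks method above splits large uploads into fixed-size segments with io.LimitReader and finishes with a zero-byte manifest object whose X-Object-Manifest header points at the segment prefix. A self-contained sketch of just the splitting loop, writing segments to in-memory buffers instead of a _segments container:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// splitIntoSegments reads size bytes from in and returns them in chunks of at
// most chunkSize bytes, mirroring how the swift backend streams its segments.
func splitIntoSegments(in io.Reader, size, chunkSize int64) ([][]byte, error) {
	var segments [][]byte
	for left := size; left > 0; {
		n := left
		if n > chunkSize {
			n = chunkSize
		}
		var buf bytes.Buffer
		// io.LimitReader stops the copy after exactly n bytes of the stream.
		if _, err := io.Copy(&buf, io.LimitReader(in, n)); err != nil {
			return nil, err
		}
		segments = append(segments, buf.Bytes())
		left -= n
	}
	return segments, nil
}

func main() {
	data := "0123456789abcdef" // 16 bytes
	segs, err := splitIntoSegments(strings.NewReader(data), int64(len(data)), 5)
	if err != nil {
		panic(err)
	}
	for i, s := range segs {
		fmt.Printf("segment %08d: %q\n", i, s)
	}
}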