Mirror of https://github.com/rclone/rclone.git (synced 2024-11-28 19:34:55 +01:00)
Add context propagation to rclone
- Change rclone/fs interfaces to accept context.Context
- Update interface implementations to use context.Context
- Change top level usage to propagate context to lower level functions

Context propagation is needed for stopping transfers and passing other request-scoped values.
This commit is contained in:
Parent: a2c317b46e
Commit: f78cd1e043
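The change is mechanical but wide-reaching: every fs.Fs and fs.Object method gains a context.Context as its first parameter, and callers pass their context down instead of each layer inventing its own. The sketch below illustrates the shape of the change with a hypothetical trimmed-down interface (not the real fs.Fs definition); callers with no meaningful context yet, such as the tests and NewFs constructors in this diff, pass context.Background().

package main

import (
	"context"
	"fmt"
)

// Before this commit a listing interface would have looked like:
//
//	type Lister interface {
//		List(dir string) ([]string, error)
//	}
//
// After: the context comes first, so cancellation and other request-scoped
// values reach every implementation. (Hypothetical interface for illustration.)
type Lister interface {
	List(ctx context.Context, dir string) ([]string, error)
}

type memLister struct{ entries []string }

// List honours cancellation before doing any work - the behaviour the
// propagation in this commit makes possible for real backends.
func (m memLister) List(ctx context.Context, dir string) ([]string, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	return m.entries, nil
}

func main() {
	ctx := context.Background() // top level with nothing to propagate yet
	entries, err := memLister{entries: []string{"a", "b"}}.List(ctx, "")
	fmt.Println(entries, err)
}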
@@ -1,6 +1,7 @@
 package alias

 import (
+"context"
 "fmt"
 "path"
 "path/filepath"
@@ -69,7 +70,7 @@ func TestNewFS(t *testing.T) {
 prepare(t, remoteRoot)
 f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
 require.NoError(t, err, what)
-gotEntries, err := f.List(test.fsList)
+gotEntries, err := f.List(context.Background(), test.fsList)
 require.NoError(t, err, what)

 sort.Sort(gotEntries)
@@ -12,6 +12,7 @@ we ignore assets completely!
 */

 import (
+"context"
 "encoding/json"
 "fmt"
 "io"
@@ -246,6 +247,7 @@ func filterRequest(req *http.Request) {

 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ctx := context.Background()
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -307,7 +309,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 f.dirCache = dircache.New(root, f.trueRootID, f)

 // Find the current root
-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
 if err != nil {
 // Assume it is a file
 newRoot, remote := dircache.SplitPath(root)
@@ -315,12 +317,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
 tempF.root = newRoot
 // Make new Fs which is the parent
-err = tempF.dirCache.FindRoot(false)
+err = tempF.dirCache.FindRoot(ctx, false)
 if err != nil {
 // No root so return old f
 return f, nil
 }
-_, err := tempF.newObjectWithInfo(remote, nil)
+_, err := tempF.newObjectWithInfo(ctx, remote, nil)
 if err != nil {
 if err == fs.ErrorObjectNotFound {
 // File doesn't exist so return old f
@@ -352,7 +354,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
 o := &Object{
 fs: f,
 remote: remote,
@@ -361,7 +363,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)
 // Set info but not meta
 o.info = info
 } else {
-err := o.readMetaData() // reads info and meta, returning an error
+err := o.readMetaData(ctx) // reads info and meta, returning an error
 if err != nil {
 return nil, err
 }
@@ -371,12 +373,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-return f.newObjectWithInfo(remote, nil)
+return f.newObjectWithInfo(ctx, remote, nil)
 }

 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 //fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
 folder := acd.FolderFromId(pathID, f.c.Nodes)
 var resp *http.Response
@@ -403,7 +405,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
 }

 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
 //fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
 folder := acd.FolderFromId(pathID, f.c.Nodes)
 var resp *http.Response
@@ -501,12 +503,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
 if err != nil {
 return nil, err
 }
-directoryID, err := f.dirCache.FindDir(dir, false)
+directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 if err != nil {
 return nil, err
 }
@@ -524,7 +526,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 d := fs.NewDir(remote, when).SetID(*node.Id)
 entries = append(entries, d)
 case fileKind:
-o, err := f.newObjectWithInfo(remote, node)
+o, err := f.newObjectWithInfo(ctx, remote, node)
 if err != nil {
 iErr = err
 return true
@@ -568,7 +570,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // At the end of large uploads. The speculation is that the timeout
 // is waiting for the sha1 hashing to complete and the file may well
 // be properly uploaded.
-func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
+func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
 // Return if no error - all is well
 if inErr == nil {
 return false, inInfo, inErr
@@ -608,7 +610,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
 fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
 remote := src.Remote()
 for i := 1; i <= retries; i++ {
-o, err := f.NewObject(remote)
+o, err := f.NewObject(ctx, remote)
 if err == fs.ErrorObjectNotFound {
 fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
 } else if err != nil {
@@ -634,7 +636,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 remote := src.Remote()
 size := src.Size()
 // Temporary Object under construction
@@ -643,17 +645,17 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 remote: remote,
 }
 // Check if object already exists
-err := o.readMetaData()
+err := o.readMetaData(ctx)
 switch err {
 case nil:
-return o, o.Update(in, src, options...)
+return o, o.Update(ctx, in, src, options...)
 case fs.ErrorObjectNotFound:
 // Not found so create it
 default:
 return nil, err
 }
 // If not create it
-leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
+leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
 if err != nil {
 return nil, err
 }
@@ -669,7 +671,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 info, resp, err = folder.Put(in, leaf)
 f.tokenRenewer.Stop()
 var ok bool
-ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
+ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
 if ok {
 return false, nil
 }
@@ -683,13 +685,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-err := f.dirCache.FindRoot(true)
+err := f.dirCache.FindRoot(ctx, true)
 if err != nil {
 return err
 }
 if dir != "" {
-_, err = f.dirCache.FindDir(dir, true)
+_, err = f.dirCache.FindDir(ctx, dir, true)
 }
 return err
 }
@@ -703,7 +705,7 @@ func (f *Fs) Mkdir(dir string) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 // go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
 srcObj, ok := src.(*Object)
 if !ok {
@@ -712,15 +714,15 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 }

 // create the destination directory if necessary
-err := f.dirCache.FindRoot(true)
+err := f.dirCache.FindRoot(ctx, true)
 if err != nil {
 return nil, err
 }
-srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
+srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
 if err != nil {
 return nil, err
 }
-dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
+dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
 if err != nil {
 return nil, err
 }
@@ -736,12 +738,12 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 srcErr, dstErr error
 )
 for i := 1; i <= fs.Config.LowLevelRetries; i++ {
-_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
+_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
 if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
 // exit if error on source
 return nil, srcErr
 }
-dstObj, dstErr = f.NewObject(remote)
+dstObj, dstErr = f.NewObject(ctx, remote)
 if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
 // exit if error on dst
 return nil, dstErr
@@ -770,7 +772,7 @@ func (f *Fs) DirCacheFlush() {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
 srcFs, ok := src.(*Fs)
 if !ok {
 fs.Debugf(src, "DirMove error: not same remote type")
@@ -786,14 +788,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 }

 // find the root src directory
-err = srcFs.dirCache.FindRoot(false)
+err = srcFs.dirCache.FindRoot(ctx, false)
 if err != nil {
 return err
 }

 // find the root dst directory
 if dstRemote != "" {
-err = f.dirCache.FindRoot(true)
+err = f.dirCache.FindRoot(ctx, true)
 if err != nil {
 return err
 }
@@ -808,14 +810,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 if dstRemote == "" {
 findPath = f.root
 }
-dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
+dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
 if err != nil {
 return err
 }

 // Check destination does not exist
 if dstRemote != "" {
-_, err = f.dirCache.FindDir(dstRemote, false)
+_, err = f.dirCache.FindDir(ctx, dstRemote, false)
 if err == fs.ErrorDirNotFound {
 // OK
 } else if err != nil {
@@ -831,7 +833,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 if srcRemote == "" {
 srcDirectoryID, err = srcFs.dirCache.RootParentID()
 } else {
-_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
+_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
 }
 if err != nil {
 return err
@@ -839,7 +841,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 srcLeaf, _ := dircache.SplitPath(srcPath)

 // Find ID of src
-srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
 if err != nil {
 return err
 }
@@ -872,17 +874,17 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {

 // purgeCheck remotes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 root := path.Join(f.root, dir)
 if root == "" {
 return errors.New("can't purge root directory")
 }
 dc := f.dirCache
-err := dc.FindRoot(false)
+err := dc.FindRoot(ctx, false)
 if err != nil {
 return err
 }
-rootID, err := dc.FindDir(dir, false)
+rootID, err := dc.FindDir(ctx, dir, false)
 if err != nil {
 return err
 }
@@ -931,8 +933,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-return f.purgeCheck(dir, true)
+return f.purgeCheck(ctx, dir, true)
 }

 // Precision return the precision of this Fs
@@ -954,7 +956,7 @@ func (f *Fs) Hashes() hash.Set {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 // srcObj, ok := src.(*Object)
 // if !ok {
 // fs.Debugf(src, "Can't copy - not same remote type")
@@ -965,7 +967,7 @@ func (f *Fs) Hashes() hash.Set {
 // if err != nil {
 // return nil, err
 // }
-// return f.NewObject(remote), nil
+// return f.NewObject(ctx, remote), nil
 //}

 // Purge deletes all the files and the container
@@ -973,8 +975,8 @@ func (f *Fs) Hashes() hash.Set {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
-return f.purgeCheck("", false)
+return f.purgeCheck(ctx, "", false)
 }

 // ------------------------------------------------------------
@@ -998,7 +1000,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 if t != hash.MD5 {
 return "", hash.ErrUnsupported
 }
@@ -1021,11 +1023,11 @@ func (o *Object) Size() int64 {
 // it also sets the info
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 if o.info != nil {
 return nil
 }
-leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
+leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
 if err != nil {
 if err == fs.ErrorDirNotFound {
 return fs.ErrorObjectNotFound
@@ -1054,8 +1056,8 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
-err := o.readMetaData()
+err := o.readMetaData(ctx)
 if err != nil {
 fs.Debugf(o, "Failed to read metadata: %v", err)
 return time.Now()
@@ -1069,7 +1071,7 @@ func (o *Object) ModTime() time.Time {
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 // FIXME not implemented
 return fs.ErrorCantSetModTime
 }
@@ -1080,7 +1082,7 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
 if bigObject {
 fs.Debugf(o, "Downloading large object via tempLink")
@@ -1102,7 +1104,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 file := acd.File{Node: o.info}
 var info *acd.File
 var resp *http.Response
@@ -1113,7 +1115,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 info, resp, err = file.Overwrite(in)
 o.fs.tokenRenewer.Stop()
 var ok bool
-ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
+ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
 if ok {
 return false, nil
 }
@@ -1138,7 +1140,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
 }

 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 return o.fs.removeNode(o.info)
 }

@@ -1260,7 +1262,7 @@ OnConflict:
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
 return *o.info.ContentProperties.ContentType
 }
@@ -1273,7 +1275,7 @@ func (o *Object) MimeType() string {
 // Automatically restarts itself in case of unexpected behaviour of the remote.
 //
 // Close the returned channel to stop being notified.
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
 checkpoint := f.opt.Checkpoint

 go func() {
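One detail worth noting in the backend hunks above and below: NewFs keeps its existing signature in this commit, so each constructor creates a placeholder context with ctx := context.Background() and hands it to the lower-level calls that now require one. A minimal sketch of that shape, with illustrative names rather than rclone's real types:

package main

import (
	"context"
	"fmt"
)

// Thing stands in for a backend Fs; the names here are illustrative only.
type Thing struct{ name string }

// load stands in for lower-level helpers (dircache lookups, NewObject, ...)
// that now take a context.
func (t *Thing) load(ctx context.Context) error {
	return ctx.Err()
}

// NewThing mirrors the NewFs pattern in the diff: its exported signature is
// unchanged, so it creates a background context and passes that down.
func NewThing(name string) (*Thing, error) {
	ctx := context.Background() // placeholder until constructors accept a ctx themselves
	t := &Thing{name: name}
	if err := t.load(ctx); err != nil {
		return nil, err
	}
	return t, nil
}

func main() {
	t, err := NewThing("demo")
	fmt.Println(t.name, err)
}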
@@ -309,6 +309,7 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline

 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ctx := context.Background()
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -415,7 +416,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 } else {
 f.root += "/"
 }
-_, err := f.NewObject(remote)
+_, err := f.NewObject(ctx, remote)
 if err != nil {
 if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
 // File doesn't exist or is a directory so return old f
@@ -454,7 +455,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItem) (fs.Object,

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 return f.newObjectWithInfo(remote, nil)
 }

@@ -496,7 +497,7 @@ type listFn func(remote string, object *azblob.BlobItem, isDirectory bool) error
 // the container and root supplied
 //
 // dir is the starting directory, "" for root
-func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
+func (f *Fs) list(ctx context.Context, dir string, recurse bool, maxResults uint, fn listFn) error {
 f.containerOKMu.Lock()
 deleted := f.containerDeleted
 f.containerOKMu.Unlock()
@@ -523,7 +524,6 @@ func (f *Fs) list(dir string, recurse bool, maxResults uint, fn listFn) error {
 Prefix: root,
 MaxResults: int32(maxResults),
 }
-ctx := context.Background()
 directoryMarkers := map[string]struct{}{}
 for marker := (azblob.Marker{}); marker.NotDone(); {
 var response *azblob.ListBlobsHierarchySegmentResponse
@@ -621,8 +621,8 @@ func (f *Fs) markContainerOK() {
 }

 // listDir lists a single directory
-func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-err = f.list(dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+err = f.list(ctx, dir, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
 entry, err := f.itemToDirEntry(remote, object, isDirectory)
 if err != nil {
 return err
@@ -665,11 +665,11 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 if f.container == "" {
 return f.listContainers(dir)
 }
-return f.listDir(dir)
+return f.listDir(ctx, dir)
 }

 // ListR lists the objects and directories of the Fs starting
@@ -688,12 +688,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 if f.container == "" {
 return fs.ErrorListBucketRequired
 }
 list := walk.NewListRHelper(callback)
-err = f.list(dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+err = f.list(ctx, dir, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
 entry, err := f.itemToDirEntry(remote, object, isDirectory)
 if err != nil {
 return err
@@ -745,13 +745,13 @@ func (f *Fs) listContainersToFn(fn listContainerFn) error {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 // Temporary Object under construction
 fs := &Object{
 fs: f,
 remote: src.Remote(),
 }
-return fs, fs.Update(in, src, options...)
+return fs, fs.Update(ctx, in, src, options...)
 }

 // Check if the container exists
@@ -784,7 +784,7 @@ func (f *Fs) dirExists() (bool, error) {
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 f.containerOKMu.Lock()
 defer f.containerOKMu.Unlock()
 if f.containerOK {
@@ -831,9 +831,9 @@ func (f *Fs) Mkdir(dir string) error {
 }

 // isEmpty checks to see if a given directory is empty and returns an error if not
-func (f *Fs) isEmpty(dir string) (err error) {
+func (f *Fs) isEmpty(ctx context.Context, dir string) (err error) {
 empty := true
-err = f.list(dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
+err = f.list(ctx, dir, true, 1, func(remote string, object *azblob.BlobItem, isDirectory bool) error {
 empty = false
 return nil
 })
@@ -880,8 +880,8 @@ func (f *Fs) deleteContainer() error {
 // Rmdir deletes the container if the fs is at the root
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-err := f.isEmpty(dir)
+err := f.isEmpty(ctx, dir)
 if err != nil {
 return err
 }
@@ -902,7 +902,7 @@ func (f *Fs) Hashes() hash.Set {
 }

 // Purge deletes all the files and directories including the old versions.
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 dir := "" // forward compat!
 if f.root != "" || dir != "" {
 // Delegate to caller if not root container
@@ -920,8 +920,8 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-err := f.Mkdir("")
+err := f.Mkdir(ctx, "")
 if err != nil {
 return nil, err
 }
@@ -939,7 +939,6 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }

 options := azblob.BlobAccessConditions{}
-ctx := context.Background()
 var startCopy *azblob.BlobStartCopyFromURLResponse

 err = f.pacer.Call(func() (bool, error) {
@@ -960,7 +959,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 copyStatus = getMetadata.CopyStatus()
 }

-return f.NewObject(remote)
+return f.NewObject(ctx, remote)
 }

 // ------------------------------------------------------------
@@ -984,7 +983,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the MD5 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 if t != hash.MD5 {
 return "", hash.ErrUnsupported
 }
@@ -1124,14 +1123,14 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() (result time.Time) {
+func (o *Object) ModTime(ctx context.Context) (result time.Time) {
 // The error is logged in readMetaData
 _ = o.readMetaData()
 return o.modTime
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 // Make sure o.meta is not nil
 if o.meta == nil {
 o.meta = make(map[string]string, 1)
@@ -1140,7 +1139,6 @@ func (o *Object) SetModTime(modTime time.Time) error {
 o.meta[modTimeKey] = modTime.Format(timeFormatOut)

 blob := o.getBlobReference()
-ctx := context.Background()
 err := o.fs.pacer.Call(func() (bool, error) {
 _, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{})
 return o.fs.shouldRetry(err)
@@ -1158,7 +1156,7 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // Offset and Count for range download
 var offset int64
 var count int64
@@ -1182,7 +1180,6 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 }
 }
 blob := o.getBlobReference()
-ctx := context.Background()
 ac := azblob.BlobAccessConditions{}
 var dowloadResponse *azblob.DownloadResponse
 err = o.fs.pacer.Call(func() (bool, error) {
@@ -1371,26 +1368,26 @@ outer:
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-err = o.fs.Mkdir("")
+err = o.fs.Mkdir(ctx, "")
 if err != nil {
 return err
 }
 size := src.Size()
 // Update Mod time
-o.updateMetadataWithModTime(src.ModTime())
+o.updateMetadataWithModTime(src.ModTime(ctx))
 if err != nil {
 return err
 }

 blob := o.getBlobReference()
 httpHeaders := azblob.BlobHTTPHeaders{}
-httpHeaders.ContentType = fs.MimeType(o)
+httpHeaders.ContentType = fs.MimeType(ctx, o)
 // Compute the Content-MD5 of the file, for multiparts uploads it
 // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header
 // Note: If multipart, a MD5 checksum will also be computed for each uploaded block
 // in order to validate its integrity during transport
-if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
+if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" {
 sourceMD5bytes, err := hex.DecodeString(sourceMD5)
 if err == nil {
 httpHeaders.ContentMD5 = sourceMD5bytes
@@ -1415,7 +1412,6 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 fs.Debugf(o, "Setting multipart upload for file of chunk size (%d) to work around SDK bug", size)
 }

-ctx := context.Background()
 // Don't retry, return a retry error instead
 err = o.fs.pacer.CallNoRetry(func() (bool, error) {
 if multipartUpload {
@@ -1448,11 +1444,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 blob := o.getBlobReference()
 snapShotOptions := azblob.DeleteSnapshotsOptionNone
 ac := azblob.BlobAccessConditions{}
-ctx := context.Background()
 return o.fs.pacer.Call(func() (bool, error) {
 _, err := blob.Delete(ctx, snapShotOptions, ac)
 return o.fs.shouldRetry(err)
@@ -1460,7 +1455,7 @@ func (o *Object) Remove() error {
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 return o.mimeType
 }

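The azureblob hunks above also show the inverse edit: methods that used to manufacture a context on the spot (ctx := context.Background() just before an SDK call) drop that line and use the ctx they now receive, so cancellation actually reaches the HTTP request. A hedged before/after sketch with illustrative names:

package main

import (
	"context"
	"fmt"
)

type blob struct{}

// Delete stands in for an SDK call that has always required a context.
func (blob) Delete(ctx context.Context) error { return ctx.Err() }

// Before: the method had no ctx parameter, so it created its own and the
// caller could never cancel the call:
//
//	func remove(b blob) error {
//		ctx := context.Background()
//		return b.Delete(ctx)
//	}

// After: the caller's context flows through to the SDK call.
func remove(ctx context.Context, b blob) error {
	return b.Delete(ctx)
}

func main() {
	fmt.Println(remove(context.Background(), blob{}))
}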
backend/b2/b2.go (108 changed lines)
@@ -7,6 +7,7 @@ package b2
 import (
 "bufio"
 "bytes"
+"context"
 "crypto/sha1"
 "fmt"
 gohash "hash"
@@ -324,6 +325,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {

 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ctx := context.Background()
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -398,7 +400,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 } else {
 f.root += "/"
 }
-_, err := f.NewObject(remote)
+_, err := f.NewObject(ctx, remote)
 if err != nil {
 if err == fs.ErrorObjectNotFound {
 // File doesn't exist so return old f
@@ -516,7 +518,7 @@ func (f *Fs) putUploadBlock(buf []byte) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
 o := &Object{
 fs: f,
 remote: remote,
@@ -527,7 +529,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error)
 return nil, err
 }
 } else {
-err := o.readMetaData() // reads info and headers, returning an error
+err := o.readMetaData(ctx) // reads info and headers, returning an error
 if err != nil {
 return nil, err
 }
@@ -537,8 +539,8 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.File) (fs.Object, error)

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-return f.newObjectWithInfo(remote, nil)
+return f.newObjectWithInfo(ctx, remote, nil)
 }

 // listFn is called from list to handle an object
@@ -562,7 +564,7 @@ var errEndList = errors.New("end list")
 // than 1000)
 //
 // If hidden is set then it will list the hidden (deleted) files too.
-func (f *Fs) list(dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
+func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
 root := f.root
 if dir != "" {
 root += dir + "/"
@@ -643,7 +645,7 @@ func (f *Fs) list(dir string, recurse bool, prefix string, limit int, hidden boo
 }

 // Convert a list item into a DirEntry
-func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
+func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
 if isDirectory {
 d := fs.NewDir(remote, time.Time{})
 return d, nil
@@ -657,7 +659,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, l
 if object.Action == "hide" {
 return nil, nil
 }
-o, err := f.newObjectWithInfo(remote, object)
+o, err := f.newObjectWithInfo(ctx, remote, object)
 if err != nil {
 return nil, err
 }
@@ -674,10 +676,10 @@ func (f *Fs) markBucketOK() {
 }

 // listDir lists a single directory
-func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 last := ""
-err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
+err = f.list(ctx, dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
-entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
+entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
 if err != nil {
 return err
 }
@@ -719,11 +721,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 if f.bucket == "" {
 return f.listBuckets(dir)
 }
-return f.listDir(dir)
+return f.listDir(ctx, dir)
 }

 // ListR lists the objects and directories of the Fs starting
@@ -742,14 +744,14 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 if f.bucket == "" {
 return fs.ErrorListBucketRequired
 }
 list := walk.NewListRHelper(callback)
 last := ""
-err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
+err = f.list(ctx, dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
-entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
+entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
 if err != nil {
 return err
 }
@@ -834,22 +836,22 @@ func (f *Fs) clearBucketID() {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// Temporary Object under construction
 	fs := &Object{
 		fs:     f,
 		remote: src.Remote(),
 	}
-	return fs, fs.Update(in, src, options...)
+	return fs, fs.Update(ctx, in, src, options...)
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+	return f.Put(ctx, in, src, options...)
 }

 // Mkdir creates the bucket if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	f.bucketOKMu.Lock()
 	defer f.bucketOKMu.Unlock()
 	if f.bucketOK {
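Put, PutStream and Mkdir illustrate the second rule of the change: a thin wrapper never creates its own context, it forwards the one it was given, and only the code that finally issues a request binds it. The sketch below is illustrative only; the function names and URL are assumptions and not rclone APIs:

package sketch

import (
	"context"
	"io"
	"net/http"
)

// put performs the real upload; attaching ctx to the request is what makes the transfer stoppable.
func put(ctx context.Context, in io.Reader) error {
	req, err := http.NewRequest("POST", "https://example.invalid/upload", in)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

// putStream mirrors PutStream above: it forwards the caller's context rather than wrapping it.
func putStream(ctx context.Context, in io.Reader) error {
	return put(ctx, in)
}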
@@ -895,7 +897,7 @@ func (f *Fs) Mkdir(dir string) error {
 // Rmdir deletes the bucket if the fs is at the root
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	f.bucketOKMu.Lock()
 	defer f.bucketOKMu.Unlock()
 	if f.root != "" || dir != "" {
@@ -990,7 +992,7 @@ func (f *Fs) deleteByID(ID, Name string) error {
 // if oldOnly is true then it deletes only non current files.
 //
 // Implemented here so we can make sure we delete old versions.
-func (f *Fs) purge(oldOnly bool) error {
+func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
 	var errReturn error
 	var checkErrMutex sync.Mutex
 	var checkErr = func(err error) {
@@ -1025,7 +1027,7 @@ func (f *Fs) purge(oldOnly bool) error {
 		}()
 	}
 	last := ""
-	checkErr(f.list("", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
+	checkErr(f.list(ctx, "", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
 		if !isDirectory {
 			accounting.Stats.Checking(remote)
 			if oldOnly && last != remote {
@@ -1051,19 +1053,19 @@ func (f *Fs) purge(oldOnly bool) error {
 	wg.Wait()

 	if !oldOnly {
-		checkErr(f.Rmdir(""))
+		checkErr(f.Rmdir(ctx, ""))
 	}
 	return errReturn
 }

 // Purge deletes all the files and directories including the old versions.
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
-	return f.purge(false)
+	return f.purge(ctx, false)
 }

 // CleanUp deletes all the hidden files.
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
-	return f.purge(true)
+	return f.purge(ctx, true)
 }

 // Copy src to this remote using server side copy operations.
@@ -1075,8 +1077,8 @@ func (f *Fs) CleanUp() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	err := f.Mkdir("")
+	err := f.Mkdir(ctx, "")
 	if err != nil {
 		return nil, err
 	}
@@ -1144,13 +1146,13 @@ func (o *Object) Remote() string {
 }

 // Hash returns the Sha-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.SHA1 {
 		return "", hash.ErrUnsupported
 	}
 	if o.sha1 == "" {
 		// Error is logged in readMetaData
-		err := o.readMetaData()
+		err := o.readMetaData(ctx)
 		if err != nil {
 			return "", err
 		}
@@ -1207,7 +1209,10 @@ func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
 }

 // getMetaData gets the metadata from the object unconditionally
-func (o *Object) getMetaData() (info *api.File, err error) {
+func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
+	if o.id != "" {
+		return nil, nil
+	}
 	maxSearched := 1
 	var timestamp api.Timestamp
 	baseRemote := o.remote
@@ -1215,7 +1220,8 @@ func (o *Object) getMetaData() (info *api.File, err error) {
 		timestamp, baseRemote = api.RemoveVersion(baseRemote)
 		maxSearched = maxVersions
 	}
-	err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
+	err = o.fs.list(ctx, "", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
 		if isDirectory {
 			return nil
 		}
@@ -1246,11 +1252,11 @@ func (o *Object) getMetaData() (info *api.File, err error) {
 // o.modTime
 // o.size
 // o.sha1
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.id != "" {
 		return nil
 	}
-	info, err := o.getMetaData()
+	info, err := o.getMetaData(ctx)
 	if err != nil {
 		return err
 	}
@@ -1285,15 +1291,15 @@ func (o *Object) parseTimeString(timeString string) (err error) {
 // LastModified returned in the http headers
 //
 // SHA-1 will also be updated once the request has completed.
-func (o *Object) ModTime() (result time.Time) {
+func (o *Object) ModTime(ctx context.Context) (result time.Time) {
 	// The error is logged in readMetaData
-	_ = o.readMetaData()
+	_ = o.readMetaData(ctx)
 	return o.modTime
 }

 // SetModTime sets the modification time of the Object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	info, err := o.getMetaData()
+	info, err := o.getMetaData(ctx)
 	if err != nil {
 		return err
 	}
@@ -1386,7 +1392,7 @@ func (file *openFile) Close() (err error) {
 var _ io.ReadCloser = &openFile{}

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	opts := rest.Opts{
 		Method:  "GET",
 		Options: options,
@@ -1477,11 +1483,11 @@ func urlEncode(in string) string {
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	if o.fs.opt.Versions {
 		return errNotWithVersions
 	}
-	err = o.fs.Mkdir("")
+	err = o.fs.Mkdir(ctx, "")
 	if err != nil {
 		return err
 	}
@@ -1499,7 +1505,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

 		if err == nil {
 			fs.Debugf(o, "File is big enough for chunked streaming")
-			up, err := o.fs.newLargeUpload(o, in, src)
+			up, err := o.fs.newLargeUpload(ctx, o, in, src)
 			if err != nil {
 				o.fs.putUploadBlock(buf)
 				return err
@@ -1514,16 +1520,16 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 			return err
 		}
 	} else if size > int64(o.fs.opt.UploadCutoff) {
-		up, err := o.fs.newLargeUpload(o, in, src)
+		up, err := o.fs.newLargeUpload(ctx, o, in, src)
 		if err != nil {
 			return err
 		}
 		return up.Upload()
 	}

-	modTime := src.ModTime()
+	modTime := src.ModTime(ctx)

-	calculatedSha1, _ := src.Hash(hash.SHA1)
+	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
 	if calculatedSha1 == "" {
 		calculatedSha1 = "hex_digits_at_end"
 		har := newHashAppendingReader(in, sha1.New())
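Note that the context reaches not only the Fs and Object methods but also the metadata accessors used on the source: src.ModTime(ctx) and src.Hash(ctx, hash.SHA1) above, and fs.MimeType(ctx, src) just below, because producing those values may itself require a remote call. A sketch of an interface change of that shape (the interface names here are illustrative; the real source type is fs.ObjectInfo):

package sketch

import (
	"context"
	"time"
)

// Before: metadata accessors took no arguments, so they could not be cancelled.
type objectInfoOld interface {
	ModTime() time.Time
	Hash(kind int) (string, error)
}

// After: each accessor threads a context, since fetching a modification time
// or a checksum can mean another round trip to the remote.
type objectInfoNew interface {
	ModTime(ctx context.Context) time.Time
	Hash(ctx context.Context, kind int) (string, error)
}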
@@ -1601,7 +1607,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		ExtraHeaders: map[string]string{
 			"Authorization":  upload.AuthorizationToken,
 			"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
-			"Content-Type":   fs.MimeType(src),
+			"Content-Type":   fs.MimeType(ctx, src),
 			sha1Header:       calculatedSha1,
 			timeHeader:       timeString(modTime),
 		},
@@ -1626,7 +1632,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	if o.fs.opt.Versions {
 		return errNotWithVersions
 	}
@@ -1637,7 +1643,7 @@ func (o *Object) Remove() error {
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 	return o.mimeType
 }

@@ -6,6 +6,7 @@ package b2

 import (
 	"bytes"
+	"context"
 	"crypto/sha1"
 	"encoding/hex"
 	"fmt"
@@ -80,7 +81,7 @@ type largeUpload struct {
 }

 // newLargeUpload starts an upload of object o from in with metadata in src
-func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
+func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
 	remote := o.remote
 	size := src.Size()
 	parts := int64(0)
@@ -98,7 +99,7 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
 		sha1SliceSize = parts
 	}

-	modTime := src.ModTime()
+	modTime := src.ModTime(ctx)
 	opts := rest.Opts{
 		Method: "POST",
 		Path:   "/b2_start_large_file",
@@ -110,14 +111,14 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
 	var request = api.StartLargeFileRequest{
 		BucketID:    bucketID,
 		Name:        o.fs.root + remote,
-		ContentType: fs.MimeType(src),
+		ContentType: fs.MimeType(ctx, src),
 		Info: map[string]string{
 			timeKey: timeString(modTime),
 		},
 	}
 	// Set the SHA1 if known
 	if !o.fs.opt.DisableCheckSum {
-		if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
+		if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
 			request.Info[sha1Key] = calculatedSha1
 		}
 	}
@@ -10,6 +10,7 @@ package box
 // FIXME box can copy a directory

 import (
+	"context"
 	"fmt"
 	"io"
 	"log"
@@ -193,9 +194,9 @@ func restoreReservedChars(x string) string {
 }

 // readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
+func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
 	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
-	leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
+	leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
 	if err != nil {
 		if err == fs.ErrorDirNotFound {
 			return nil, fs.ErrorObjectNotFound
@@ -238,6 +239,7 @@ func errorHandler(resp *http.Response) error {

 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.Background()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
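NewFs keeps its old signature in this commit, so it creates its own root context at the top (ctx := context.Background()) and uses it for the lookups performed while the Fs is being constructed. A minimal sketch of that bridging pattern, with invented names (not the real rclone constructor):

package sketch

import "context"

type remoteFs struct{ name, root string }

// readRootMetadata stands in for the dirCache/readMetaDataForPath calls made during construction.
func (f *remoteFs) readRootMetadata(ctx context.Context) error {
	// a real implementation would issue a request bound to ctx here
	return nil
}

// newRemoteFs mirrors the shape of NewFs above: the constructor's signature is
// unchanged, so it creates a background context itself and threads it through
// the calls it makes while setting up.
func newRemoteFs(name, root string) (*remoteFs, error) {
	ctx := context.Background()
	f := &remoteFs{name: name, root: root}
	if err := f.readRootMetadata(ctx); err != nil {
		return nil, err
	}
	return f, nil
}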
@@ -271,7 +273,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-		_, err := f.readMetaDataForPath("")
+		_, err := f.readMetaDataForPath(ctx, "")
 		return err
 	})

@@ -279,7 +281,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.dirCache = dircache.New(root, rootID, f)

 	// Find the current root
-	err = f.dirCache.FindRoot(false)
+	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		// Assume it is a file
 		newRoot, remote := dircache.SplitPath(root)
@@ -287,12 +289,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
 		tempF.root = newRoot
 		// Make new Fs which is the parent
-		err = tempF.dirCache.FindRoot(false)
+		err = tempF.dirCache.FindRoot(ctx, false)
 		if err != nil {
 			// No root so return old f
 			return f, nil
 		}
-		_, err := tempF.newObjectWithInfo(remote, nil)
+		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound {
 				// File doesn't exist so return old f
@@ -323,7 +325,7 @@ func (f *Fs) rootSlash() string {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -333,7 +335,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
 		// Set info
 		err = o.setMetaData(info)
 	} else {
-		err = o.readMetaData() // reads info and meta, returning an error
+		err = o.readMetaData(ctx) // reads info and meta, returning an error
 	}
 	if err != nil {
 		return nil, err
@@ -343,12 +345,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+	return f.newObjectWithInfo(ctx, remote, nil)
 }

 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// Find the leaf in pathID
 	found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
 		if item.Name == leaf {
@@ -368,7 +370,7 @@ func fieldsValue() url.Values {
 }

 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
 	// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
 	var resp *http.Response
 	var info *api.Item
@@ -467,12 +469,12 @@ OUTER:
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(false)
+	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		return nil, err
 	}
-	directoryID, err := f.dirCache.FindDir(dir, false)
+	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
 		return nil, err
 	}
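The dirCache calls (FindRoot, FindDir, FindRootAndPath, FindPath) now take the context as well, presumably because a cache miss has to go back to the remote to resolve a directory ID. A rough sketch of a cache with that shape; the type and field names are invented and this is not the real dircache API:

package sketch

import "context"

type dirIDCache struct {
	ids    map[string]string
	lookup func(ctx context.Context, path string) (string, error)
}

// FindDir returns a cached ID when it can; only a miss performs a remote
// lookup, and that lookup is bound to the caller's context.
func (c *dirIDCache) FindDir(ctx context.Context, path string) (string, error) {
	if id, ok := c.ids[path]; ok {
		return id, nil
	}
	id, err := c.lookup(ctx, path)
	if err != nil {
		return "", err
	}
	c.ids[path] = id
	return id, nil
}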
@@ -486,7 +488,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 			// FIXME more info from dir?
 			entries = append(entries, d)
 		} else if info.Type == api.ItemTypeFile {
-			o, err := f.newObjectWithInfo(remote, info)
+			o, err := f.newObjectWithInfo(ctx, remote, info)
 			if err != nil {
 				iErr = err
 				return true
@@ -510,9 +512,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // Returns the object, leaf, directoryID and error
 //
 // Used to create new objects
-func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
+func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
 	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
+	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
 	if err != nil {
 		return
 	}
@@ -529,22 +531,22 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
+	existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
 	switch err {
 	case nil:
-		return existingObj, existingObj.Update(in, src, options...)
+		return existingObj, existingObj.Update(ctx, in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
-		return f.PutUnchecked(in, src)
+		return f.PutUnchecked(ctx, in, src)
 	default:
 		return nil, err
 	}
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+	return f.Put(ctx, in, src, options...)
 }

 // PutUnchecked the object into the container
@@ -554,26 +556,26 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	remote := src.Remote()
 	size := src.Size()
-	modTime := src.ModTime()
+	modTime := src.ModTime(ctx)

-	o, _, _, err := f.createObject(remote, modTime, size)
+	o, _, _, err := f.createObject(ctx, remote, modTime, size)
 	if err != nil {
 		return nil, err
 	}
-	return o, o.Update(in, src, options...)
+	return o, o.Update(ctx, in, src, options...)
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-	err := f.dirCache.FindRoot(true)
+	err := f.dirCache.FindRoot(ctx, true)
 	if err != nil {
 		return err
 	}
 	if dir != "" {
-		_, err = f.dirCache.FindDir(dir, true)
+		_, err = f.dirCache.FindDir(ctx, dir, true)
 	}
 	return err
 }
@@ -593,17 +595,17 @@ func (f *Fs) deleteObject(id string) error {

 // purgeCheck removes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	root := path.Join(f.root, dir)
 	if root == "" {
 		return errors.New("can't purge root directory")
 	}
 	dc := f.dirCache
-	err := dc.FindRoot(false)
+	err := dc.FindRoot(ctx, false)
 	if err != nil {
 		return err
 	}
-	rootID, err := dc.FindDir(dir, false)
+	rootID, err := dc.FindDir(ctx, dir, false)
 	if err != nil {
 		return err
 	}
@@ -633,8 +635,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-	return f.purgeCheck(dir, true)
+	return f.purgeCheck(ctx, dir, true)
 }

 // Precision return the precision of this Fs
@@ -651,13 +653,13 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	err := srcObj.readMetaData()
+	err := srcObj.readMetaData(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -669,7 +671,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	}

 	// Create temporary object
-	dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {
 		return nil, err
 	}
@@ -708,8 +710,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
-	return f.purgeCheck("", false)
+	return f.purgeCheck(ctx, "", false)
 }

 // move a file or folder
@@ -746,7 +748,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -754,7 +756,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	}

 	// Create temporary object
-	dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {
 		return nil, err
 	}
@@ -780,7 +782,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -796,14 +798,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	}

 	// find the root src directory
-	err := srcFs.dirCache.FindRoot(false)
+	err := srcFs.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		return err
 	}

 	// find the root dst directory
 	if dstRemote != "" {
-		err = f.dirCache.FindRoot(true)
+		err = f.dirCache.FindRoot(ctx, true)
 		if err != nil {
 			return err
 		}
@@ -819,14 +821,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	if dstRemote == "" {
 		findPath = f.root
 	}
-	leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
+	leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
 	if err != nil {
 		return err
 	}

 	// Check destination does not exist
 	if dstRemote != "" {
-		_, err = f.dirCache.FindDir(dstRemote, false)
+		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
 		if err == fs.ErrorDirNotFound {
 			// OK
 		} else if err != nil {
@@ -837,7 +839,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	}

 	// Find ID of src
-	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
 	if err != nil {
 		return err
 	}
@@ -852,8 +854,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }

 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
-	id, err := f.dirCache.FindDir(remote, false)
+	id, err := f.dirCache.FindDir(ctx, remote, false)
 	var opts rest.Opts
 	if err == nil {
 		fs.Debugf(f, "attempting to share directory '%s'", remote)
@@ -865,7 +867,7 @@ func (f *Fs) PublicLink(remote string) (string, error) {
 		}
 	} else {
 		fs.Debugf(f, "attempting to share single file '%s'", remote)
-		o, err := f.NewObject(remote)
+		o, err := f.NewObject(ctx, remote)
 		if err != nil {
 			return "", err
 		}
@@ -928,7 +930,7 @@ func (o *Object) srvPath() string {
 }

 // Hash returns the SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.SHA1 {
 		return "", hash.ErrUnsupported
 	}
@@ -937,7 +939,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	err := o.readMetaData()
+	err := o.readMetaData(context.TODO())
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return 0
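Size() is the one accessor above that keeps its parameterless signature, so it calls readMetaData with context.TODO(). TODO behaves like Background but conventionally marks a call site where no context is available yet and further plumbing is still expected. A small sketch of that distinction (illustrative only, not rclone code):

package sketch

import "context"

// cachedSize mirrors the Size() case above: with no ctx parameter in the
// method's signature, context.TODO() is passed as an explicit placeholder.
func cachedSize(readMetaData func(ctx context.Context) error, size int64) int64 {
	if err := readMetaData(context.TODO()); err != nil {
		return 0 // same fallback behaviour as Size() above
	}
	return size
}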
@@ -962,11 +964,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.hasMetaData {
 		return nil
 	}
-	info, err := o.fs.readMetaDataForPath(o.remote)
+	info, err := o.fs.readMetaDataForPath(ctx, o.remote)
 	if err != nil {
 		if apiErr, ok := err.(*api.Error); ok {
 			if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
@@ -983,8 +985,8 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
-	err := o.readMetaData()
+	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return time.Now()
@@ -993,7 +995,7 @@ func (o *Object) ModTime() time.Time {
 }

 // setModTime sets the modification time of the local fs object
-func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
+func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
 	opts := rest.Opts{
 		Method: "PUT",
 		Path:   "/files/" + o.id,
@@ -1011,8 +1013,8 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	info, err := o.setModTime(modTime)
+	info, err := o.setModTime(ctx, modTime)
 	if err != nil {
 		return err
 	}
@@ -1025,7 +1027,7 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	if o.id == "" {
 		return nil, errors.New("can't download - no id")
 	}
@@ -1093,16 +1095,16 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
 // If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	o.fs.tokenRenewer.Start()
 	defer o.fs.tokenRenewer.Stop()

 	size := src.Size()
-	modTime := src.ModTime()
+	modTime := src.ModTime(ctx)
 	remote := o.Remote()

 	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
+	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
 	if err != nil {
 		return err
 	}
@@ -1117,7 +1119,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	return o.fs.deleteObject(o.id)
 }

backend/cache/cache.go (140 changes, vendored)
@@ -509,7 +509,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
 	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
 		pollInterval := make(chan time.Duration, 1)
 		pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
-		doChangeNotify(f.receiveChangeNotify, pollInterval)
+		doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
 	}

 	f.features = (&fs.Features{
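Long-lived background machinery gets context.Background() rather than a request context: the ChangeNotify poller above is started from NewFs and lives as long as the Fs, so there is no caller context to propagate. A self-contained sketch of a poller started that way (names invented for illustration, not rclone code):

package sketch

import (
	"context"
	"time"
)

// startPoller runs tick at every interval until the returned cancel func is
// called; the background context is the poller's root, not a per-request one.
func startPoller(interval time.Duration, tick func()) context.CancelFunc {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				tick()
			}
		}
	}()
	return cancel
}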
@@ -600,7 +600,7 @@ is used on top of the cache.
 	return f, fsErr
 }

-func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
+func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 	out = make(rc.Params)
 	m, err := f.Stats()
 	if err != nil {
@@ -627,7 +627,7 @@ func (f *Fs) unwrapRemote(remote string) string {
 	return remote
 }

-func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
+func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 	out = make(rc.Params)
 	remoteInt, ok := in["remote"]
 	if !ok {
@@ -672,7 +672,7 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
 	return out, nil
 }

-func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
+func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
 	type chunkRange struct {
 		start, end int64
 	}
@@ -777,18 +777,18 @@ func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
 	for _, pair := range files {
 		file, remote := pair[0], pair[1]
 		var status fileStatus
-		o, err := f.NewObject(remote)
+		o, err := f.NewObject(ctx, remote)
 		if err != nil {
 			fetchedChunks[file] = fileStatus{Error: err.Error()}
 			continue
 		}
 		co := o.(*Object)
-		err = co.refreshFromSource(true)
+		err = co.refreshFromSource(ctx, true)
 		if err != nil {
 			fetchedChunks[file] = fileStatus{Error: err.Error()}
 			continue
 		}
-		handle := NewObjectHandle(co, f)
+		handle := NewObjectHandle(ctx, co, f)
 		handle.UseMemory = false
 		handle.scaleWorkers(1)
 		walkChunkRanges(crs, co.Size(), func(chunk int64) {
@@ -874,7 +874,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
 // ChangeNotify can subscribe multiple callers
 // this is coupled with the wrapped fs ChangeNotify (if it supports it)
 // and also notifies other caches (i.e VFS) to clear out whenever something changes
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
 	f.parentsForgetMu.Lock()
 	defer f.parentsForgetMu.Unlock()
 	fs.Debugf(f, "subscribing to ChangeNotify")
@@ -921,7 +921,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration {
 }

 // NewObject finds the Object at remote.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	var err error

 	fs.Debugf(f, "new object '%s'", remote)
@@ -940,16 +940,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 	// search for entry in source or temp fs
 	var obj fs.Object
 	if f.opt.TempWritePath != "" {
-		obj, err = f.tempFs.NewObject(remote)
+		obj, err = f.tempFs.NewObject(ctx, remote)
 		// not found in temp fs
 		if err != nil {
 			fs.Debugf(remote, "find: not found in local cache fs")
-			obj, err = f.Fs.NewObject(remote)
+			obj, err = f.Fs.NewObject(ctx, remote)
 		} else {
 			fs.Debugf(obj, "find: found in local cache fs")
 		}
 	} else {
-		obj, err = f.Fs.NewObject(remote)
+		obj, err = f.Fs.NewObject(ctx, remote)
 	}

 	// not found in either fs
@@ -959,13 +959,13 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 	}

 	// cache the new entry
-	co = ObjectFromOriginal(f, obj).persist()
+	co = ObjectFromOriginal(ctx, f, obj).persist()
 	fs.Debugf(co, "find: cached object")
 	return co, nil
 }

 // List the objects and directories in dir into entries
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	fs.Debugf(f, "list '%s'", dir)
 	cd := ShallowDirectory(f, dir)

@@ -995,12 +995,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 		fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)

 		for _, queuedRemote := range queuedEntries {
-			queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
+			queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
 			if err != nil {
 				fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
 				continue
 			}
-			co := ObjectFromOriginal(f, queuedEntry).persist()
+			co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
 			fs.Debugf(co, "list: cached temp object")
 			cachedEntries = append(cachedEntries, co)
 		}
@@ -1008,7 +1008,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	}

 	// search from the source
-	sourceEntries, err := f.Fs.List(dir)
+	sourceEntries, err := f.Fs.List(ctx, dir)
 	if err != nil {
 		return nil, err
 	}
@@ -1046,11 +1046,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 			if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
 				continue
 			}
-			co := ObjectFromOriginal(f, o).persist()
+			co := ObjectFromOriginal(ctx, f, o).persist()
 			cachedEntries = append(cachedEntries, co)
 			fs.Debugf(dir, "list: cached object: %v", co)
 		case fs.Directory:
-			cdd := DirectoryFromOriginal(f, o)
+			cdd := DirectoryFromOriginal(ctx, f, o)
 			// check if the dir isn't expired and add it in cache if it isn't
 			if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
 				batchDirectories = append(batchDirectories, cdd)
@@ -1080,8 +1080,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	return cachedEntries, nil
 }

-func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
-	entries, err := f.List(dir)
+	entries, err := f.List(ctx, dir)
 	if err != nil {
 		return err
 	}
@@ -1089,7 +1089,7 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
 	for i := 0; i < len(entries); i++ {
 		innerDir, ok := entries[i].(fs.Directory)
 		if ok {
-			err := f.recurse(innerDir.Remote(), list)
+			err := f.recurse(ctx, innerDir.Remote(), list)
 			if err != nil {
 				return err
 			}
@@ -1106,21 +1106,21 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {

 // ListR lists the objects and directories of the Fs starting
 // from dir recursively into out.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 	fs.Debugf(f, "list recursively from '%s'", dir)

 	// we check if the source FS supports ListR
 	// if it does, we'll use that to get all the entries, cache them and return
 	do := f.Fs.Features().ListR
 	if do != nil {
-		return do(dir, func(entries fs.DirEntries) error {
+		return do(ctx, dir, func(entries fs.DirEntries) error {
 			// we got called back with a set of entries so let's cache them and call the original callback
 			for _, entry := range entries {
 				switch o := entry.(type) {
 				case fs.Object:
-					_ = f.cache.AddObject(ObjectFromOriginal(f, o))
+					_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
 				case fs.Directory:
-					_ = f.cache.AddDir(DirectoryFromOriginal(f, o))
+					_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
 				default:
 					return errors.Errorf("Unknown object type %T", entry)
 				}
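For wrapping backends like cache, the optional-feature call is the interesting case: when the wrapped Fs offers its own ListR it is invoked as do(ctx, dir, callback), so cancellation crosses the wrapping boundary instead of stopping at it. A compact sketch of that delegation (types and names are illustrative, not the rclone feature API):

package sketch

import "context"

type listRFunc func(ctx context.Context, dir string, cb func(names []string) error) error

// listR prefers the wrapped layer's recursive lister when one exists,
// forwarding the caller's ctx and callback untouched; otherwise it falls back
// to a trivial non-recursive listing for brevity.
func listR(ctx context.Context, dir string, cb func(names []string) error, fast listRFunc) error {
	if fast != nil {
		return fast(ctx, dir, cb)
	}
	return cb([]string{dir + "/example.txt"})
}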
@ -1133,7 +1133,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
|||||||
|
|
||||||
// if we're here, we're gonna do a standard recursive traversal and cache everything
|
// if we're here, we're gonna do a standard recursive traversal and cache everything
|
||||||
list := walk.NewListRHelper(callback)
|
list := walk.NewListRHelper(callback)
|
||||||
err = f.recurse(dir, list)
|
err = f.recurse(ctx, dir, list)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1142,9 +1142,9 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Mkdir makes the directory (container, bucket)
|
// Mkdir makes the directory (container, bucket)
|
||||||
func (f *Fs) Mkdir(dir string) error {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||||
fs.Debugf(f, "mkdir '%s'", dir)
|
fs.Debugf(f, "mkdir '%s'", dir)
|
||||||
err := f.Fs.Mkdir(dir)
|
err := f.Fs.Mkdir(ctx, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -1172,7 +1172,7 @@ func (f *Fs) Mkdir(dir string) error {
}

// Rmdir removes the directory (container, bucket) if empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
fs.Debugf(f, "rmdir '%s'", dir)

if f.opt.TempWritePath != "" {
@@ -1182,9 +1182,9 @@ func (f *Fs) Rmdir(dir string) error {

// we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step
-_, err := f.UnWrap().List(dir)
+_, err := f.UnWrap().List(ctx, dir)
if err == nil {
-err := f.Fs.Rmdir(dir)
+err := f.Fs.Rmdir(ctx, dir)
if err != nil {
return err
}
@@ -1192,10 +1192,10 @@ func (f *Fs) Rmdir(dir string) error {
}

var queuedEntries []*Object
-err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
-co := ObjectFromOriginal(f, oo)
+co := ObjectFromOriginal(ctx, f, oo)
queuedEntries = append(queuedEntries, co)
}
}
@@ -1212,7 +1212,7 @@ func (f *Fs) Rmdir(dir string) error {
}
}
} else {
-err := f.Fs.Rmdir(dir)
+err := f.Fs.Rmdir(ctx, dir)
if err != nil {
return err
}
@@ -1243,7 +1243,7 @@ func (f *Fs) Rmdir(dir string) error {

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)

do := f.Fs.Features().DirMove
@@ -1265,8 +1265,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
f.backgroundRunner.pause()
defer f.backgroundRunner.play()

-_, errInWrap := srcFs.UnWrap().List(srcRemote)
-_, errInTemp := f.tempFs.List(srcRemote)
+_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
+_, errInTemp := f.tempFs.List(ctx, srcRemote)
// not found in either fs
if errInWrap != nil && errInTemp != nil {
return fs.ErrorDirNotFound
@@ -1275,7 +1275,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
// we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step
if errInWrap == nil {
-err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil {
return err
}
@@ -1288,10 +1288,10 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

var queuedEntries []*Object
-err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
-co := ObjectFromOriginal(f, oo)
+co := ObjectFromOriginal(ctx, f, oo)
queuedEntries = append(queuedEntries, co)
if co.tempFileStartedUpload() {
fs.Errorf(co, "can't move - upload has already started. need to finish that")
@@ -1312,16 +1312,16 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
return fs.ErrorCantDirMove
}
-err = do(f.tempFs, srcRemote, dstRemote)
+err = do(ctx, f.tempFs, srcRemote, dstRemote)
if err != nil {
return err
}
-err = f.cache.ReconcileTempUploads(f)
+err = f.cache.ReconcileTempUploads(ctx, f)
if err != nil {
return err
}
} else {
-err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil {
return err
}
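Threading ctx into recurse and walk.ListR is what allows a long recursive traversal to stop early once the caller cancels. A small illustrative sketch under assumed names (lister and recurse here are hypothetical helpers, not rclone APIs):

package walksketch

import "context"

// lister returns the immediate subdirectories of dir (assumed helper).
type lister func(ctx context.Context, dir string) ([]string, error)

// recurse walks dir depth-first, bailing out as soon as ctx is cancelled.
func recurse(ctx context.Context, dir string, list lister) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	subdirs, err := list(ctx, dir)
	if err != nil {
		return err
	}
	for _, d := range subdirs {
		if err := recurse(ctx, d, list); err != nil {
			return err
		}
	}
	return nil
}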
@@ -1427,10 +1427,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}
}

-type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
+type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)

// put in to the remote path
-func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
var err error
var obj fs.Object

@@ -1441,7 +1441,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
_ = f.cache.ExpireDir(parentCd)
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

-obj, err = f.tempFs.Put(in, src, options...)
+obj, err = f.tempFs.Put(ctx, in, src, options...)
if err != nil {
fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
return nil, err
@@ -1456,14 +1456,14 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
// if cache writes is enabled write it first through cache
} else if f.opt.StoreWrites {
f.cacheReader(in, src, func(inn io.Reader) {
-obj, err = put(inn, src, options...)
+obj, err = put(ctx, inn, src, options...)
})
if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
}
// last option: save it directly in remote fs
} else {
-obj, err = put(in, src, options...)
+obj, err = put(ctx, in, src, options...)
if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs")
}
@@ -1475,7 +1475,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
}

// cache the new file
-cachedObj := ObjectFromOriginal(f, obj)
+cachedObj := ObjectFromOriginal(ctx, f, obj)

// deleting cached chunks and info to be replaced with new ones
_ = f.cache.RemoveObject(cachedObj.abs())
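The putFn change above means every upload path (Put, PutUnchecked, PutStream) receives the caller's context through the shared put helper, whichever implementation is plugged in. A stripped-down sketch of that shape, with hypothetical names:

package putsketch

import (
	"context"
	"io"
)

// putFn mirrors the shape used above: the context travels with every upload.
type putFn func(ctx context.Context, in io.Reader, name string) error

// put shows that whichever implementation is passed in (plain Put,
// PutUnchecked, PutStream) is called with the same ctx the caller supplied.
func put(ctx context.Context, in io.Reader, name string, do putFn) error {
	return do(ctx, in, name)
}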
@@ -1498,33 +1498,33 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
}

// Put in to the remote path with the modTime given of the given size
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
fs.Debugf(f, "put data at '%s'", src.Remote())
-return f.put(in, src, options, f.Fs.Put)
+return f.put(ctx, in, src, options, f.Fs.Put)
}

// PutUnchecked uploads the object
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
-return f.put(in, src, options, do)
+return f.put(ctx, in, src, options, do)
}

// PutStream uploads the object
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutStream
if do == nil {
return nil, errors.New("can't PutStream")
}
fs.Debugf(f, "put data streaming in '%s'", src.Remote())
-return f.put(in, src, options, do)
+return f.put(ctx, in, src, options, do)
}

// Copy src to this remote using server side copy operations.
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)

do := f.Fs.Features().Copy
@@ -1544,7 +1544,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
return nil, fs.ErrorCantCopy
}
// refresh from source or abort
-if err := srcObj.refreshFromSource(false); err != nil {
+if err := srcObj.refreshFromSource(ctx, false); err != nil {
fs.Errorf(f, "can't copy %v - %v", src, err)
return nil, fs.ErrorCantCopy
}
@@ -1563,7 +1563,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
}

-obj, err := do(srcObj.Object, remote)
+obj, err := do(ctx, srcObj.Object, remote)
if err != nil {
fs.Errorf(srcObj, "error moving in cache: %v", err)
return nil, err
@@ -1571,7 +1571,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(obj, "copy: file copied")

// persist new
-co := ObjectFromOriginal(f, obj).persist()
+co := ObjectFromOriginal(ctx, f, obj).persist()
fs.Debugf(co, "copy: added to cache")
// expire the destination path
parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
@@ -1598,7 +1598,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}

// Move src to this remote using server side move operations.
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)

// if source fs doesn't support move abort
@@ -1619,7 +1619,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
return nil, fs.ErrorCantMove
}
// refresh from source or abort
-if err := srcObj.refreshFromSource(false); err != nil {
+if err := srcObj.refreshFromSource(ctx, false); err != nil {
fs.Errorf(f, "can't move %v - %v", src, err)
return nil, fs.ErrorCantMove
}
@@ -1655,7 +1655,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(srcObj, "move: queued file moved to %v", remote)
}

-obj, err := do(srcObj.Object, remote)
+obj, err := do(ctx, srcObj.Object, remote)
if err != nil {
fs.Errorf(srcObj, "error moving: %v", err)
return nil, err
@@ -1680,7 +1680,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// advertise to ChangeNotify if wrapped doesn't do that
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
// persist new
-cachedObj := ObjectFromOriginal(f, obj).persist()
+cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
fs.Debugf(cachedObj, "move: added to cache")
// expire new parent
parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
@@ -1702,7 +1702,7 @@ func (f *Fs) Hashes() hash.Set {
}

// Purge all files in the root and the root directory
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
fs.Infof(f, "purging cache")
f.cache.Purge()

@@ -1711,7 +1711,7 @@ func (f *Fs) Purge() error {
return nil
}

-err := do()
+err := do(ctx)
if err != nil {
return err
}
@@ -1720,7 +1720,7 @@ func (f *Fs) Purge() error {
}

// CleanUp the trash in the Fs
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
f.CleanUpCache(false)

do := f.Fs.Features().CleanUp
@@ -1728,16 +1728,16 @@ func (f *Fs) CleanUp() error {
return nil
}

-return do()
+return do(ctx)
}

// About gets quota information from the Fs
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
}
-return do()
+return do(ctx)
}

// Stats returns stats about the cache storage
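Purge, CleanUp and About keep the optional-feature pattern: look the function up on Features(), bail out if it is nil, otherwise call it, now with ctx. A generic sketch of that shape (the types here are invented for illustration, not the rclone Features struct):

package featuresketch

import (
	"context"
	"errors"
)

// features stands in for the optional-feature table; About is nil when the
// wrapped backend does not support it.
type features struct {
	About func(ctx context.Context) (int64, error)
}

func quota(ctx context.Context, f features) (int64, error) {
	do := f.About
	if do == nil {
		return 0, errors.New("About not supported")
	}
	return do(ctx) // the looked-up feature now receives the caller's ctx
}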
backend/cache/cache_internal_test.go (vendored, 119 changed lines)
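On the test side the change is uniform: call sites that previously took no context now pass context.Background(). A hypothetical sketch of what that looks like, and of the cancellation a test could opt into later (mkdirer and testMkdir are illustrative names, not from this commit):

package cachesketch

import (
	"context"
	"testing"
	"time"
)

// mkdirer stands in for fs.Fs in these tests (assumed minimal interface).
type mkdirer interface {
	Mkdir(ctx context.Context, dir string) error
}

func testMkdir(t *testing.T, f mkdirer) {
	// context.Background() keeps the old behaviour; a timeout shows what the
	// new parameter makes possible if a test ever needs it.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	if err := f.Mkdir(ctx, "test"); err != nil {
		t.Fatal(err)
	}
}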
@@ -4,6 +4,7 @@ package cache_test

import (
"bytes"
+"context"
"encoding/base64"
goflag "flag"
"fmt"
@@ -120,7 +121,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
require.NoError(t, err)
listRootInner, err := runInstance.list(t, rootFs, innerFolder)
require.NoError(t, err)
-listInner, err := rootFs2.List("")
+listInner, err := rootFs2.List(context.Background(), "")
require.NoError(t, err)

require.Len(t, listRoot, 1)
@@ -138,10 +139,10 @@ func TestInternalVfsCache(t *testing.T) {
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

-err := rootFs.Mkdir("test")
+err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
runInstance.writeObjectString(t, rootFs, "test/second", "content")
-_, err = rootFs.List("test")
+_, err = rootFs.List(context.Background(), "test")
require.NoError(t, err)

testReader := runInstance.randomReader(t, testSize)
@@ -266,7 +267,7 @@ func TestInternalObjNotFound(t *testing.T) {
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
defer runInstance.cleanupFs(t, rootFs, boltDb)

-obj, err := rootFs.NewObject("404")
+obj, err := rootFs.NewObject(context.Background(), "404")
require.Error(t, err)
require.Nil(t, obj)
}
@@ -445,7 +446,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
require.NoError(t, err)
log.Printf("original size: %v", originalSize)

-o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
expectedSize := int64(len([]byte("test content")))
var data2 []byte
@@ -457,7 +458,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
data2 = []byte("test content")
}
objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
-err = o.Update(bytes.NewReader(data2), objInfo)
+err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
require.NoError(t, err)
require.Equal(t, int64(len(data2)), o.Size())
log.Printf("updated size: %v", len(data2))
@@ -503,9 +504,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
} else {
testData = []byte("test content")
}
-_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
-_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
-_ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
+_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
+_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
+_ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)

// list in mount
@@ -515,7 +516,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
require.NoError(t, err)

// move file
-_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
+_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
require.NoError(t, err)

err = runInstance.retryBlock(func() error {
@@ -589,9 +590,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
} else {
testData = []byte("test content")
}
-err = rootFs.Mkdir("test")
+err = rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
-err = rootFs.Mkdir("test/one")
+err = rootFs.Mkdir(context.Background(), "test/one")
require.NoError(t, err)
srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)

@@ -608,7 +609,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
require.False(t, found)

// move file
-_, err = cfs.UnWrap().Features().Move(srcObj, dstName)
+_, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
require.NoError(t, err)

err = runInstance.retryBlock(func() error {
@@ -670,23 +671,23 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)

// update in the wrapped fs
-o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
-err = o.SetModTime(wrappedTime)
+err = o.SetModTime(context.Background(), wrappedTime)
require.NoError(t, err)

// get a new instance from the cache
-co, err := rootFs.NewObject("data.bin")
+co, err := rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
-require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())

cfs.DirCacheFlush() // flush the cache

// get a new instance from the cache
-co, err = rootFs.NewObject("data.bin")
+co, err = rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
-require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
+require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
}

func TestInternalChangeSeenAfterRc(t *testing.T) {
@@ -713,19 +714,19 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)

// update in the wrapped fs
-o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
wrappedTime := time.Now().Add(-1 * time.Hour)
-err = o.SetModTime(wrappedTime)
+err = o.SetModTime(context.Background(), wrappedTime)
require.NoError(t, err)

// get a new instance from the cache
-co, err := rootFs.NewObject("data.bin")
+co, err := rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
-require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())

// Call the rc function
-m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
+m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
@@ -733,9 +734,9 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.Contains(t, m["message"], "cached file cleared")

// get a new instance from the cache
-co, err = rootFs.NewObject("data.bin")
+co, err = rootFs.NewObject(context.Background(), "data.bin")
require.NoError(t, err)
-require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
+require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
_, err = runInstance.list(t, rootFs, "")
require.NoError(t, err)

@@ -749,7 +750,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
require.Len(t, li1, 1)

// Call the rc function
-m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
+m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
require.NoError(t, err)
require.Contains(t, m, "status")
require.Contains(t, m, "message")
@@ -794,7 +795,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
// create some rand test data
testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
-o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
require.NoError(t, err)
co, ok := o.(*cache.Object)
require.True(t, ok)
@@ -833,7 +834,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
require.NoError(t, err)
require.Len(t, l, 1)

-err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
+err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
require.NoError(t, err)

l, err = runInstance.list(t, rootFs, "test")
@@ -868,14 +869,14 @@ func TestInternalBug2117(t *testing.T) {
cfs, err := runInstance.getCacheFs(rootFs)
require.NoError(t, err)

-err = cfs.UnWrap().Mkdir("test")
+err = cfs.UnWrap().Mkdir(context.Background(), "test")
require.NoError(t, err)
for i := 1; i <= 4; i++ {
-err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
+err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
require.NoError(t, err)

for j := 1; j <= 4; j++ {
-err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
+err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
require.NoError(t, err)

runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
@@ -1080,10 +1081,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}

if purge {
-_ = f.Features().Purge()
+_ = f.Features().Purge(context.Background())
require.NoError(t, err)
}
-err = f.Mkdir("")
+err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
if r.useMount && !r.isMounted {
r.mountFs(t, f)
@@ -1097,7 +1098,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
r.unmountFs(t, f)
}

-err := f.Features().Purge()
+err := f.Features().Purge(context.Background())
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
@@ -1199,7 +1200,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
in := bytes.NewReader(data)
_ = r.writeObjectReader(t, f, remote, in)
-o, err := f.NewObject(remote)
+o, err := f.NewObject(context.Background(), remote)
require.NoError(t, err)
require.Equal(t, int64(len(data)), o.Size())
return o
@@ -1208,7 +1209,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
modTime := time.Now()
objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
-obj, err := f.Put(in, objInfo)
+obj, err := f.Put(context.Background(), in, objInfo)
require.NoError(t, err)
if r.useMount {
r.vfs.WaitForWriters(10 * time.Second)
@@ -1228,18 +1229,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
require.NoError(t, err)
r.vfs.WaitForWriters(10 * time.Second)
-obj, err = f.NewObject(remote)
+obj, err = f.NewObject(context.Background(), remote)
} else {
in1 := bytes.NewReader(data1)
in2 := bytes.NewReader(data2)
objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)

-obj, err = f.Put(in1, objInfo1)
+obj, err = f.Put(context.Background(), in1, objInfo1)
require.NoError(t, err)
-obj, err = f.NewObject(remote)
+obj, err = f.NewObject(context.Background(), remote)
require.NoError(t, err)
-err = obj.Update(in2, objInfo2)
+err = obj.Update(context.Background(), in2, objInfo2)
}
require.NoError(t, err)

@@ -1268,7 +1269,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
return checkSample, err
}
} else {
-co, err := f.NewObject(remote)
+co, err := f.NewObject(context.Background(), remote)
if err != nil {
return checkSample, err
}
@@ -1283,7 +1284,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
size := end - offset
checkSample := make([]byte, size)
-reader, err := o.Open(&fs.SeekOption{Offset: offset})
+reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
require.NoError(t, err)
totalRead, err := io.ReadFull(reader, checkSample)
if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
@@ -1300,7 +1301,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
if r.useMount {
err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
} else {
-err = f.Mkdir(remote)
+err = f.Mkdir(context.Background(), remote)
}
require.NoError(t, err)
}
@@ -1312,11 +1313,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
err = os.Remove(path.Join(r.mntDir, remote))
} else {
var obj fs.Object
-obj, err = f.NewObject(remote)
+obj, err = f.NewObject(context.Background(), remote)
if err != nil {
-err = f.Rmdir(remote)
+err = f.Rmdir(context.Background(), remote)
} else {
-err = obj.Remove()
+err = obj.Remove(context.Background())
}
}

@@ -1334,7 +1335,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
}
} else {
var list fs.DirEntries
-list, err = f.List(remote)
+list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll)
}
@@ -1353,7 +1354,7 @@ func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
}
} else {
var list fs.DirEntries
-list, err = f.List(remote)
+list, err = f.List(context.Background(), remote)
for _, ll := range list {
l = append(l, ll.Remote())
}
@@ -1393,7 +1394,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().DirMove != nil {
-err = rootFs.Features().DirMove(rootFs, src, dst)
+err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
if err != nil {
return err
}
@@ -1415,11 +1416,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Move != nil {
-obj1, err := rootFs.NewObject(src)
+obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
-_, err = rootFs.Features().Move(obj1, dst)
+_, err = rootFs.Features().Move(context.Background(), obj1, dst)
if err != nil {
return err
}
@@ -1441,11 +1442,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
}
r.vfs.WaitForWriters(10 * time.Second)
} else if rootFs.Features().Copy != nil {
-obj, err := rootFs.NewObject(src)
+obj, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
-_, err = rootFs.Features().Copy(obj, dst)
+_, err = rootFs.Features().Copy(context.Background(), obj, dst)
if err != nil {
return err
}
@@ -1467,11 +1468,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
}
return fi.ModTime(), nil
}
-obj1, err := rootFs.NewObject(src)
+obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return time.Time{}, err
}
-return obj1.ModTime(), nil
+return obj1.ModTime(context.Background()), nil
}

func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
@@ -1484,7 +1485,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
}
return fi.Size(), nil
}
-obj1, err := rootFs.NewObject(src)
+obj1, err := rootFs.NewObject(context.Background(), src)
if err != nil {
return int64(0), err
}
@@ -1507,14 +1508,14 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
_, err = f.WriteString(data + append)
} else {
var obj1 fs.Object
-obj1, err = rootFs.NewObject(src)
+obj1, err = rootFs.NewObject(context.Background(), src)
if err != nil {
return err
}
data1 := []byte(data + append)
r := bytes.NewReader(data1)
objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
-err = obj1.Update(r, objInfo1)
+err = obj1.Update(context.Background(), r, objInfo1)
}

return err
backend/cache/cache_upload_test.go (vendored, 47 changed lines)
@@ -3,6 +3,7 @@
package cache_test

import (
+"context"
"fmt"
"math/rand"
"os"
@@ -85,11 +86,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

-err := rootFs.Mkdir("one")
+err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
-err = rootFs.Mkdir("one/test")
+err = rootFs.Mkdir(context.Background(), "one/test")
require.NoError(t, err)
-err = rootFs.Mkdir("second")
+err = rootFs.Mkdir(context.Background(), "second")
require.NoError(t, err)

// create some rand test data
@@ -122,11 +123,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

-err := rootFs.Mkdir("one")
+err := rootFs.Mkdir(context.Background(), "one")
require.NoError(t, err)
-err = rootFs.Mkdir("one/test")
+err = rootFs.Mkdir(context.Background(), "one/test")
require.NoError(t, err)
-err = rootFs.Mkdir("second")
+err = rootFs.Mkdir(context.Background(), "second")
require.NoError(t, err)

// create some rand test data
@@ -165,7 +166,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

-err := rootFs.Mkdir("test")
+err := rootFs.Mkdir(context.Background(), "test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
@@ -233,9 +234,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.Error(t, err)
-_, err = rootFs.NewObject("second/one")
+_, err = rootFs.NewObject(context.Background(), "second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -256,7 +257,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -270,9 +271,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.Error(t, err)
-_, err = rootFs.NewObject("test/second")
+_, err = rootFs.NewObject(context.Background(), "test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
@@ -289,9 +290,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.NoError(t, err)
-_, err = rootFs.NewObject("test/third")
+_, err = rootFs.NewObject(context.Background(), "test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
@@ -306,7 +307,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -318,7 +319,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
-obj2, err := rootFs.NewObject("test/one")
+obj2, err := rootFs.NewObject(context.Background(), "test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
@@ -366,7 +367,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -378,7 +379,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -389,9 +390,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
if err != errNotSupported {
require.Error(t, err)
// try to read from it
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.NoError(t, err)
-_, err = rootFs.NewObject("test/second")
+_, err = rootFs.NewObject(context.Background(), "test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -404,9 +405,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.NoError(t, err)
-_, err = rootFs.NewObject("test/third")
+_, err = rootFs.NewObject(context.Background(), "test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
@@ -421,7 +422,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
-_, err = rootFs.NewObject("test/one")
+_, err = rootFs.NewObject(context.Background(), "test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
backend/cache/directory.go (vendored, 7 changed lines)
@@ -3,6 +3,7 @@
package cache

import (
+"context"
"path"
"time"

@@ -55,7 +56,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
}

// DirectoryFromOriginal builds one from a generic fs.Directory
-func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
+func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
var cd *Directory
fullRemote := path.Join(f.Root(), d.Remote())

@@ -67,7 +68,7 @@ func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
CacheFs: f,
Name: name,
Dir: dir,
-CacheModTime: d.ModTime().UnixNano(),
+CacheModTime: d.ModTime(ctx).UnixNano(),
CacheSize: d.Size(),
CacheItems: d.Items(),
CacheType: "Directory",
@@ -110,7 +111,7 @@ func (d *Directory) parentRemote() string {
}

// ModTime returns the cached ModTime
-func (d *Directory) ModTime() time.Time {
+func (d *Directory) ModTime(ctx context.Context) time.Time {
return time.Unix(0, d.CacheModTime)
}

backend/cache/handle.go (vendored, 21 changed lines)
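In handle.go the Handle stores the context it was opened with so that the workers created later (reader, download) can reuse it when they reopen the source object. A reduced sketch of that design with invented types; the usual caveat about keeping contexts in struct fields applies, but the handle's lifetime matches the open call here:

package handlesketch

import (
	"context"
	"io"
)

// opener mirrors the object being wrapped (assumed interface).
type opener interface {
	Open(ctx context.Context) (io.ReadCloser, error)
}

// handle keeps the ctx from the original open so later reads inherit the
// same cancellation and request-scoped values.
type handle struct {
	ctx context.Context
	obj opener
}

func newHandle(ctx context.Context, obj opener) *handle {
	return &handle{ctx: ctx, obj: obj}
}

func (h *handle) read() (io.ReadCloser, error) {
	return h.obj.Open(h.ctx)
}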
@@ -3,6 +3,7 @@
package cache

import (
+"context"
"fmt"
"io"
"path"
@@ -40,6 +41,7 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {

// Handle is managing the read/write/seek operations on an open handle
type Handle struct {
+ctx context.Context
cachedObject *Object
cfs *Fs
memory *Memory
@@ -58,8 +60,9 @@ type Handle struct {
}

// NewObjectHandle returns a new Handle for an existing Object
-func NewObjectHandle(o *Object, cfs *Fs) *Handle {
+func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
r := &Handle{
+ctx: ctx,
cachedObject: o,
cfs: cfs,
offset: 0,
@@ -351,7 +354,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
r := w.rc
if w.rc == nil {
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
+return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
})
if err != nil {
return nil, err
@@ -361,7 +364,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error

if !closeOpen {
if do, ok := r.(fs.RangeSeeker); ok {
-_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
+_, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
return r, err
} else if do, ok := r.(io.Seeker); ok {
_, err = do.Seek(offset, io.SeekStart)
@@ -371,7 +374,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error

_ = w.rc.Close()
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
+r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
if err != nil {
return nil, err
}
@@ -449,7 +452,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
// we seem to be getting only errors so we abort
if err != nil {
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
-err = w.r.cachedObject.refreshFromSource(true)
+err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
if err != nil {
fs.Errorf(w, "%v", err)
}
@@ -462,7 +465,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
|
|||||||
sourceRead, err = io.ReadFull(w.rc, data)
|
sourceRead, err = io.ReadFull(w.rc, data)
|
||||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||||
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
|
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
|
||||||
err = w.r.cachedObject.refreshFromSource(true)
|
err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fs.Errorf(w, "%v", err)
|
fs.Errorf(w, "%v", err)
|
||||||
}
|
}
|
||||||
@ -588,7 +591,7 @@ func (b *backgroundWriter) run() {
|
|||||||
remote := b.fs.cleanRootFromPath(absPath)
|
remote := b.fs.cleanRootFromPath(absPath)
|
||||||
b.notify(remote, BackgroundUploadStarted, nil)
|
b.notify(remote, BackgroundUploadStarted, nil)
|
||||||
fs.Infof(remote, "background upload: started upload")
|
fs.Infof(remote, "background upload: started upload")
|
||||||
err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
|
err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.notify(remote, BackgroundUploadError, err)
|
b.notify(remote, BackgroundUploadError, err)
|
||||||
_ = b.fs.cache.rollbackPendingUpload(absPath)
|
_ = b.fs.cache.rollbackPendingUpload(absPath)
|
||||||
@ -598,14 +601,14 @@ func (b *backgroundWriter) run() {
|
|||||||
// clean empty dirs up to root
|
// clean empty dirs up to root
|
||||||
thisDir := cleanPath(path.Dir(remote))
|
thisDir := cleanPath(path.Dir(remote))
|
||||||
for thisDir != "" {
|
for thisDir != "" {
|
||||||
thisList, err := b.fs.tempFs.List(thisDir)
|
thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if len(thisList) > 0 {
|
if len(thisList) > 0 {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
err = b.fs.tempFs.Rmdir(thisDir)
|
err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
|
||||||
fs.Debugf(thisDir, "cleaned from temp path")
|
fs.Debugf(thisDir, "cleaned from temp path")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
break
|
break
|
||||||
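The handle.go hunks above follow one pattern: the context given when the handle is created is stored on the Handle and reused by its read workers, while the background uploader, which runs outside any request, falls back to context.TODO(). A minimal standard-library sketch of that pattern (all names invented, not rclone code):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// handle stores the caller's context so that worker goroutines spawned later
// can observe cancellation, mirroring the cache Handle's new ctx field.
type handle struct {
	ctx context.Context
}

func newHandle(ctx context.Context) *handle {
	return &handle{ctx: ctx}
}

// fetch stands in for a chunk download; it gives up early if the handle's
// context is cancelled, which is what threading ctx through buys us.
func (h *handle) fetch(chunk int) error {
	select {
	case <-h.ctx.Done():
		return h.ctx.Err()
	case <-time.After(10 * time.Millisecond):
		fmt.Println("fetched chunk", chunk)
		return nil
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	h := newHandle(ctx)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if err := h.fetch(i); err != nil {
				fmt.Println("worker", i, "stopped:", err)
			}
		}(i)
	}
	cancel() // cancelling the caller's context stops in-flight workers
	wg.Wait()
}

The background writer in the diff uses context.TODO() rather than a stored context because uploads are detached from the request that queued them; a cancellation-aware context could be wired in later.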
backend/cache/object.go (vendored)
@@ -3,6 +3,7 @@
 package cache
 
 import (
+	"context"
 	"io"
 	"path"
 	"sync"
@@ -68,7 +69,7 @@ func NewObject(f *Fs, remote string) *Object {
 }
 
 // ObjectFromOriginal builds one from a generic fs.Object
-func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
+func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
 	var co *Object
 	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
 	dir, name := path.Split(fullRemote)
@@ -92,13 +93,13 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
 		CacheType: cacheType,
 		CacheTs:   time.Now(),
 	}
-	co.updateData(o)
+	co.updateData(ctx, o)
 	return co
 }
 
-func (o *Object) updateData(source fs.Object) {
+func (o *Object) updateData(ctx context.Context, source fs.Object) {
 	o.Object = source
-	o.CacheModTime = source.ModTime().UnixNano()
+	o.CacheModTime = source.ModTime(ctx).UnixNano()
 	o.CacheSize = source.Size()
 	o.CacheStorable = source.Storable()
 	o.CacheTs = time.Now()
@@ -130,20 +131,20 @@ func (o *Object) abs() string {
 }
 
 // ModTime returns the cached ModTime
-func (o *Object) ModTime() time.Time {
-	_ = o.refresh()
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	_ = o.refresh(ctx)
 	return time.Unix(0, o.CacheModTime)
 }
 
 // Size returns the cached Size
 func (o *Object) Size() int64 {
-	_ = o.refresh()
+	_ = o.refresh(context.TODO())
 	return o.CacheSize
 }
 
 // Storable returns the cached Storable
 func (o *Object) Storable() bool {
-	_ = o.refresh()
+	_ = o.refresh(context.TODO())
 	return o.CacheStorable
 }
 
@@ -151,18 +152,18 @@ func (o *Object) Storable() bool {
 // all these conditions must be true to ignore a refresh
 // 1. cache ts didn't expire yet
 // 2. is not pending a notification from the wrapped fs
-func (o *Object) refresh() error {
+func (o *Object) refresh(ctx context.Context) error {
 	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
 	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
 	if !isExpired && !isNotified {
 		return nil
 	}
 
-	return o.refreshFromSource(true)
+	return o.refreshFromSource(ctx, true)
 }
 
 // refreshFromSource requests the original FS for the object in case it comes from a cached entry
-func (o *Object) refreshFromSource(force bool) error {
+func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
 	o.refreshMutex.Lock()
 	defer o.refreshMutex.Unlock()
 	var err error
@@ -172,29 +173,29 @@ func (o *Object) refreshFromSource(force bool) error {
 		return nil
 	}
 	if o.isTempFile() {
-		liveObject, err = o.ParentFs.NewObject(o.Remote())
+		liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
 		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
 	} else {
-		liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
+		liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
 		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
 	}
 	if err != nil {
 		fs.Errorf(o, "error refreshing object in : %v", err)
 		return err
 	}
-	o.updateData(liveObject)
+	o.updateData(ctx, liveObject)
 	o.persist()
 
 	return nil
 }
 
 // SetModTime sets the ModTime of this object
-func (o *Object) SetModTime(t time.Time) error {
-	if err := o.refreshFromSource(false); err != nil {
+func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return err
 	}
 
-	err := o.Object.SetModTime(t)
+	err := o.Object.SetModTime(ctx, t)
 	if err != nil {
 		return err
 	}
@@ -207,19 +208,19 @@ func (o *Object) SetModTime(t time.Time) error {
 }
 
 // Open is used to request a specific part of the file using fs.RangeOption
-func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
 	var err error
 
 	if o.Object == nil {
-		err = o.refreshFromSource(true)
+		err = o.refreshFromSource(ctx, true)
 	} else {
-		err = o.refresh()
+		err = o.refresh(ctx)
 	}
 	if err != nil {
 		return nil, err
 	}
 
-	cacheReader := NewObjectHandle(o, o.CacheFs)
+	cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -238,8 +239,8 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 }
 
 // Update will change the object data
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	if err := o.refreshFromSource(false); err != nil {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -254,7 +255,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	fs.Debugf(o, "updating object contents with size %v", src.Size())
 
 	// FIXME use reliable upload
-	err := o.Object.Update(in, src, options...)
+	err := o.Object.Update(ctx, in, src, options...)
 	if err != nil {
 		fs.Errorf(o, "error updating source: %v", err)
 		return err
@@ -265,7 +266,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	// advertise to ChangeNotify if wrapped doesn't do that
 	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
 
-	o.CacheModTime = src.ModTime().UnixNano()
+	o.CacheModTime = src.ModTime(ctx).UnixNano()
 	o.CacheSize = src.Size()
 	o.CacheHashes = make(map[hash.Type]string)
 	o.CacheTs = time.Now()
@@ -275,8 +276,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }
 
 // Remove deletes the object from both the cache and the source
-func (o *Object) Remove() error {
-	if err := o.refreshFromSource(false); err != nil {
+func (o *Object) Remove(ctx context.Context) error {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return err
 	}
 	// pause background uploads if active
@@ -288,7 +289,7 @@ func (o *Object) Remove() error {
 			return errors.Errorf("%v is currently uploading, can't delete", o)
 		}
 	}
-	err := o.Object.Remove()
+	err := o.Object.Remove(ctx)
 	if err != nil {
 		return err
 	}
@@ -306,8 +307,8 @@ func (o *Object) Remove() error {
 
 // Hash requests a hash of the object and stores in the cache
 // since it might or might not be called, this is lazy loaded
-func (o *Object) Hash(ht hash.Type) (string, error) {
-	_ = o.refresh()
+func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
+	_ = o.refresh(ctx)
 	if o.CacheHashes == nil {
 		o.CacheHashes = make(map[hash.Type]string)
 	}
@@ -316,10 +317,10 @@ func (o *Object) Hash(ht hash.Type) (string, error) {
 	if found {
 		return cachedHash, nil
 	}
-	if err := o.refreshFromSource(false); err != nil {
+	if err := o.refreshFromSource(ctx, false); err != nil {
 		return "", err
 	}
-	liveHash, err := o.Object.Hash(ht)
+	liveHash, err := o.Object.Hash(ctx, ht)
 	if err != nil {
 		return "", err
 	}
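The cached Object above threads the caller's context into its lazy refresh: refresh(ctx) only hits the wrapped backend when the cache entry is stale, and methods with no context of their own (Size, Storable) pass context.TODO(). A reduced, illustrative sketch of that guard (invented names, not rclone code; refreshFromSource stands in for the real lookup):

package main

import (
	"context"
	"fmt"
	"time"
)

type cachedObject struct {
	cacheTs time.Time
	infoAge time.Duration
}

// refreshFromSource is where a real implementation would call the wrapped
// backend (e.g. NewObject(ctx, remote)); ctx is what reaches that call.
func (o *cachedObject) refreshFromSource(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err // caller already gave up
	}
	fmt.Println("refreshing from source")
	o.cacheTs = time.Now()
	return nil
}

// refresh skips the remote call entirely while the cache is fresh, so ctx is
// only consulted when a network round trip would actually happen.
func (o *cachedObject) refresh(ctx context.Context) error {
	if time.Since(o.cacheTs) < o.infoAge {
		return nil
	}
	return o.refreshFromSource(ctx)
}

func main() {
	o := &cachedObject{cacheTs: time.Now().Add(-time.Hour), infoAge: time.Minute}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	_ = o.refresh(ctx)
}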
backend/cache/storage_persistent.go (vendored)
@@ -4,6 +4,7 @@ package cache
 
 import (
 	"bytes"
+	"context"
 	"encoding/binary"
 	"encoding/json"
 	"fmt"
@@ -1014,7 +1015,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
 }
 
 // ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
-func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
+func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
 	return b.db.Update(func(tx *bolt.Tx) error {
 		_ = tx.DeleteBucket([]byte(tempBucket))
 		bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -1023,7 +1024,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
 		}
 
 		var queuedEntries []fs.Object
-		err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+		err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
			for _, o := range entries {
				if oo, ok := o.(fs.Object); ok {
					queuedEntries = append(queuedEntries, oo)
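ReconcileTempUploads now accepts a context because its recursive listing (the walk.ListR call above) may touch a large tree and should be abandonable. A standard-library stand-in for that idea (not rclone code; the walk and queueing here are invented for the sketch):

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// reconcile walks a temp directory and "queues" every file it finds, checking
// the context between entries so a large tree does not outlive its caller.
func reconcile(ctx context.Context, root string) error {
	return filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if err := ctx.Err(); err != nil {
			return err // cancelled or timed out: stop walking
		}
		if !info.IsDir() {
			fmt.Println("would queue:", path)
		}
		return nil
	})
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if err := reconcile(ctx, os.TempDir()); err != nil {
		fmt.Println("walk stopped:", err)
	}
}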
@@ -2,6 +2,7 @@ package crypt
 
 import (
 	"bytes"
+	"context"
 	"crypto/aes"
 	gocipher "crypto/cipher"
 	"crypto/rand"
@@ -68,7 +69,7 @@ type ReadSeekCloser interface {
 }
 
 // OpenRangeSeek opens the file handle at the offset with the limit given
-type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
+type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)
 
 // Cipher is used to swap out the encryption implementations
 type Cipher interface {
@@ -85,7 +86,7 @@ type Cipher interface {
 	// DecryptData
 	DecryptData(io.ReadCloser) (io.ReadCloser, error)
 	// DecryptDataSeek decrypt at a given position
-	DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
+	DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
 	// EncryptedSize calculates the size of the data when encrypted
 	EncryptedSize(int64) int64
 	// DecryptedSize calculates the size of the data when decrypted
@@ -755,22 +756,22 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 }
 
 // newDecrypterSeek creates a new file handle decrypting on the fly
-func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
+func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
 	var rc io.ReadCloser
 	doRangeSeek := false
 	setLimit := false
 	// Open initially with no seek
 	if offset == 0 && limit < 0 {
 		// If no offset or limit then open whole file
-		rc, err = open(0, -1)
+		rc, err = open(ctx, 0, -1)
 	} else if offset == 0 {
 		// If no offset open the header + limit worth of the file
 		_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
-		rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
+		rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit)
 		setLimit = true
 	} else {
 		// Otherwise just read the header to start with
-		rc, err = open(0, int64(fileHeaderSize))
+		rc, err = open(ctx, 0, int64(fileHeaderSize))
 		doRangeSeek = true
 	}
 	if err != nil {
@@ -783,7 +784,7 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *
 	}
 	fh.open = open // will be called by fh.RangeSeek
 	if doRangeSeek {
-		_, err = fh.RangeSeek(offset, io.SeekStart, limit)
+		_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit)
 		if err != nil {
 			_ = fh.Close()
 			return nil, err
@@ -903,7 +904,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
 // limiting the total length to limit.
 //
 // RangeSeek with a limit of < 0 is equivalent to a regular Seek.
-func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
+func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) {
 	fh.mu.Lock()
 	defer fh.mu.Unlock()
 
@@ -930,7 +931,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
 	// Can we seek underlying stream directly?
 	if do, ok := fh.rc.(fs.RangeSeeker); ok {
 		// Seek underlying stream directly
-		_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
+		_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit)
 		if err != nil {
 			return 0, fh.finish(err)
 		}
@@ -940,7 +941,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
 		fh.rc = nil
 
 		// Re-open the underlying object with the offset given
-		rc, err := fh.open(underlyingOffset, underlyingLimit)
+		rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
 		if err != nil {
 			return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
 		}
@@ -969,7 +970,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
 
 // Seek implements the io.Seeker interface
 func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
-	return fh.RangeSeek(offset, whence, -1)
+	return fh.RangeSeek(context.TODO(), offset, whence, -1)
 }
 
 // finish sets the final error and tidies up
@@ -1043,8 +1044,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 // The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
-func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
-	out, err := c.newDecrypterSeek(open, offset, limit)
+func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
+	out, err := c.newDecrypterSeek(ctx, open, offset, limit)
 	if err != nil {
 		return nil, err
 	}
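The key shape in the cipher hunks above is that the OpenRangeSeek callback and RangeSeek both gain a context parameter, so a seek that has to re-open the underlying stream still carries the caller's context. An illustrative sketch of that shape with invented types (it is not the crypt package, just the same calling pattern):

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
)

// openRangeSeek mirrors the ctx-aware open callback in the diff above.
type openRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)

type seekingReader struct {
	open openRangeSeek
	rc   io.ReadCloser
}

// RangeSeek discards the current stream and re-opens it at offset, passing
// the caller's ctx down to the open callback (the crucial part of the change).
func (r *seekingReader) RangeSeek(ctx context.Context, offset, limit int64) error {
	if r.rc != nil {
		_ = r.rc.Close()
	}
	rc, err := r.open(ctx, offset, limit)
	if err != nil {
		return err
	}
	r.rc = rc
	return nil
}

func main() {
	data := []byte("hello, range seeking world")
	open := func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
		end := int64(len(data))
		if limit >= 0 && offset+limit < end {
			end = offset + limit
		}
		return ioutil.NopCloser(bytes.NewReader(data[offset:end])), nil
	}

	r := &seekingReader{open: open}
	if err := r.RangeSeek(context.Background(), 7, 5); err != nil {
		panic(err)
	}
	got, _ := ioutil.ReadAll(r.rc)
	fmt.Printf("%s\n", got) // prints "range"
}

Plain Seek, which has no caller context available (it must satisfy io.Seeker), falls back to context.TODO() in the diff, matching the pattern used elsewhere in this commit.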
@@ -2,6 +2,7 @@ package crypt
 
 import (
 	"bytes"
+	"context"
 	"encoding/base32"
 	"fmt"
 	"io"
@@ -965,7 +966,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 
 	// Open stream with a seek of underlyingOffset
 	var reader io.ReadCloser
-	open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+	open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		end := len(ciphertext)
 		if underlyingLimit >= 0 {
 			end = int(underlyingOffset + underlyingLimit)
@@ -1006,7 +1007,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 			if offset+limit > len(plaintext) {
 				continue
 			}
-			rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
+			rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit))
 			assert.NoError(t, err)
 
 			check(rc, offset, limit)
@@ -1014,14 +1015,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	}
 
 	// Try decoding it with a single open and lots of seeks
-	fh, err := c.DecryptDataSeek(open, 0, -1)
+	fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1)
 	assert.NoError(t, err)
 	for _, offset := range trials {
 		for _, limit := range limits {
 			if offset+limit > len(plaintext) {
 				continue
 			}
-			_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
+			_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit))
 			assert.NoError(t, err)
 
 			check(fh, offset, limit)
@@ -1072,7 +1073,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	} {
 		what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
 		callCount := 0
-		testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+		testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 			switch callCount {
 			case 0:
 				assert.Equal(t, int64(0), underlyingOffset, what)
@@ -1084,11 +1085,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 				t.Errorf("Too many calls %d for %s", callCount+1, what)
 			}
 			callCount++
-			return open(underlyingOffset, underlyingLimit)
+			return open(ctx, underlyingOffset, underlyingLimit)
 		}
-		fh, err := c.DecryptDataSeek(testOpen, 0, -1)
+		fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1)
 		assert.NoError(t, err)
-		gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
+		gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit)
 		assert.NoError(t, err)
 		assert.Equal(t, gotOffset, test.offset)
 	}
@@ -2,6 +2,7 @@
 package crypt
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"strings"
@@ -232,7 +233,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
 }
 
 // Encrypt an directory file name to entries.
-func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
+func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
@@ -242,18 +243,18 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
 	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
-	*entries = append(*entries, f.newDir(dir))
+	*entries = append(*entries, f.newDir(ctx, dir))
 }
 
 // Encrypt some directory entries. This alters entries returning it as newEntries.
-func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
+func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
 	newEntries = entries[:0] // in place filter
 	for _, entry := range entries {
 		switch x := entry.(type) {
 		case fs.Object:
 			f.add(&newEntries, x)
 		case fs.Directory:
-			f.addDir(&newEntries, x)
+			f.addDir(ctx, &newEntries, x)
 		default:
 			return nil, errors.Errorf("Unknown object type %T", entry)
 		}
@@ -270,12 +271,12 @@ func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
 	if err != nil {
 		return nil, err
 	}
-	return f.encryptEntries(entries)
+	return f.encryptEntries(ctx, entries)
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -294,9 +295,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
-	return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
-		newEntries, err := f.encryptEntries(entries)
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
+	return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
+		newEntries, err := f.encryptEntries(ctx, entries)
 		if err != nil {
 			return err
 		}
@@ -305,18 +306,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 }
 
 // NewObject finds the Object at remote.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-	o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
 	if err != nil {
 		return nil, err
 	}
 	return f.newObject(o), nil
 }
 
-type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
+type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
 
 // put implements Put or PutStream
-func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
 	// Encrypt the data into wrappedIn
 	wrappedIn, err := f.cipher.EncryptData(in)
 	if err != nil {
@@ -342,7 +343,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 	}
 
 	// Transfer the data
-	o, err := put(wrappedIn, f.newObjectInfo(src), options...)
+	o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
 	if err != nil {
 		return nil, err
 	}
@@ -351,13 +352,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 	if ht != hash.None && hasher != nil {
 		srcHash := hasher.Sums()[ht]
 		var dstHash string
-		dstHash, err = o.Hash(ht)
+		dstHash, err = o.Hash(ctx, ht)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to read destination hash")
 		}
 		if srcHash != "" && dstHash != "" && srcHash != dstHash {
 			// remove object
-			err = o.Remove()
+			err = o.Remove(ctx)
 			if err != nil {
 				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 			}
@@ -373,13 +374,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.put(in, src, options, f.Fs.Put)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.put(ctx, in, src, options, f.Fs.Put)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.put(in, src, options, f.Fs.Features().PutStream)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
 }
 
 // Hashes returns the supported hash sets.
@@ -390,15 +391,15 @@ func (f *Fs) Hashes() hash.Set {
 // Mkdir makes the directory (container, bucket)
 //
 // Shouldn't return an error if it already exists
-func (f *Fs) Mkdir(dir string) error {
-	return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
 }
 
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(dir string) error {
-	return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
 }
 
 // Purge all files in the root and the root directory
@@ -407,12 +408,12 @@ func (f *Fs) Rmdir(dir string) error {
 // quicker than just running Remove() on the result of List()
 //
 // Return an error if it doesn't exist
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	do := f.Fs.Features().Purge
 	if do == nil {
 		return fs.ErrorCantPurge
 	}
-	return do()
+	return do(ctx)
 }
 
 // Copy src to this remote using server side copy operations.
@@ -424,7 +425,7 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	do := f.Fs.Features().Copy
 	if do == nil {
 		return nil, fs.ErrorCantCopy
@@ -433,7 +434,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	if !ok {
 		return nil, fs.ErrorCantCopy
 	}
-	oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
+	oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
 	if err != nil {
 		return nil, err
 	}
@@ -449,7 +450,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	do := f.Fs.Features().Move
 	if do == nil {
 		return nil, fs.ErrorCantMove
@@ -458,7 +459,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	if !ok {
 		return nil, fs.ErrorCantMove
 	}
-	oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
+	oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
 	if err != nil {
 		return nil, err
 	}
@@ -473,7 +474,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	do := f.Fs.Features().DirMove
 	if do == nil {
 		return fs.ErrorCantDirMove
@@ -483,14 +484,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
 		return fs.ErrorCantDirMove
 	}
-	return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
+	return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
 }
 
 // PutUnchecked uploads the object
 //
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	do := f.Fs.Features().PutUnchecked
 	if do == nil {
 		return nil, errors.New("can't PutUnchecked")
@@ -499,7 +500,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
 	if err != nil {
 		return nil, err
 	}
-	o, err := do(wrappedIn, f.newObjectInfo(src))
+	o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
 	if err != nil {
 		return nil, err
 	}
@@ -510,21 +511,21 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
 //
 // Implement this if you have a way of emptying the trash or
 // otherwise cleaning up old versions of files.
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
	do := f.Fs.Features().CleanUp
	if do == nil {
		return errors.New("can't CleanUp")
	}
-	return do()
+	return do(ctx)
 }
 
 // About gets quota information from the Fs
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	do := f.Fs.Features().About
	if do == nil {
		return nil, errors.New("About not supported")
	}
-	return do()
+	return do(ctx)
 }
 
 // UnWrap returns the Fs that this Fs is wrapping
@@ -556,10 +557,10 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
 // src with it, and calculates the hash given by HashType on the fly
 //
 // Note that we break lots of encapsulation in this function.
-func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
+func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
 	// Read the nonce - opening the file is sufficient to read the nonce in
 	// use a limited read so we only read the header
-	in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
+	in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
 	if err != nil {
 		return "", errors.Wrap(err, "failed to open object to read nonce")
 	}
@@ -589,7 +590,7 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
 	}
 
 	// Open the src for input
-	in, err = src.Open()
+	in, err = src.Open(ctx)
 	if err != nil {
 		return "", errors.Wrap(err, "failed to open src")
 	}
@@ -616,16 +617,16 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
 
 // MergeDirs merges the contents of all the directories passed
 // in into the first one and rmdirs the other directories.
-func (f *Fs) MergeDirs(dirs []fs.Directory) error {
+func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 	do := f.Fs.Features().MergeDirs
 	if do == nil {
 		return errors.New("MergeDirs not supported")
 	}
 	out := make([]fs.Directory, len(dirs))
 	for i, dir := range dirs {
-		out[i] = fs.NewDirCopy(dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
+		out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
 	}
-	return do(out)
+	return do(ctx, out)
 }
 
 // DirCacheFlush resets the directory cache - used in testing
@@ -638,23 +639,23 @@ func (f *Fs) DirCacheFlush() {
 }
 
 // PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
 	do := f.Fs.Features().PublicLink
 	if do == nil {
 		return "", errors.New("PublicLink not supported")
 	}
-	o, err := f.NewObject(remote)
+	o, err := f.NewObject(ctx, remote)
 	if err != nil {
 		// assume it is a directory
-		return do(f.cipher.EncryptDirName(remote))
+		return do(ctx, f.cipher.EncryptDirName(remote))
 	}
-	return do(o.(*Object).Object.Remote())
+	return do(ctx, o.(*Object).Object.Remote())
 }
 
 // ChangeNotify calls the passed function with a path
 // that has had changes. If the implementation
 // uses polling, it should adhere to the given interval.
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
 	do := f.Fs.Features().ChangeNotify
 	if do == nil {
 		return
@@ -680,7 +681,7 @@ func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalCha
 		}
 		notifyFunc(decrypted, entryType)
 	}
-	do(wrappedNotifyFunc, pollIntervalChan)
+	do(ctx, wrappedNotifyFunc, pollIntervalChan)
 }
 
 // Object describes a wrapped for being read from the Fs
@@ -733,7 +734,7 @@ func (o *Object) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *Object) Hash(ht hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
 
@@ -743,7 +744,7 @@ func (o *Object) UnWrap() fs.Object {
 }
 
 // Open opens the file for read. Call Close() on the returned io.ReadCloser
-func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 	var openOptions []fs.OpenOption
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
@@ -757,10 +758,10 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 			openOptions = append(openOptions, option)
 		}
 	}
-	rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+	rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		if underlyingOffset == 0 && underlyingLimit < 0 {
 			// Open with no seek
-			return o.Object.Open(openOptions...)
+			return o.Object.Open(ctx, openOptions...)
 		}
 		// Open stream with a range of underlyingOffset, underlyingLimit
 		end := int64(-1)
@@ -771,7 +772,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 			}
 		}
 		newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
-		return o.Object.Open(newOpenOptions...)
+		return o.Object.Open(ctx, newOpenOptions...)
 	}, offset, limit)
 	if err != nil {
 		return nil, err
@@ -780,17 +781,17 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 }
 
 // Update in to the object with the modTime given of the given size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-		return o.Object, o.Object.Update(in, src, options...)
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+		return o.Object, o.Object.Update(ctx, in, src, options...)
 	}
-	_, err := o.f.put(in, src, options, update)
+	_, err := o.f.put(ctx, in, src, options, update)
 	return err
 }
 
 // newDir returns a dir with the Name decrypted
-func (f *Fs) newDir(dir fs.Directory) fs.Directory {
-	newDir := fs.NewDirCopy(dir)
+func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
+	newDir := fs.NewDirCopy(ctx, dir)
 	remote := dir.Remote()
 	decryptedRemote, err := f.cipher.DecryptDirName(remote)
 	if err != nil {
@@ -837,7 +838,7 @@ func (o *ObjectInfo) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
+func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 	return "", nil
 }
 
@@ -9,6 +9,7 @@ package drive

import (
"bytes"
+"context"
"fmt"
"io"
"io/ioutil"
@@ -565,7 +566,7 @@ func containsString(slice []string, s string) bool {
// If the user fn ever returns true then it early exits with found = true
//
// Search params: https://developers.google.com/drive/search-parameters
-func (f *Fs) list(dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
+func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, includeAll bool, fn listFn) (found bool, err error) {
var query []string
if !includeAll {
q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
@@ -910,6 +911,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {

// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
+ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -996,7 +998,7 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
}

// Find the current root
-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
@@ -1004,12 +1006,12 @@ func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
-err = tempF.dirCache.FindRoot(false)
+err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
-_, err := tempF.NewObject(remote)
+_, err := tempF.NewObject(ctx, remote)
if err != nil {
// unable to list folder so return old f
return f, nil
@@ -1164,8 +1166,8 @@ func (f *Fs) newObjectWithExportInfo(

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(remote)
+info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
if err != nil {
return nil, err
}
@@ -1183,9 +1185,9 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
-found, err = f.list([]string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
+found, err = f.list(ctx, []string{pathID}, leaf, true, false, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
_, exportName, _, isDocument := f.findExportFormat(item)
if exportName == leaf {
@@ -1206,7 +1208,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
}

// CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
// fmt.Println("Making", path)
// Define the metadata for the directory we are going to create.
createInfo := &drive.File{
@@ -1368,18 +1370,18 @@ func (f *Fs) findImportFormat(mimeType string) string {
//
// This should return ErrDirNotFound if the directory isn't
// found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
-directoryID, err := f.dirCache.FindDir(dir, false)
+directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}

var iErr error
-_, err = f.list([]string{directoryID}, "", false, false, false, func(item *drive.File) bool {
+_, err = f.list(ctx, []string{directoryID}, "", false, false, false, func(item *drive.File) bool {
entry, err := f.itemToDirEntry(path.Join(dir, item.Name), item)
if err != nil {
iErr = err
@@ -1432,7 +1434,7 @@ func (s listRSlices) Less(i, j int) bool {
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be send to the out channel and then return. Once the in channel is closed,
// nil is send to the out channel and the function returns.
-func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
+func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in <-chan listREntry, out chan<- error, cb func(fs.DirEntry) error, grouping int) {
var dirs []string
var paths []string

@@ -1453,7 +1455,7 @@ func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- er
}
listRSlices{dirs, paths}.Sort()
var iErr error
-_, err := f.list(dirs, "", false, false, false, func(item *drive.File) bool {
+_, err := f.list(ctx, dirs, "", false, false, false, func(item *drive.File) bool {
for _, parent := range item.Parents {
// only handle parents that are in the requested dirs list
i := sort.SearchStrings(dirs, parent)
@@ -1508,17 +1510,17 @@ func (f *Fs) listRRunner(wg *sync.WaitGroup, in <-chan listREntry, out chan<- er
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
const (
grouping = 50
inputBuffer = 1000
)

-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
-directoryID, err := f.dirCache.FindDir(dir, false)
+directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return err
}
@@ -1562,7 +1564,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
in <- listREntry{directoryID, dir}

for i := 0; i < fs.Config.Checkers; i++ {
-go f.listRRunner(&wg, in, out, cb, grouping)
+go f.listRRunner(ctx, &wg, in, out, cb, grouping)
}
go func() {
// wait until the all directories are processed
@@ -1636,8 +1638,8 @@ func (f *Fs) itemToDirEntry(remote string, item *drive.File) (fs.DirEntry, error
// Creates a drive.File info from the parameters passed in.
//
// Used to create new objects
-func (f *Fs) createFileInfo(remote string, modTime time.Time) (*drive.File, error) {
+func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) {
-leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
+leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -1658,32 +1660,32 @@ func (f *Fs) createFileInfo(remote string, modTime time.Time) (*drive.File, erro
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-exisitingObj, err := f.NewObject(src.Remote())
+exisitingObj, err := f.NewObject(ctx, src.Remote())
switch err {
case nil:
-return exisitingObj, exisitingObj.Update(in, src, options...)
+return exisitingObj, exisitingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
-return f.PutUnchecked(in, src, options...)
+return f.PutUnchecked(ctx, in, src, options...)
default:
return nil, err
}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(in, src, options...)
+return f.Put(ctx, in, src, options...)
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
-modTime := src.ModTime()
+modTime := src.ModTime(ctx)
srcMimeType := fs.MimeTypeFromName(remote)
srcExt := path.Ext(remote)
exportExt := ""
@@ -1705,7 +1707,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
}
}

-createInfo, err := f.createFileInfo(remote, modTime)
+createInfo, err := f.createFileInfo(ctx, remote, modTime)
if err != nil {
return nil, err
}
@@ -1741,7 +1743,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
-func (f *Fs) MergeDirs(dirs []fs.Directory) error {
+func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
if len(dirs) < 2 {
return nil
}
@@ -1749,7 +1751,7 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
for _, srcDir := range dirs[1:] {
// list the the objects
infos := []*drive.File{}
-_, err := f.list([]string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
+_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, true, func(info *drive.File) bool {
infos = append(infos, info)
return false
})
@@ -1775,7 +1777,7 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
}
// rmdir (into trash) the now empty source directory
fs.Infof(srcDir, "removing empty directory")
-err = f.rmdir(srcDir.ID(), true)
+err = f.rmdir(ctx, srcDir.ID(), true)
if err != nil {
return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
}
@@ -1784,19 +1786,19 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
}

// Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-err := f.dirCache.FindRoot(true)
+err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
-_, err = f.dirCache.FindDir(dir, true)
+_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}

// Rmdir deletes a directory unconditionally by ID
-func (f *Fs) rmdir(directoryID string, useTrash bool) error {
+func (f *Fs) rmdir(ctx context.Context, directoryID string, useTrash bool) error {
return f.pacer.Call(func() (bool, error) {
var err error
if useTrash {
@@ -1820,15 +1822,15 @@ func (f *Fs) rmdir(directoryID string, useTrash bool) error {
// Rmdir deletes a directory
//
// Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.root, dir)
dc := f.dirCache
-directoryID, err := dc.FindDir(dir, false)
+directoryID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
}
var trashedFiles = false
-found, err := f.list([]string{directoryID}, "", false, false, true, func(item *drive.File) bool {
+found, err := f.list(ctx, []string{directoryID}, "", false, false, true, func(item *drive.File) bool {
if !item.Trashed {
fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
return true
@@ -1847,7 +1849,7 @@ func (f *Fs) Rmdir(dir string) error {
// trash the directory if it had trashed files
// in or the user wants to trash, otherwise
// delete it.
-err = f.rmdir(directoryID, trashedFiles || f.opt.UseTrash)
+err = f.rmdir(ctx, directoryID, trashedFiles || f.opt.UseTrash)
if err != nil {
return err
}
@@ -1873,7 +1875,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
switch src := src.(type) {
@@ -1897,9 +1899,9 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}

// Look to see if there is an existing object
-existingObject, _ := f.NewObject(remote)
+existingObject, _ := f.NewObject(ctx, remote)

-createInfo, err := f.createFileInfo(remote, src.ModTime())
+createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
if err != nil {
return nil, err
}
@@ -1926,7 +1928,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
return nil, err
}
if existingObject != nil {
-err = existingObject.Remove()
+err = existingObject.Remove(ctx)
if err != nil {
fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
}
@@ -1939,11 +1941,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
if f.root == "" {
return errors.New("can't purge root directory")
}
-err := f.dirCache.FindRoot(false)
+err := f.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}
@@ -1972,7 +1974,7 @@ func (f *Fs) Purge() error {
}

// CleanUp empties the trash
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
err := f.pacer.Call(func() (bool, error) {
err := f.svc.Files.EmptyTrash().Do()
return shouldRetry(err)
@@ -1985,7 +1987,7 @@ func (f *Fs) CleanUp() error {
}

// About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.isTeamDrive {
// Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil
@@ -2021,7 +2023,7 @@ func (f *Fs) About() (*fs.Usage, error) {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
var srcObj *baseObject
ext := ""
switch src := src.(type) {
@@ -2044,13 +2046,13 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
remote = remote[:len(remote)-len(ext)]
}

-_, srcParentID, err := srcObj.fs.dirCache.FindPath(src.Remote(), false)
+_, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false)
if err != nil {
return nil, err
}

// Temporary Object under construction
-dstInfo, err := f.createFileInfo(remote, src.ModTime())
+dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
if err != nil {
return nil, err
}
@@ -2095,13 +2097,13 @@ func (f *Fs) ShouldSupportTeamDrives(src fs.Object) (bool, error) {
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(remote string) (link string, err error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
-id, err := f.dirCache.FindDir(remote, false)
+id, err := f.dirCache.FindDir(ctx, remote, false)
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
-o, err := f.NewObject(remote)
+o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
@@ -2137,7 +2139,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -2153,14 +2155,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

// find the root src directory
-err := srcFs.dirCache.FindRoot(false)
+err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}

// find the root dst directory
if dstRemote != "" {
-err = f.dirCache.FindRoot(true)
+err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
@@ -2176,14 +2178,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
if dstRemote == "" {
findPath = f.root
}
-leaf, dstDirectoryID, err = f.dirCache.FindPath(findPath, true)
+leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}

// Check destination does not exist
if dstRemote != "" {
-_, err = f.dirCache.FindDir(dstRemote, false)
+_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
@@ -2198,14 +2200,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
-_, srcDirectoryID, err = srcFs.dirCache.FindPath(srcRemote, false)
+_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, srcRemote, false)
}
if err != nil {
return err
}

// Find ID of src
-srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
@@ -2236,7 +2238,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
go func() {
// get the StartPageToken early so all changes from now on get processed
startPageToken, err := f.changeNotifyStartPageToken()
@@ -2411,13 +2413,13 @@ func (o *baseObject) Remote() string {
}

// Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
return o.md5sum, nil
}
-func (o *baseObject) Hash(t hash.Type) (string, error) {
+func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -2430,15 +2432,15 @@ func (o *baseObject) Size() int64 {
}

// getRemoteInfo returns a drive.File for the remote
-func (f *Fs) getRemoteInfo(remote string) (info *drive.File, err error) {
+func (f *Fs) getRemoteInfo(ctx context.Context, remote string) (info *drive.File, err error) {
-info, _, _, _, _, err = f.getRemoteInfoWithExport(remote)
+info, _, _, _, _, err = f.getRemoteInfoWithExport(ctx, remote)
return
}

// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
-func (f *Fs) getRemoteInfoWithExport(remote string) (
+func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
-leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, false)
+leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, "", "", "", false, fs.ErrorObjectNotFound
@@ -2446,7 +2448,7 @@ func (f *Fs) getRemoteInfoWithExport(remote string) (
return nil, "", "", "", false, err
}

-found, err := f.list([]string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
+found, err := f.list(ctx, []string{directoryID}, leaf, false, true, false, func(item *drive.File) bool {
if !f.opt.SkipGdocs {
extension, exportName, exportMimeType, isDocument = f.findExportFormat(item)
if exportName == leaf {
@@ -2477,7 +2479,7 @@ func (f *Fs) getRemoteInfoWithExport(remote string) (
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
-func (o *baseObject) ModTime() time.Time {
+func (o *baseObject) ModTime(ctx context.Context) time.Time {
modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
if err != nil {
fs.Debugf(o, "Failed to read mtime from object: %v", err)
@@ -2487,7 +2489,7 @@ func (o *baseObject) ModTime() time.Time {
}

// SetModTime sets the modification time of the drive fs object
-func (o *baseObject) SetModTime(modTime time.Time) error {
+func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
// New metadata
updateInfo := &drive.File{
ModifiedTime: modTime.Format(timeFormatOut),
@@ -2620,7 +2622,7 @@ func (o *baseObject) open(url string, options ...fs.OpenOption) (in io.ReadClose
}

// Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.v2Download {
var v2File *drive_v2.File
err = o.fs.pacer.Call(func() (bool, error) {
@@ -2638,7 +2640,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
}
return o.baseObject.open(o.url, options...)
}
-func (o *documentObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the size with what we are reading as it can change from
// the HEAD in the listing to this GET. This stops rclone marking
// the transfer as corrupted.
@@ -2670,7 +2672,7 @@ func (o *documentObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err e
}
return
}
-func (o *linkObject) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
var offset, limit int64 = 0, -1
var data = o.content
for _, option := range options {
@@ -2722,11 +2724,11 @@ func (o *baseObject) update(updateInfo *drive.File, uploadMimeType string, in io
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-srcMimeType := fs.MimeType(src)
+srcMimeType := fs.MimeType(ctx, src)
updateInfo := &drive.File{
MimeType: srcMimeType,
-ModifiedTime: src.ModTime().Format(timeFormatOut),
+ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
}
info, err := o.baseObject.update(updateInfo, srcMimeType, in, src)
if err != nil {
@@ -2745,12 +2747,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

return nil
}
-func (o *documentObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-srcMimeType := fs.MimeType(src)
+srcMimeType := fs.MimeType(ctx, src)
importMimeType := ""
updateInfo := &drive.File{
MimeType: srcMimeType,
-ModifiedTime: src.ModTime().Format(timeFormatOut),
+ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
}

if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
@@ -2787,12 +2789,12 @@ func (o *documentObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.O
return nil
}

-func (o *linkObject) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
return errors.New("cannot update link files")
}

// Remove an object
-func (o *baseObject) Remove() error {
+func (o *baseObject) Remove(ctx context.Context) error {
var err error
err = o.fs.pacer.Call(func() (bool, error) {
if o.fs.opt.UseTrash {
@@ -2815,7 +2817,7 @@ func (o *baseObject) Remove() error {
}

// MimeType of an Object if known, "" otherwise
-func (o *baseObject) MimeType() string {
+func (o *baseObject) MimeType(ctx context.Context) string {
return o.mimeType
}

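The hunks above move every drive backend method to a context.Context first argument. As a rough illustration only (not part of this diff), a caller can bound any of these operations through the context it passes in; the helper name, the fs import path comment, and the 10-second timeout below are assumptions for the sketch, not code from the repository.

package example

import (
	"context"
	"time"

	"github.com/rclone/rclone/fs" // import path assumed; adjust to the module path in use
)

// listWithTimeout sketches how the new ctx-first signatures are consumed:
// the context passed here is the one the backend receives in List and in
// any calls it makes underneath, so cancelling it (here via a timeout)
// gives the backend a chance to stop work early.
func listWithTimeout(f fs.Fs, dir string) (fs.DirEntries, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return f.List(ctx, dir)
}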
|
@ -2,6 +2,7 @@ package drive
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
@ -195,7 +196,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
|
|||||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
|
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -209,7 +210,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
|
|||||||
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
_, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
|
err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -220,10 +221,10 @@ func (f *Fs) InternalTestDocumentExport(t *testing.T) {
|
|||||||
f.exportExtensions, _, err = parseExtensions("txt")
|
f.exportExtensions, _, err = parseExtensions("txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
obj, err := f.NewObject("example2.txt")
|
obj, err := f.NewObject(context.Background(), "example2.txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
rc, err := obj.Open()
|
rc, err := obj.Open(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer func() { require.NoError(t, rc.Close()) }()
|
defer func() { require.NoError(t, rc.Close()) }()
|
||||||
|
|
||||||
@ -246,10 +247,10 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
|
|||||||
f.exportExtensions, _, err = parseExtensions("link.html")
|
f.exportExtensions, _, err = parseExtensions("link.html")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
obj, err := f.NewObject("example2.link.html")
|
obj, err := f.NewObject(context.Background(), "example2.link.html")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
rc, err := obj.Open()
|
rc, err := obj.Open(context.Background())
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer func() { require.NoError(t, rc.Close()) }()
|
defer func() { require.NoError(t, rc.Close()) }()
|
||||||
|
|
||||||
|
@@ -22,6 +22,7 @@ of path_display and all will be well.
*/

import (
+"context"
"fmt"
"io"
"log"
@@ -441,7 +442,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}

@@ -454,7 +455,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
root := f.slashRoot
if dir != "" {
root += "/" + dir
@@ -541,22 +542,22 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
-return o, o.Update(in, src, options...)
+return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(in, src, options...)
+return f.Put(ctx, in, src, options...)
}

// Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
root := path.Join(f.slashRoot, dir)

// can't create or run metadata on root
@@ -586,7 +587,7 @@ func (f *Fs) Mkdir(dir string) error {
// Rmdir deletes the container
//
// Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
root := path.Join(f.slashRoot, dir)

// can't remove root
@@ -642,7 +643,7 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
@@ -687,7 +688,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge() (err error) {
+func (f *Fs) Purge(ctx context.Context) (err error) {
// Let dropbox delete the filesystem tree
err = f.pacer.Call(func() (bool, error) {
_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
@@ -705,7 +706,7 @@ func (f *Fs) Purge() (err error) {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -745,7 +746,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(remote string) (link string, err error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
absPath := "/" + path.Join(f.Root(), remote)
fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
createArg := sharing.CreateSharedLinkWithSettingsArg{
@@ -798,7 +799,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -834,7 +835,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

// About gets quota information
-func (f *Fs) About() (usage *fs.Usage, err error) {
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
var q *users.SpaceUsage
err = f.pacer.Call(func() (bool, error) {
q, err = f.users.GetSpaceUsage()
@@ -886,7 +887,7 @@ func (o *Object) Remote() string {
}

// Hash returns the dropbox special hash
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.Dropbox {
return "", hash.ErrUnsupported
}
@@ -948,7 +949,7 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData()
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -960,7 +961,7 @@ func (o *Object) ModTime() time.Time {
// SetModTime sets the modification time of the local fs object
//
// Commits the datastore
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// Dropbox doesn't have a way of doing this so returning this
// error will cause the file to be deleted first then
// re-uploaded to set the time.
@@ -973,7 +974,7 @@ func (o *Object) Storable() bool {
}

// Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1099,7 +1100,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
fs.Logf(o, "File name disallowed - not uploading")
@@ -1108,7 +1109,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
commitInfo := files.NewCommitInfo(o.remotePath())
commitInfo.Mode.Tag = "overwrite"
// The Dropbox API only accepts timestamps in UTC with second precision.
-commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
+commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)

size := src.Size()
var err error
@@ -1128,7 +1129,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
-func (o *Object) Remove() (err error) {
+func (o *Object) Remove(ctx context.Context) (err error) {
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
return shouldRetry(err)
@@ -2,6 +2,7 @@
 package ftp

 import (
+"context"
 "crypto/tls"
 "io"
 "net/textproto"
@@ -202,6 +203,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {

 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
+ctx := context.Background()
 // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
 // Parse config into Options struct
 opt := new(Options)
@@ -254,7 +256,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 if f.root == "." {
 f.root = ""
 }
-_, err := f.NewObject(remote)
+_, err := f.NewObject(ctx, remote)
 if err != nil {
 if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
 // File doesn't exist so return old f
@@ -319,7 +321,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
 // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
 entry, err := f.findItem(remote)
 if err != nil {
@@ -363,7 +365,7 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 // defer fs.Trace(dir, "curlevel=%d", curlevel)("")
 c, err := f.getFtpConnection()
 if err != nil {
@@ -453,7 +455,7 @@ func (f *Fs) Precision() time.Duration {
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 // fs.Debugf(f, "Trying to put file %s", src.Remote())
 err := f.mkParentDir(src.Remote())
 if err != nil {
@@ -463,13 +465,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 fs: f,
 remote: src.Remote(),
 }
-err = o.Update(in, src, options...)
+err = o.Update(ctx, in, src, options...)
 return o, err
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(in, src, options...)
+return f.Put(ctx, in, src, options...)
 }

 // getInfo reads the FileInfo for a path
@@ -547,7 +549,7 @@ func (f *Fs) mkParentDir(remote string) error {
 }

 // Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(dir string) (err error) {
+func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 // defer fs.Trace(dir, "")("err=%v", &err)
 root := path.Join(f.root, dir)
 return f.mkdir(root)
@@ -556,7 +558,7 @@ func (f *Fs) Mkdir(dir string) (err error) {
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 c, err := f.getFtpConnection()
 if err != nil {
 return errors.Wrap(translateErrorFile(err), "Rmdir")
@@ -567,7 +569,7 @@ func (f *Fs) Rmdir(dir string) error {
 }

 // Move renames a remote file object
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't move - not same remote type")
@@ -589,7 +591,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 if err != nil {
 return nil, errors.Wrap(err, "Move Rename failed")
 }
-dstObj, err := f.NewObject(remote)
+dstObj, err := f.NewObject(ctx, remote)
 if err != nil {
 return nil, errors.Wrap(err, "Move NewObject failed")
 }
@@ -604,7 +606,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 srcFs, ok := src.(*Fs)
 if !ok {
 fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -667,7 +669,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the hash of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 return "", hash.ErrUnsupported
 }

@@ -677,12 +679,12 @@ func (o *Object) Size() int64 {
 }

 // ModTime returns the modification time of the object
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 return o.info.ModTime
 }

 // SetModTime sets the modification time of the object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 return nil
 }

@@ -743,7 +745,7 @@ func (f *ftpReadCloser) Close() error {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 // defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
 path := path.Join(o.fs.root, o.remote)
 var offset, limit int64 = 0, -1
@@ -777,7 +779,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 // Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 // defer fs.Trace(o, "src=%v", src)("err=%v", &err)
 path := path.Join(o.fs.root, o.remote)
 // remove the file if upload failed
@@ -787,7 +789,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 // may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
 // able to think of a better method to find out if the server has finished - ncw
 time.Sleep(1 * time.Second)
-removeErr := o.Remove()
+removeErr := o.Remove(ctx)
 if removeErr != nil {
 fs.Debugf(o, "Failed to remove: %v", removeErr)
 } else {
@@ -813,7 +815,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove an object
-func (o *Object) Remove() (err error) {
+func (o *Object) Remove(ctx context.Context) (err error) {
 // defer fs.Trace(o, "")("err=%v", &err)
 path := path.Join(o.fs.root, o.remote)
 // Check if it's a directory or a file
@@ -822,7 +824,7 @@ func (o *Object) Remove() (err error) {
 return err
 }
 if info.IsDir {
-err = o.fs.Rmdir(o.remote)
+err = o.fs.Rmdir(ctx, o.remote)
 } else {
 c, err := o.fs.getFtpConnection()
 if err != nil {
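The same plumbing pattern repeats across the FTP backend: entry points that have no caller-supplied context, such as NewFs, create one with context.Background() and hand it down. A rough caller-side sketch, assuming a configured remote named "ftp-remote" and the usual context/fmt/log imports plus the rclone fs package:

    ctx := context.Background()

    f, err := fs.NewFs("ftp-remote:path/to/dir") // NewFs itself still takes no context in this commit
    if err != nil {
        log.Fatal(err)
    }

    entries, err := f.List(ctx, "") // List now requires the context as its first argument
    if err != nil {
        log.Fatal(err)
    }
    for _, e := range entries {
        fmt.Println(e.Remote())
    }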
@@ -473,7 +473,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 return f.newObjectWithInfo(remote, nil)
 }

@@ -485,7 +485,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
 // dir is the starting directory, "" for root
 //
 // Set recurse to read sub directories
-func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
+func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) (err error) {
 root := f.root
 rootLength := len(root)
 if dir != "" {
@@ -574,9 +574,9 @@ func (f *Fs) markBucketOK() {
 }

 // listDir lists a single directory
-func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 // List the objects
-err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
+err = f.list(ctx, dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
 entry, err := f.itemToDirEntry(remote, object, isDirectory)
 if err != nil {
 return err
@@ -633,11 +633,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 if f.bucket == "" {
 return f.listBuckets(dir)
 }
-return f.listDir(dir)
+return f.listDir(ctx, dir)
 }

 // ListR lists the objects and directories of the Fs starting
@@ -656,12 +656,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 if f.bucket == "" {
 return fs.ErrorListBucketRequired
 }
 list := walk.NewListRHelper(callback)
-err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
+err = f.list(ctx, dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
 entry, err := f.itemToDirEntry(remote, object, isDirectory)
 if err != nil {
 return err
@@ -681,22 +681,22 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 // Temporary Object under construction
 o := &Object{
 fs: f,
 remote: src.Remote(),
 }
-return o, o.Update(in, src, options...)
+return o, o.Update(ctx, in, src, options...)
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(in, src, options...)
+return f.Put(ctx, in, src, options...)
 }

 // Mkdir creates the bucket if it doesn't exist
-func (f *Fs) Mkdir(dir string) (err error) {
+func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 f.bucketOKMu.Lock()
 defer f.bucketOKMu.Unlock()
 if f.bucketOK {
@@ -755,7 +755,7 @@ func (f *Fs) Mkdir(dir string) (err error) {
 //
 // Returns an error if it isn't empty: Error 409: The bucket you tried
 // to delete was not empty.
-func (f *Fs) Rmdir(dir string) (err error) {
+func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
 f.bucketOKMu.Lock()
 defer f.bucketOKMu.Unlock()
 if f.root != "" || dir != "" {
@@ -785,8 +785,8 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-err := f.Mkdir("")
+err := f.Mkdir(ctx, "")
 if err != nil {
 return nil, err
 }
@@ -845,7 +845,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 if t != hash.MD5 {
 return "", hash.ErrUnsupported
 }
@@ -919,7 +919,7 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 err := o.readMetaData()
 if err != nil {
 // fs.Logf(o, "Failed to read metadata: %v", err)
@@ -936,7 +936,7 @@ func metadataFromModTime(modTime time.Time) map[string]string {
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) (err error) {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
 // This only adds metadata so will perserve other metadata
 object := storage.Object{
 Bucket: o.fs.bucket,
@@ -961,7 +961,7 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 req, err := http.NewRequest("GET", o.url, nil)
 if err != nil {
 return nil, err
@@ -992,17 +992,17 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-err := o.fs.Mkdir("")
+err := o.fs.Mkdir(ctx, "")
 if err != nil {
 return err
 }
-modTime := src.ModTime()
+modTime := src.ModTime(ctx)

 object := storage.Object{
 Bucket: o.fs.bucket,
 Name: o.fs.root + o.remote,
-ContentType: fs.MimeType(src),
+ContentType: fs.MimeType(ctx, src),
 Metadata: metadataFromModTime(modTime),
 }
 var newObject *storage.Object
@@ -1023,7 +1023,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove an object
-func (o *Object) Remove() (err error) {
+func (o *Object) Remove(ctx context.Context) (err error) {
 err = o.fs.pacer.Call(func() (bool, error) {
 err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
 return shouldRetry(err)
@@ -1032,7 +1032,7 @@ func (o *Object) Remove() (err error) {
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 return o.mimeType
 }
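For this backend, Put, Update and the list helpers all gain the context parameter, and request-scoped values such as src.ModTime(ctx) and fs.MimeType(ctx, src) are resolved with it. A minimal upload sketch; `f`, the reader `in` and the fs.ObjectInfo `src` are assumed to exist in the caller and are not defined by this commit:

    // Allow the upload to be cancelled from elsewhere.
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // in is an io.Reader with the file contents; src describes the remote
    // name, size and modtime (both assumed here).
    obj, err := f.Put(ctx, in, src)
    if err != nil {
        return err
    }
    fs.Debugf(obj, "uploaded, modtime %v", obj.ModTime(ctx))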
@@ -5,6 +5,7 @@
 package http

 import (
+"context"
 "io"
 "mime"
 "net/http"
@@ -207,7 +208,7 @@ func (f *Fs) Precision() time.Duration {
 }

 // NewObject creates a new remote http file object
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 o := &Object{
 fs: f,
 remote: remote,
@@ -359,7 +360,7 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 if !strings.HasSuffix(dir, "/") && dir != "" {
 dir += "/"
 }
@@ -399,12 +400,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 return nil, errorReadOnly
 }

 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 return nil, errorReadOnly
 }

@@ -427,7 +428,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
-func (o *Object) Hash(r hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 return "", hash.ErrUnsupported
 }

@@ -437,7 +438,7 @@ func (o *Object) Size() int64 {
 }

 // ModTime returns the modification time of the remote http file
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 return o.modTime
 }

@@ -480,7 +481,7 @@ func (o *Object) stat() error {
 // SetModTime sets the modification and access time to the specified time
 //
 // it also updates the info field
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 return errorReadOnly
 }

@@ -490,7 +491,7 @@ func (o *Object) Storable() bool {
 }

 // Open a remote http file object for reading. Seek is supported
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 url := o.url()
 req, err := http.NewRequest("GET", url, nil)
 if err != nil {
@@ -517,27 +518,27 @@ func (f *Fs) Hashes() hash.Set {
 }

 // Mkdir makes the root directory of the Fs object
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 return errorReadOnly
 }

 // Remove a remote http file object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 return errorReadOnly
 }

 // Rmdir removes the root directory of the Fs object
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 return errorReadOnly
 }

 // Update in to the object with the modTime given of the given size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 return errorReadOnly
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 return o.contentType
 }
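The read-only HTTP backend keeps returning errorReadOnly from every write method; only the read path really makes use of the new parameter. A sketch of a ranged read with the updated Open signature, assuming `o` was obtained from NewObject and that the usual context/fmt/io-util imports are in place:

    ctx := context.Background()

    // Read only the first kilobyte of the remote file.
    rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
    if err != nil {
        return err
    }
    defer func() { _ = rc.Close() }()

    data, err := ioutil.ReadAll(rc)
    if err != nil {
        return err
    }
    fmt.Printf("read %d bytes\n", len(data))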
@@ -1,6 +1,7 @@
 package http

 import (
+"context"
 "fmt"
 "io/ioutil"
 "net/http"
@@ -64,7 +65,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
 }

 func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
-entries, err := f.List("")
+entries, err := f.List(context.Background(), "")
 require.NoError(t, err)

 sort.Sort(entries)
@@ -120,7 +121,7 @@ func TestListSubDir(t *testing.T) {
 f, tidy := prepare(t)
 defer tidy()

-entries, err := f.List("three")
+entries, err := f.List(context.Background(), "three")
 require.NoError(t, err)

 sort.Sort(entries)
@@ -138,7 +139,7 @@ func TestNewObject(t *testing.T) {
 f, tidy := prepare(t)
 defer tidy()

-o, err := f.NewObject("four/under four.txt")
+o, err := f.NewObject(context.Background(), "four/under four.txt")
 require.NoError(t, err)

 assert.Equal(t, "four/under four.txt", o.Remote())
@@ -148,7 +149,7 @@ func TestNewObject(t *testing.T) {

 // Test the time is correct on the object

-tObj := o.ModTime()
+tObj := o.ModTime(context.Background())

 fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
 require.NoError(t, err)
@@ -158,7 +159,7 @@ func TestNewObject(t *testing.T) {
 assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))

 // check object not found
-o, err = f.NewObject("not found.txt")
+o, err = f.NewObject(context.Background(), "not found.txt")
 assert.Nil(t, o)
 assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
@@ -167,11 +168,11 @@ func TestOpen(t *testing.T) {
 f, tidy := prepare(t)
 defer tidy()

-o, err := f.NewObject("four/under four.txt")
+o, err := f.NewObject(context.Background(), "four/under four.txt")
 require.NoError(t, err)

 // Test normal read
-fd, err := o.Open()
+fd, err := o.Open(context.Background())
 require.NoError(t, err)
 data, err := ioutil.ReadAll(fd)
 require.NoError(t, err)
@@ -179,7 +180,7 @@ func TestOpen(t *testing.T) {
 assert.Equal(t, "beetroot\n", string(data))

 // Test with range request
-fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
+fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
 require.NoError(t, err)
 data, err = ioutil.ReadAll(fd)
 require.NoError(t, err)
@@ -191,12 +192,12 @@ func TestMimeType(t *testing.T) {
 f, tidy := prepare(t)
 defer tidy()

-o, err := f.NewObject("four/under four.txt")
+o, err := f.NewObject(context.Background(), "four/under four.txt")
 require.NoError(t, err)

 do, ok := o.(fs.MimeTyper)
 require.True(t, ok)
-assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
+assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
 }

 func TestIsAFileRoot(t *testing.T) {
@@ -216,7 +217,7 @@ func TestIsAFileSubDir(t *testing.T) {
 f, err := NewFs(remoteName, "three/underthree.txt", m)
 assert.Equal(t, err, fs.ErrorIsFile)

-entries, err := f.List("")
+entries, err := f.List(context.Background(), "")
 require.NoError(t, err)

 sort.Sort(entries)
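In the tests the change is mechanical: every interface call gains context.Background() as its first argument. A condensed sketch of what such a test now looks like; the test itself is hypothetical but the helpers and file names follow the existing test file:

    func TestModTimeNotZero(t *testing.T) { // hypothetical, mirroring the tests above
        f, tidy := prepare(t)
        defer tidy()

        o, err := f.NewObject(context.Background(), "four/under four.txt")
        require.NoError(t, err)

        // ModTime now needs the context too.
        assert.False(t, o.ModTime(context.Background()).IsZero())
    }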
@@ -2,6 +2,7 @@ package jottacloud

 import (
 "bytes"
+"context"
 "crypto/md5"
 "encoding/hex"
 "fmt"
@@ -542,7 +543,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 if f.root == "." {
 f.root = ""
 }
-_, err := f.NewObject(remote)
+_, err := f.NewObject(context.TODO(), remote)
 if err != nil {
 if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
 // File doesn't exist so return old f
@@ -580,7 +581,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.JottaFile) (fs.Object, e

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 return f.newObjectWithInfo(remote, nil)
 }

@@ -617,7 +618,7 @@ func (f *Fs) CreateDir(path string) (jf *api.JottaFolder, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 //fmt.Printf("List: %s\n", f.filePath(dir))
 opts := rest.Opts{
 Method: "GET",
@@ -734,7 +735,7 @@ func (f *Fs) listFileDir(remoteStartPath string, startFolder *api.JottaFolder, f
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 opts := rest.Opts{
 Method: "GET",
 Path: f.filePath(dir),
@@ -787,17 +788,17 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 if f.opt.Device != "Jotta" {
 return nil, errors.New("upload not supported for devices other than Jotta")
 }
-o := f.createObject(src.Remote(), src.ModTime(), src.Size())
+o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
-return o, o.Update(in, src, options...)
+return o, o.Update(ctx, in, src, options...)
 }

 // mkParentDir makes the parent of the native path dirPath if
 // necessary and any directories above that
-func (f *Fs) mkParentDir(dirPath string) error {
+func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
 // defer log.Trace(dirPath, "")("")
 // chop off trailing / if it exists
 if strings.HasSuffix(dirPath, "/") {
@@ -807,25 +808,25 @@ func (f *Fs) mkParentDir(dirPath string) error {
 if parent == "." {
 parent = ""
 }
-return f.Mkdir(parent)
+return f.Mkdir(ctx, parent)
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 _, err := f.CreateDir(dir)
 return err
 }

 // purgeCheck removes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) (err error) {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error) {
 root := path.Join(f.root, dir)
 if root == "" {
 return errors.New("can't purge root directory")
 }

 // check that the directory exists
-entries, err := f.List(dir)
+entries, err := f.List(ctx, dir)
 if err != nil {
 return err
 }
@@ -865,8 +866,8 @@ func (f *Fs) purgeCheck(dir string, check bool) (err error) {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-return f.purgeCheck(dir, true)
+return f.purgeCheck(ctx, dir, true)
 }

 // Precision return the precision of this Fs
@@ -879,8 +880,8 @@ func (f *Fs) Precision() time.Duration {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
-return f.purgeCheck("", false)
+return f.purgeCheck(ctx, "", false)
 }

 // copyOrMoves copies or moves directories or files depending on the method parameter
@@ -913,14 +914,14 @@ func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err erro
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't copy - not same remote type")
 return nil, fs.ErrorCantMove
 }

-err := f.mkParentDir(remote)
+err := f.mkParentDir(ctx, remote)
 if err != nil {
 return nil, err
 }
@@ -943,14 +944,14 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't move - not same remote type")
 return nil, fs.ErrorCantMove
 }

-err := f.mkParentDir(remote)
+err := f.mkParentDir(ctx, remote)
 if err != nil {
 return nil, err
 }
@@ -972,7 +973,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 srcFs, ok := src.(*Fs)
 if !ok {
 fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -989,7 +990,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 //fmt.Printf("Move src: %s (FullPath %s), dst: %s (FullPath: %s)\n", srcRemote, srcPath, dstRemote, dstPath)

 var err error
-_, err = f.List(dstRemote)
+_, err = f.List(ctx, dstRemote)
 if err == fs.ErrorDirNotFound {
 // OK
 } else if err != nil {
@@ -1007,7 +1008,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }

 // PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(remote string) (link string, err error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
 opts := rest.Opts{
 Method: "GET",
 Path: f.filePath(remote),
@@ -1053,7 +1054,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
 }

 // About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 info, err := getAccountInfo(f.srv, f.user)
 if err != nil {
 return nil, err
@@ -1095,7 +1096,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the MD5 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 if t != hash.MD5 {
 return "", hash.ErrUnsupported
 }
@@ -1113,7 +1114,7 @@ func (o *Object) Size() int64 {
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 return o.mimeType
 }

@@ -1145,7 +1146,7 @@ func (o *Object) readMetaData(force bool) (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 err := o.readMetaData(false)
 if err != nil {
 fs.Logf(o, "Failed to read metadata: %v", err)
@@ -1155,7 +1156,7 @@ func (o *Object) ModTime() time.Time {
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 return fs.ErrorCantSetModTime
 }

@@ -1165,7 +1166,7 @@ func (o *Object) Storable() bool {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 fs.FixRangeOption(options, o.size)
 var resp *http.Response
 opts := rest.Opts{
@@ -1249,9 +1250,9 @@ func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader,
 // If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 size := src.Size()
-md5String, err := src.Hash(hash.MD5)
+md5String, err := src.Hash(ctx, hash.MD5)
 if err != nil || md5String == "" {
 // unwrap the accounting from the input, we use wrap to put it
 // back on after the buffering
@@ -1274,7 +1275,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 Path: "allocate",
 ExtraHeaders: make(map[string]string),
 }
-fileDate := api.Time(src.ModTime()).APIString()
+fileDate := api.Time(src.ModTime(ctx)).APIString()

 // the allocate request
 var request = api.AllocateFileRequest{
@@ -1338,7 +1339,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 opts := rest.Opts{
 Method: "POST",
 Path: o.filePath(),
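Jottacloud shows the same change applied to internal helpers: mkParentDir and purgeCheck take the context so that Mkdir, Rmdir and Purge can hand it on. A caller-side sketch of a move with a copy-and-delete fallback; `srcObj` is assumed to be an existing fs.Object on the same remote and the destination path is just an example:

    ctx := context.Background()

    newObj, err := f.Move(ctx, srcObj, "backup/report.pdf")
    if err == fs.ErrorCantMove {
        // Fall back to copy + delete; both also take ctx now.
        newObj, err = f.Copy(ctx, srcObj, "backup/report.pdf")
        if err == nil {
            err = srcObj.Remove(ctx)
        }
    }
    if err != nil {
        return err
    }
    fs.Logf(newObj, "moved")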
@@ -1,6 +1,7 @@
 package koofr

 import (
+"context"
 "encoding/base64"
 "errors"
 "fmt"
@@ -105,7 +106,7 @@ func (o *Object) Remote() string {
 }

 // ModTime returns the modification time of the Object
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
 }

@@ -120,7 +121,7 @@ func (o *Object) Fs() fs.Info {
 }

 // Hash returns an MD5 hash of the Object
-func (o *Object) Hash(typ hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) {
 if typ == hash.MD5 {
 return o.info.Hash, nil
 }
@@ -138,12 +139,12 @@ func (o *Object) Storable() bool {
 }

 // SetModTime is not supported
-func (o *Object) SetModTime(mtime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
 return nil
 }

 // Open opens the Object for reading
-func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
 var sOff, eOff int64 = 0, -1

 for _, option := range options {
@@ -177,7 +178,7 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 }

 // Update updates the Object contents
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 putopts := &koofrclient.PutFilter{
 ForceOverwrite: true,
 NoRename: true,
@@ -199,7 +200,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove deletes the remote Object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
 }

@@ -297,7 +298,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 }

 // List returns a list of items in a directory
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
 if err != nil {
 return nil, translateErrorsDir(err)
@@ -318,7 +319,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 }

 // NewObject creates a new remote Object for a given remote path
-func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) {
 info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
 if err != nil {
 return nil, translateErrorsObject(err)
@@ -334,7 +335,7 @@ func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
 }

 // Put updates a remote Object
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
 putopts := &koofrclient.PutFilter{
 ForceOverwrite: true,
 NoRename: true,
@@ -359,8 +360,8 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj
 }

 // PutStream updates a remote Object with a stream of unknown size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(in, src, options...)
+return f.Put(ctx, in, src, options...)
 }

 // isBadRequest is a predicate which holds true iff the error returned was
@@ -436,13 +437,13 @@ func (f *Fs) mkdir(fullPath string) error {

 // Mkdir creates a directory at the given remote path. Creates ancestors if
 // necessary
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 fullPath := f.fullPath(dir)
 return f.mkdir(fullPath)
 }

 // Rmdir removes an (empty) directory at the given remote path
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
 if err != nil {
 return translateErrorsDir(err)
@@ -458,7 +459,7 @@ func (f *Fs) Rmdir(dir string) error {
 }

 // Copy copies a remote Object to the given path
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 dstFullPath := f.fullPath(remote)
 dstDir := dir(dstFullPath)
 err := f.mkdir(dstDir)
@@ -471,11 +472,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 if err != nil {
 return nil, fs.ErrorCantCopy
 }
-return f.NewObject(remote)
+return f.NewObject(ctx, remote)
 }

 // Move moves a remote Object to the given path
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj := src.(*Object)
 dstFullPath := f.fullPath(remote)
 dstDir := dir(dstFullPath)
@@ -488,11 +489,11 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 if err != nil {
 return nil, fs.ErrorCantMove
 }
-return f.NewObject(remote)
+return f.NewObject(ctx, remote)
 }

 // DirMove moves a remote directory to the given path
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 srcFs := src.(*Fs)
 srcFullPath := srcFs.fullPath(srcRemote)
 dstFullPath := f.fullPath(dstRemote)
@@ -512,7 +513,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }

 // About reports space usage (with a MB precision)
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 mount, err := f.client.MountsDetails(f.mountID)
 if err != nil {
 return nil, err
@@ -528,7 +529,7 @@ func (f *Fs) About() (*fs.Usage, error) {
 }

 // Purge purges the complete Fs
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
 return err
 }
@@ -580,7 +581,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PublicLink creates a public link to the remote path
|
// PublicLink creates a public link to the remote path
|
||||||
func (f *Fs) PublicLink(remote string) (string, error) {
|
func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
|
||||||
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
|
linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", translateErrorsDir(err)
|
return "", translateErrorsDir(err)
|
||||||
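
Not part of the diff above, and purely illustrative: a minimal caller-side sketch of the context-aware interface these hunks introduce. The remote name, the path and the backend-registration import are assumptions, not taken from this commit; the point is only that List and NewObject now take a context that can be cancelled to stop work in flight.

package main

import (
	"context"
	"fmt"
	"log"

	_ "github.com/ncw/rclone/backend/all" // assumed: registers the backends
	"github.com/ncw/rclone/fs"
)

func main() {
	// Cancelling this context stops in-flight operations on the remote.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	f, err := fs.NewFs("remote:some/dir") // hypothetical, already-configured remote
	if err != nil {
		log.Fatal(err)
	}

	// List and NewObject now take the context as their first argument.
	entries, err := f.List(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
}
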
@@ -3,6 +3,7 @@
 package local
 
 import (
+	"context"
 	"syscall"
 
 	"github.com/ncw/rclone/fs"
@@ -10,7 +11,7 @@ import (
 )
 
 // About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var s syscall.Statfs_t
 	err := syscall.Statfs(f.root, &s)
 	if err != nil {
@@ -3,6 +3,7 @@
 package local
 
 import (
+	"context"
 	"syscall"
 	"unsafe"
 
@@ -13,7 +14,7 @@ import (
 var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")
 
 // About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var available, total, free int64
 	_, _, e1 := getFreeDiskSpace.Call(
 		uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
@@ -3,6 +3,7 @@ package local
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -329,7 +330,7 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, "", nil)
 }
 
@@ -342,7 +343,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 
 	dir = f.dirNames.Load(dir)
 	fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
@@ -507,11 +508,11 @@ func (m *mapper) Save(in, out string) string {
 }
 
 // Put the Object to the local filesystem
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	remote := src.Remote()
 	// Temporary Object under construction - info filled in by Update()
 	o := f.newObject(remote, "")
-	err := o.Update(in, src, options...)
+	err := o.Update(ctx, in, src, options...)
 	if err != nil {
 		return nil, err
 	}
@@ -519,12 +520,12 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }
 
 // Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
 	root := f.cleanPath(filepath.Join(f.root, dir))
 	err := os.MkdirAll(root, 0777)
@@ -544,7 +545,7 @@ func (f *Fs) Mkdir(dir string) error {
 // Rmdir removes the directory
 //
 // If it isn't empty it will return an error
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	root := f.cleanPath(filepath.Join(f.root, dir))
 	return os.Remove(root)
 }
@@ -600,7 +601,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
 		}
 
 		// If it matches - have found the precision
-		// fmt.Println("compare", fi.ModTime(), t)
+		// fmt.Println("compare", fi.ModTime(ctx), t)
 		if fi.ModTime().Equal(t) {
 			// fmt.Println("Precision detected as", duration)
 			return duration
@@ -614,7 +615,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	fi, err := f.lstat(f.root)
 	if err != nil {
 		return err
@@ -634,7 +635,7 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -693,7 +694,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -758,7 +759,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the requested hash of a file as a lowercase hex string
-func (o *Object) Hash(r hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 	// Check that the underlying file hasn't changed
 	oldtime := o.modTime
 	oldsize := o.size
@@ -809,12 +810,12 @@ func (o *Object) Size() int64 {
 }
 
 // ModTime returns the modification time of the object
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	return o.modTime
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	var err error
 	if o.translatedLink {
 		err = lChtimes(o.path, modTime, modTime)
@@ -910,7 +911,7 @@ func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var offset, limit int64 = 0, -1
 	hashes := hash.Supported
 	for _, option := range options {
@@ -974,7 +975,7 @@ func (nwc nopWriterCloser) Close() error {
 }
 
 // Update the object from in with modTime and size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	var out io.WriteCloser
 
 	hashes := hash.Supported
@@ -1055,7 +1056,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	o.fs.objectHashesMu.Unlock()
 
 	// Set the mtime
-	err = o.SetModTime(src.ModTime())
+	err = o.SetModTime(ctx, src.ModTime(ctx))
 	if err != nil {
 		return err
 	}
@@ -1069,7 +1070,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 // Pass in the remote desired and the size if known.
 //
 // It truncates any existing object
-func (f *Fs) OpenWriterAt(remote string, size int64) (fs.WriterAtCloser, error) {
+func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
 	// Temporary Object under construction
 	o := f.newObject(remote, "")
 
@@ -1119,7 +1120,7 @@ func (o *Object) lstat() error {
 }
 
 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	return remove(o.path)
 }
 
@@ -1,6 +1,7 @@
 package local
 
 import (
+	"context"
 	"io/ioutil"
 	"os"
 	"path"
@@ -83,6 +84,7 @@ func TestUpdatingCheck(t *testing.T) {
 }
 
 func TestSymlink(t *testing.T) {
+	ctx := context.Background()
 	r := fstest.NewRun(t)
 	defer r.Finalise()
 	f := r.Flocal.(*Fs)
@@ -131,7 +133,7 @@ func TestSymlink(t *testing.T) {
 
 	// Create a symlink
 	modTime3 := fstest.Time("2002-03-03T04:05:10.123123123Z")
-	file3 := r.WriteObjectTo(r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
+	file3 := r.WriteObjectTo(ctx, r.Flocal, "symlink2.txt"+linkSuffix, "file.txt", modTime3, false)
 	if runtime.GOOS == "windows" {
 		file3.Size = 0 // symlinks are 0 length under Windows
 	}
@@ -150,7 +152,7 @@ func TestSymlink(t *testing.T) {
 	assert.Equal(t, "file.txt", linkText)
 
 	// Check that NewObject gets the correct object
-	o, err := r.Flocal.NewObject("symlink2.txt" + linkSuffix)
+	o, err := r.Flocal.NewObject(ctx, "symlink2.txt"+linkSuffix)
 	require.NoError(t, err)
 	assert.Equal(t, "symlink2.txt"+linkSuffix, o.Remote())
 	if runtime.GOOS != "windows" {
@@ -158,11 +160,11 @@ func TestSymlink(t *testing.T) {
 	}
 
 	// Check that NewObject doesn't see the non suffixed version
-	_, err = r.Flocal.NewObject("symlink2.txt")
+	_, err = r.Flocal.NewObject(ctx, "symlink2.txt")
 	require.Equal(t, fs.ErrorObjectNotFound, err)
 
 	// Check reading the object
-	in, err := o.Open()
+	in, err := o.Open(ctx)
 	require.NoError(t, err)
 	contents, err := ioutil.ReadAll(in)
 	require.NoError(t, err)
@@ -170,7 +172,7 @@ func TestSymlink(t *testing.T) {
 	require.NoError(t, in.Close())
 
 	// Check reading the object with range
-	in, err = o.Open(&fs.RangeOption{Start: 2, End: 5})
+	in, err = o.Open(ctx, &fs.RangeOption{Start: 2, End: 5})
 	require.NoError(t, err)
 	contents, err = ioutil.ReadAll(in)
 	require.NoError(t, err)
@@ -16,6 +16,7 @@ Improvements:
 */
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"path"
@@ -403,10 +404,10 @@ func (f *Fs) clearRoot() {
 }
 
 // CleanUp deletes all files currently in trash
-func (f *Fs) CleanUp() (err error) {
+func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	trash := f.srv.FS.GetTrash()
 	items := []*mega.Node{}
-	_, err = f.list(trash, func(item *mega.Node) bool {
+	_, err = f.list(ctx, trash, func(item *mega.Node) bool {
 		items = append(items, item)
 		return false
 	})
@@ -454,7 +455,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *mega.Node) (fs.Object, error
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, nil)
 }
 
@@ -469,7 +470,7 @@ type listFn func(*mega.Node) bool
 // Lists the directory required calling the user function on each item found
 //
 // If the user fn ever returns true then it early exits with found = true
-func (f *Fs) list(dir *mega.Node, fn listFn) (found bool, err error) {
+func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, err error) {
 	nodes, err := f.srv.FS.GetChildren(dir)
 	if err != nil {
 		return false, errors.Wrapf(err, "list failed")
@@ -492,13 +493,13 @@ func (f *Fs) list(dir *mega.Node, fn listFn) (found bool, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	dirNode, err := f.lookupDir(dir)
 	if err != nil {
 		return nil, err
 	}
 	var iErr error
-	_, err = f.list(dirNode, func(info *mega.Node) bool {
+	_, err = f.list(ctx, dirNode, func(info *mega.Node) bool {
 		remote := path.Join(dir, info.GetName())
 		switch info.GetType() {
 		case mega.FOLDER, mega.ROOT, mega.INBOX, mega.TRASH:
@@ -551,14 +552,14 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 //
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
 	switch err {
 	case nil:
-		return existingObj, existingObj.Update(in, src, options...)
+		return existingObj, existingObj.Update(ctx, in, src, options...)
 	case fs.ErrorObjectNotFound:
 		// Not found so create it
-		return f.PutUnchecked(in, src)
+		return f.PutUnchecked(ctx, in, src)
 	default:
 		return nil, err
 	}
@@ -573,20 +574,20 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 //
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	remote := src.Remote()
 	size := src.Size()
-	modTime := src.ModTime()
+	modTime := src.ModTime(ctx)
 
 	o, _, _, err := f.createObject(remote, modTime, size)
 	if err != nil {
 		return nil, err
 	}
-	return o, o.Update(in, src, options...)
+	return o, o.Update(ctx, in, src, options...)
 }
 
 // Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	rootNode, err := f.findRoot(true)
 	if err != nil {
 		return err
@@ -648,7 +649,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	return f.purgeCheck(dir, true)
 }
 
@@ -662,7 +663,7 @@ func (f *Fs) Precision() time.Duration {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	return f.purgeCheck("", false)
 }
 
@@ -743,7 +744,7 @@ func (f *Fs) move(dstRemote string, srcFs *Fs, srcRemote string, info *mega.Node
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	dstFs := f
 
 	//log.Printf("Move %q -> %q", src.Remote(), remote)
@@ -776,7 +777,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	dstFs := f
 	srcFs, ok := src.(*Fs)
 	if !ok {
@@ -824,7 +825,7 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(remote string) (link string, err error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
 	root, err := f.findRoot(false)
 	if err != nil {
 		return "", errors.Wrap(err, "PublicLink failed to find root node")
@@ -842,7 +843,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
 
 // MergeDirs merges the contents of all the directories passed
 // in into the first one and rmdirs the other directories.
-func (f *Fs) MergeDirs(dirs []fs.Directory) error {
+func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
 	if len(dirs) < 2 {
 		return nil
 	}
@@ -861,7 +862,7 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
 
 	// list the the objects
 	infos := []*mega.Node{}
-	_, err := f.list(srcDirNode, func(info *mega.Node) bool {
+	_, err := f.list(ctx, srcDirNode, func(info *mega.Node) bool {
 		infos = append(infos, info)
 		return false
 	})
@@ -890,7 +891,7 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
 }
 
 // About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var q mega.QuotaResp
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
@@ -929,7 +930,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the hashes of an object
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	return "", hash.ErrUnsupported
 }
 
@@ -969,12 +970,12 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	return o.info.GetTimeStamp()
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	return fs.ErrorCantSetModTime
 }
 
@@ -1065,7 +1066,7 @@ func (oo *openObject) Close() (err error) {
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var offset, limit int64 = 0, -1
 	for _, option := range options {
 		switch x := option.(type) {
@@ -1103,12 +1104,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 	size := src.Size()
 	if size < 0 {
 		return errors.New("mega backend can't upload a file of unknown length")
 	}
-	//modTime := src.ModTime()
+	//modTime := src.ModTime(ctx)
 	remote := o.Remote()
 
 	// Create the parent directory
@@ -1171,7 +1172,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }
 
 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	err := o.fs.deleteNode(o.info)
 	if err != nil {
 		return errors.Wrap(err, "Remove object failed")
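
A side note, not part of the commit: threading ctx through the interfaces is only the plumbing; the payoff comes when a backend hands the context to whatever does the actual I/O, so that cancelling it really stops the transfer. The helper below is a hypothetical sketch of that pattern using only the standard library, not code from any rclone backend.

package example

import (
	"context"
	"io/ioutil"
	"net/http"
)

// fetch is a hypothetical helper: attaching ctx to the outgoing request means
// the HTTP round trip is aborted as soon as ctx is cancelled or times out.
func fetch(ctx context.Context, url string) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}
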
@@ -3,6 +3,7 @@
 package onedrive
 
 import (
+	"context"
 	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
@@ -353,7 +354,7 @@ func (f *Fs) readMetaDataForPathRelativeToID(normalizedID string, relPath string
 }
 
 // readMetaDataForPath reads the metadata from the path (relative to the absolute root)
-func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Response, err error) {
+func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, resp *http.Response, err error) {
 	firstSlashIndex := strings.IndexRune(path, '/')
 
 	if f.driveType != driveTypePersonal || firstSlashIndex == -1 {
@@ -406,7 +407,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
 	if !insideRoot || !dirCacheFoundRoot {
 		// We do not have the normalized ID in dirCache for our query to base on. Query it manually.
 		firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
-		info, resp, err := f.readMetaDataForPath(firstDir)
+		info, resp, err := f.readMetaDataForPath(ctx, firstDir)
 		if err != nil {
 			return info, resp, err
 		}
@@ -418,7 +419,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, resp *http.Respon
 	} else {
 		// Read metadata based on firstDir
 		firstDir, relPath = path[:firstSlashIndex], path[firstSlashIndex+1:]
-		baseNormalizedID, err = f.dirCache.FindDir(firstDir, false)
+		baseNormalizedID, err = f.dirCache.FindDir(ctx, firstDir, false)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -463,6 +464,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.Background()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -503,12 +505,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 
 	// Renew the token in the background
 	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-		_, _, err := f.readMetaDataForPath("")
+		_, _, err := f.readMetaDataForPath(ctx, "")
 		return err
 	})
 
 	// Get rootID
-	rootInfo, _, err := f.readMetaDataForPath("")
+	rootInfo, _, err := f.readMetaDataForPath(ctx, "")
 	if err != nil || rootInfo.GetID() == "" {
 		return nil, errors.Wrap(err, "failed to get root")
 	}
@@ -516,7 +518,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.dirCache = dircache.New(root, rootInfo.GetID(), f)
 
 	// Find the current root
-	err = f.dirCache.FindRoot(false)
+	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		// Assume it is a file
 		newRoot, remote := dircache.SplitPath(root)
@@ -524,12 +526,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		tempF.dirCache = dircache.New(newRoot, rootInfo.ID, &tempF)
 		tempF.root = newRoot
 		// Make new Fs which is the parent
-		err = tempF.dirCache.FindRoot(false)
+		err = tempF.dirCache.FindRoot(ctx, false)
 		if err != nil {
 			// No root so return old f
 			return f, nil
 		}
-		_, err := tempF.newObjectWithInfo(remote, nil)
+		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
 		if err != nil {
 			if err == fs.ErrorObjectNotFound {
 				// File doesn't exist so return old f
@@ -559,7 +561,7 @@ func (f *Fs) rootSlash() string {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
 	o := &Object{
 		fs:     f,
 		remote: remote,
@@ -569,7 +571,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
 		// Set info
 		err = o.setMetaData(info)
 	} else {
-		err = o.readMetaData() // reads info and meta, returning an error
+		err = o.readMetaData(ctx) // reads info and meta, returning an error
 	}
 	if err != nil {
 		return nil, err
@@ -579,12 +581,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-	return f.newObjectWithInfo(remote, nil)
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+	return f.newObjectWithInfo(ctx, remote, nil)
}
 
 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 	// fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
 	_, ok := f.dirCache.GetInv(pathID)
 	if !ok {
@@ -607,7 +609,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
 }
 
 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(dirID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, err error) {
 	// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
 	var resp *http.Response
 	var info *api.Item
@@ -697,12 +699,12 @@ OUTER:
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-	err = f.dirCache.FindRoot(false)
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	err = f.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		return nil, err
 	}
-	directoryID, err := f.dirCache.FindDir(dir, false)
+	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 	if err != nil {
 		return nil, err
 	}
@@ -723,7 +725,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 			d.SetItems(folder.ChildCount)
 			entries = append(entries, d)
 		} else {
-			o, err := f.newObjectWithInfo(remote, info)
+			o, err := f.newObjectWithInfo(ctx, remote, info)
 			if err != nil {
 				iErr = err
 				return true
@@ -747,9 +749,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // Returns the object, leaf, directoryID and error
 //
 // Used to create new objects
-func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
+func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
 	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
+	leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
 	if err != nil {
 		return nil, leaf, directoryID, err
 	}
@@ -766,26 +768,26 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	remote := src.Remote()
 	size := src.Size()
-	modTime := src.ModTime()
+	modTime := src.ModTime(ctx)
 
-	o, _, _, err := f.createObject(remote, modTime, size)
+	o, _, _, err := f.createObject(ctx, remote, modTime, size)
 	if err != nil {
 		return nil, err
 	}
-	return o, o.Update(in, src, options...)
+	return o, o.Update(ctx, in, src, options...)
 }
 
 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
-	err := f.dirCache.FindRoot(true)
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	err := f.dirCache.FindRoot(ctx, true)
 	if err != nil {
 		return err
 	}
 	if dir != "" {
-		_, err = f.dirCache.FindDir(dir, true)
+		_, err = f.dirCache.FindDir(ctx, dir, true)
 	}
 	return err
 }
@@ -803,17 +805,17 @@ func (f *Fs) deleteObject(id string) error {
 
 // purgeCheck removes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 	root := path.Join(f.root, dir)
 	if root == "" {
 		return errors.New("can't purge root directory")
 	}
 	dc := f.dirCache
-	err := dc.FindRoot(false)
+	err := dc.FindRoot(ctx, false)
 	if err != nil {
 		return err
 	}
-	rootID, err := dc.FindDir(dir, false)
+	rootID, err := dc.FindDir(ctx, dir, false)
 	if err != nil {
 		return err
 	}
@@ -840,8 +842,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
-	return f.purgeCheck(dir, true)
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, true)
 }
 
 // Precision return the precision of this Fs
@@ -850,7 +852,7 @@ func (f *Fs) Precision() time.Duration {
 }
 
 // waitForJob waits for the job with status in url to complete
-func (f *Fs) waitForJob(location string, o *Object) error {
+func (f *Fs) waitForJob(ctx context.Context, location string, o *Object) error {
 	deadline := time.Now().Add(fs.Config.Timeout)
 	for time.Now().Before(deadline) {
 		var resp *http.Response
@@ -881,7 +883,7 @@ func (f *Fs) waitForJob(location string, o *Object) error {
 				return errors.Errorf("%s: async operation returned %q", o.remote, status.Status)
 			}
 		case "completed":
-			err = o.readMetaData()
+			err = o.readMetaData(ctx)
 			return errors.Wrapf(err, "async operation completed but readMetaData failed")
 		}
 
@@ -899,13 +901,13 @@ func (f *Fs) waitForJob(location string, o *Object) error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	err := srcObj.readMetaData()
+	err := srcObj.readMetaData(ctx)
 	if err != nil {
 		return nil, err
 	}
@@ -917,7 +919,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	}
 
 	// Create temporary object
-	dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {
 		return nil, err
 	}
@@ -953,7 +955,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	}
 
 	// Wait for job to finish
-	err = f.waitForJob(location, dstObj)
+	err = f.waitForJob(ctx, location, dstObj)
 	if err != nil {
 		return nil, err
 	}
@@ -961,7 +963,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	// Copy does NOT copy the modTime from the source and there seems to
 	// be no way to set date before
 	// This will create TWO versions on OneDrive
-	err = dstObj.SetModTime(srcObj.ModTime())
+	err = dstObj.SetModTime(ctx, srcObj.ModTime(ctx))
 	if err != nil {
 		return nil, err
 	}
@@ -974,8 +976,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
-	return f.purgeCheck("", false)
+func (f *Fs) Purge(ctx context.Context) error {
+	return f.purgeCheck(ctx, "", false)
 }
 
 // Move src to this remote using server side move operations.
@@ -987,7 +989,7 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -995,7 +997,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	}
 
 	// Create temporary object
-	dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+	dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {
 		return nil, err
 	}
@@ -1049,7 +1051,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -1065,14 +1067,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	}
 
 	// find the root src directory
-	err := srcFs.dirCache.FindRoot(false)
+	err := srcFs.dirCache.FindRoot(ctx, false)
 	if err != nil {
 		return err
 	}
 
 	// find the root dst directory
 	if dstRemote != "" {
-		err = f.dirCache.FindRoot(true)
+		err = f.dirCache.FindRoot(ctx, true)
 		if err != nil {
 			return err
 		}
@@ -1088,14 +1090,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	if dstRemote == "" {
 		findPath = f.root
 	}
-	leaf, dstDirectoryID, err = f.dirCache.FindPath(findPath, true)
+	leaf, dstDirectoryID, err = f.dirCache.FindPath(ctx, findPath, true)
 	if err != nil {
 		return err
 	}
 	parsedDstDirID, dstDriveID, _ := parseNormalizedID(dstDirectoryID)
 
 	// Find ID of src
-	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+	srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
 	if err != nil {
 		return err
 	}
@@ -1109,7 +1111,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 
 	// Check destination does not exist
 	if dstRemote != "" {
-		_, err = f.dirCache.FindDir(dstRemote, false)
+		_, err = f.dirCache.FindDir(ctx, dstRemote, false)
 		if err == fs.ErrorDirNotFound {
 			// OK
 		} else if err != nil {
@@ -1160,7 +1162,7 @@ func (f *Fs) DirCacheFlush() {
 }
 
 // About gets quota information
-func (f *Fs) About() (usage *fs.Usage, err error) {
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	var drive api.Drive
 	opts := rest.Opts{
 		Method: "GET",
@@ -1193,8 +1195,8 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // PublicLink returns a link for downloading without accout.
-func (f *Fs) PublicLink(remote string) (link string, err error) {
-	info, _, err := f.readMetaDataForPath(f.srvPath(remote))
+func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
+	info, _, err := f.readMetaDataForPath(ctx, f.srvPath(remote))
 	if err != nil {
 		return "", err
 	}
@@ -1249,7 +1251,7 @@ func (o *Object) srvPath() string {
 }
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if o.fs.driveType == driveTypePersonal {
 		if t == hash.SHA1 {
 			return o.sha1, nil
@@ -1264,7 +1266,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {
 
 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-	err := o.readMetaData()
+	err := o.readMetaData(context.TODO())
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
 		return 0
@@ -1313,11 +1315,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 	if o.hasMetaData {
 		return nil
 	}
-	info, _, err := o.fs.readMetaDataForPath(o.srvPath())
+	info, _, err := o.fs.readMetaDataForPath(ctx, o.srvPath())
 	if err != nil {
 		if apiErr, ok := err.(*api.Error); ok {
 			if apiErr.ErrorInfo.Code == "itemNotFound" {
@@ -1334,8 +1336,8 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
-	err := o.readMetaData()
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	err := o.readMetaData(ctx)
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
|
||||||
return time.Now()
|
return time.Now()
|
||||||
@ -1344,9 +1346,9 @@ func (o *Object) ModTime() time.Time {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// setModTime sets the modification time of the local fs object
|
// setModTime sets the modification time of the local fs object
|
||||||
func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
|
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
|
||||||
var opts rest.Opts
|
var opts rest.Opts
|
||||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
|
||||||
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
|
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
|
||||||
if drive != "" {
|
if drive != "" {
|
||||||
opts = rest.Opts{
|
opts = rest.Opts{
|
||||||
@ -1375,8 +1377,8 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// SetModTime sets the modification time of the local fs object
|
// SetModTime sets the modification time of the local fs object
|
||||||
func (o *Object) SetModTime(modTime time.Time) error {
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||||
info, err := o.setModTime(modTime)
|
info, err := o.setModTime(ctx, modTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1389,7 +1391,7 @@ func (o *Object) Storable() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Open an object for read
|
// Open an object for read
|
||||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||||
if o.id == "" {
|
if o.id == "" {
|
||||||
return nil, errors.New("can't download - no id")
|
return nil, errors.New("can't download - no id")
|
||||||
}
|
}
|
||||||
@ -1418,8 +1420,8 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// createUploadSession creates an upload session for the object
|
// createUploadSession creates an upload session for the object
|
||||||
func (o *Object) createUploadSession(modTime time.Time) (response *api.CreateUploadResponse, err error) {
|
func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) {
|
||||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
|
||||||
id, drive, rootURL := parseNormalizedID(directoryID)
|
id, drive, rootURL := parseNormalizedID(directoryID)
|
||||||
var opts rest.Opts
|
var opts rest.Opts
|
||||||
if drive != "" {
|
if drive != "" {
|
||||||
@ -1498,7 +1500,7 @@ func (o *Object) cancelUploadSession(url string) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// uploadMultipart uploads a file using multipart upload
|
// uploadMultipart uploads a file using multipart upload
|
||||||
func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||||
if size <= 0 {
|
if size <= 0 {
|
||||||
return nil, errors.New("unknown-sized upload not supported")
|
return nil, errors.New("unknown-sized upload not supported")
|
||||||
}
|
}
|
||||||
@ -1522,7 +1524,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
|||||||
|
|
||||||
// Create upload session
|
// Create upload session
|
||||||
fs.Debugf(o, "Starting multipart upload")
|
fs.Debugf(o, "Starting multipart upload")
|
||||||
session, err := o.createUploadSession(modTime)
|
session, err := o.createUploadSession(ctx, modTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
close(uploadURLChan)
|
close(uploadURLChan)
|
||||||
atexit.Unregister(cancelFuncHandle)
|
atexit.Unregister(cancelFuncHandle)
|
||||||
@ -1562,7 +1564,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
|
|||||||
|
|
||||||
// Update the content of a remote file within 4MB size in one single request
|
// Update the content of a remote file within 4MB size in one single request
|
||||||
// This function will set modtime after uploading, which will create a new version for the remote file
|
// This function will set modtime after uploading, which will create a new version for the remote file
|
||||||
func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
|
||||||
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
|
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
|
||||||
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
|
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4MiB")
|
||||||
}
|
}
|
||||||
@ -1570,7 +1572,7 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
|
|||||||
fs.Debugf(o, "Starting singlepart upload")
|
fs.Debugf(o, "Starting singlepart upload")
|
||||||
var resp *http.Response
|
var resp *http.Response
|
||||||
var opts rest.Opts
|
var opts rest.Opts
|
||||||
leaf, directoryID, _ := o.fs.dirCache.FindPath(o.remote, false)
|
leaf, directoryID, _ := o.fs.dirCache.FindPath(ctx, o.remote, false)
|
||||||
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
|
trueDirID, drive, rootURL := parseNormalizedID(directoryID)
|
||||||
if drive != "" {
|
if drive != "" {
|
||||||
opts = rest.Opts{
|
opts = rest.Opts{
|
||||||
@ -1608,13 +1610,13 @@ func (o *Object) uploadSinglepart(in io.Reader, size int64, modTime time.Time) (
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// Set the mod time now and read metadata
|
// Set the mod time now and read metadata
|
||||||
return o.setModTime(modTime)
|
return o.setModTime(ctx, modTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the object with the contents of the io.Reader, modTime and size
|
// Update the object with the contents of the io.Reader, modTime and size
|
||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||||
if o.hasMetaData && o.isOneNoteFile {
|
if o.hasMetaData && o.isOneNoteFile {
|
||||||
return errors.New("can't upload content to a OneNote file")
|
return errors.New("can't upload content to a OneNote file")
|
||||||
}
|
}
|
||||||
@ -1623,13 +1625,13 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
defer o.fs.tokenRenewer.Stop()
|
defer o.fs.tokenRenewer.Stop()
|
||||||
|
|
||||||
size := src.Size()
|
size := src.Size()
|
||||||
modTime := src.ModTime()
|
modTime := src.ModTime(ctx)
|
||||||
|
|
||||||
var info *api.Item
|
var info *api.Item
|
||||||
if size > 0 {
|
if size > 0 {
|
||||||
info, err = o.uploadMultipart(in, size, modTime)
|
info, err = o.uploadMultipart(ctx, in, size, modTime)
|
||||||
} else if size == 0 {
|
} else if size == 0 {
|
||||||
info, err = o.uploadSinglepart(in, size, modTime)
|
info, err = o.uploadSinglepart(ctx, in, size, modTime)
|
||||||
} else {
|
} else {
|
||||||
return errors.New("unknown-sized upload not supported")
|
return errors.New("unknown-sized upload not supported")
|
||||||
}
|
}
|
||||||
@ -1641,12 +1643,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove() error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
return o.fs.deleteObject(o.id)
|
return o.fs.deleteObject(o.id)
|
||||||
}
|
}
|
||||||
|
|
||||||
// MimeType of an Object if known, "" otherwise
|
// MimeType of an Object if known, "" otherwise
|
||||||
func (o *Object) MimeType() string {
|
func (o *Object) MimeType(ctx context.Context) string {
|
||||||
return o.mimeType
|
return o.mimeType
|
||||||
}
|
}
|
||||||
|
|
||||||
|
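Every method signature above gains a context.Context as its first parameter. The following is a minimal, self-contained Go sketch (illustrative only, not part of this patch; the object type, the 50ms simulated API call and the file name are invented for the example) of what that buys: a cancelled or expired caller context now stops work at the lowest layer and surfaces through the same fallback path seen in ModTime above.

package main

import (
	"context"
	"fmt"
	"time"
)

// object stands in for a backend object; its methods mirror the patched
// shape, taking the caller's context as the first argument.
type object struct{ name string }

// readMetaData pretends to call a remote API but gives up as soon as the
// caller's context is cancelled or its deadline passes.
func (o *object) readMetaData(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // simulated API round trip
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// ModTime threads the context straight through and falls back to time.Now()
// on error, matching the error handling in the hunks above.
func (o *object) ModTime(ctx context.Context) time.Time {
	if err := o.readMetaData(ctx); err != nil {
		fmt.Printf("%s: failed to read metadata: %v\n", o.name, err)
		return time.Now()
	}
	return time.Unix(0, 0)
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	o := &object{name: "file.txt"}
	fmt.Println(o.ModTime(ctx)) // deadline expires before the simulated call finishes
}

Running the sketch reports the metadata failure and falls back to time.Now(), because the 10ms deadline expires before the simulated 50ms call completes.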
@@ -2,6 +2,7 @@ package opendrive

 import (
 "bytes"
+"context"
 "fmt"
 "io"
 "mime/multipart"
@@ -121,6 +122,7 @@ func (f *Fs) DirCacheFlush() {

 // NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ctx := context.Background()
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -175,7 +177,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 }).Fill(f)

 // Find the current root
-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
 if err != nil {
 // Assume it is a file
 newRoot, remote := dircache.SplitPath(root)
@@ -184,12 +186,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 tempF.root = newRoot

 // Make new Fs which is the parent
-err = tempF.dirCache.FindRoot(false)
+err = tempF.dirCache.FindRoot(ctx, false)
 if err != nil {
 // No root so return old f
 return f, nil
 }
-_, err := tempF.newObjectWithInfo(remote, nil)
+_, err := tempF.newObjectWithInfo(ctx, remote, nil)
 if err != nil {
 if err == fs.ErrorObjectNotFound {
 // File doesn't exist so return old f
@@ -233,14 +235,14 @@ func errorHandler(resp *http.Response) error {
 }

 // Mkdir creates the folder if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 // fs.Debugf(nil, "Mkdir(\"%s\")", dir)
-err := f.dirCache.FindRoot(true)
+err := f.dirCache.FindRoot(ctx, true)
 if err != nil {
 return err
 }
 if dir != "" {
-_, err = f.dirCache.FindDir(dir, true)
+_, err = f.dirCache.FindDir(ctx, dir, true)
 }
 return err
 }
@@ -261,17 +263,17 @@ func (f *Fs) deleteObject(id string) error {

 // purgeCheck remotes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 root := path.Join(f.root, dir)
 if root == "" {
 return errors.New("can't purge root directory")
 }
 dc := f.dirCache
-err := dc.FindRoot(false)
+err := dc.FindRoot(ctx, false)
 if err != nil {
 return err
 }
-rootID, err := dc.FindDir(dir, false)
+rootID, err := dc.FindDir(ctx, dir, false)
 if err != nil {
 return err
 }
@@ -293,9 +295,9 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 // fs.Debugf(nil, "Rmdir(\"%s\")", path.Join(f.root, dir))
-return f.purgeCheck(dir, true)
+return f.purgeCheck(ctx, dir, true)
 }

 // Precision of the remote
@@ -312,14 +314,14 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 // fs.Debugf(nil, "Copy(%v)", remote)
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't copy - not same remote type")
 return nil, fs.ErrorCantCopy
 }
-err := srcObj.readMetaData()
+err := srcObj.readMetaData(ctx)
 if err != nil {
 return nil, err
 }
@@ -331,7 +333,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }

 // Create temporary object
-dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 if err != nil {
 return nil, err
 }
@@ -376,20 +378,20 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 // fs.Debugf(nil, "Move(%v)", remote)
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't move - not same remote type")
 return nil, fs.ErrorCantCopy
 }
-err := srcObj.readMetaData()
+err := srcObj.readMetaData(ctx)
 if err != nil {
 return nil, err
 }

 // Create temporary object
-dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 if err != nil {
 return nil, err
 }
@@ -432,7 +434,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
 srcFs, ok := src.(*Fs)
 if !ok {
 fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -448,14 +450,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 }

 // find the root src directory
-err = srcFs.dirCache.FindRoot(false)
+err = srcFs.dirCache.FindRoot(ctx, false)
 if err != nil {
 return err
 }

 // find the root dst directory
 if dstRemote != "" {
-err = f.dirCache.FindRoot(true)
+err = f.dirCache.FindRoot(ctx, true)
 if err != nil {
 return err
 }
@@ -471,14 +473,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 if dstRemote == "" {
 findPath = f.root
 }
-leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
+leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
 if err != nil {
 return err
 }

 // Check destination does not exist
 if dstRemote != "" {
-_, err = f.dirCache.FindDir(dstRemote, false)
+_, err = f.dirCache.FindDir(ctx, dstRemote, false)
 if err == fs.ErrorDirNotFound {
 // OK
 } else if err != nil {
@@ -489,7 +491,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 }

 // Find ID of src
-srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
 if err != nil {
 return err
 }
@@ -526,14 +528,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
-return f.purgeCheck("", false)
+return f.purgeCheck(ctx, "", false)
 }

 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, file *File) (fs.Object, error) {
 // fs.Debugf(nil, "newObjectWithInfo(%s, %v)", remote, file)

 var o *Object
@@ -552,7 +554,7 @@ func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) {
 remote: remote,
 }

-err := o.readMetaData()
+err := o.readMetaData(ctx)
 if err != nil {
 return nil, err
 }
@@ -562,9 +564,9 @@ func (f *Fs) newObjectWithInfo(remote string, file *File) (fs.Object, error) {

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 // fs.Debugf(nil, "NewObject(\"%s\")", remote)
-return f.newObjectWithInfo(remote, nil)
+return f.newObjectWithInfo(ctx, remote, nil)
 }

 // Creates from the parameters passed in a half finished Object which
@@ -573,9 +575,9 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 // Returns the object, leaf, directoryID and error
 //
 // Used to create new objects
-func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
+func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
 // Create the directory for the object if it doesn't exist
-leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
+leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
 if err != nil {
 return nil, leaf, directoryID, err
 }
@@ -613,14 +615,14 @@ func (f *Fs) readMetaDataForFolderID(id string) (info *FolderList, err error) {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 remote := src.Remote()
 size := src.Size()
-modTime := src.ModTime()
+modTime := src.ModTime(ctx)

 // fs.Debugf(nil, "Put(%s)", remote)

-o, leaf, directoryID, err := f.createObject(remote, modTime, size)
+o, leaf, directoryID, err := f.createObject(ctx, remote, modTime, size)
 if err != nil {
 return nil, err
 }
@@ -628,7 +630,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 if "" == o.id {
 // Attempt to read ID, ignore error
 // FIXME is this correct?
-_ = o.readMetaData()
+_ = o.readMetaData(ctx)
 }

 if "" == o.id {
@@ -651,7 +653,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 o.id = response.FileID
 }

-return o, o.Update(in, src, options...)
+return o, o.Update(ctx, in, src, options...)
 }

 // retryErrorCodes is a slice of error codes that we will retry
@@ -676,7 +678,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
 // DirCacher methods

 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
 // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, replaceReservedChars(leaf))
 var resp *http.Response
 response := createFolderResponse{}
@@ -705,7 +707,7 @@ func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
 }

 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 // fs.Debugf(nil, "FindLeaf(\"%s\", \"%s\")", pathID, leaf)

 if pathID == "0" && leaf == "" {
@@ -751,13 +753,13 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 // fs.Debugf(nil, "List(%v)", dir)
-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
 if err != nil {
 return nil, err
 }
-directoryID, err := f.dirCache.FindDir(dir, false)
+directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 if err != nil {
 return nil, err
 }
@@ -791,7 +793,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 file.Name = restoreReservedChars(file.Name)
 // fs.Debugf(nil, "File: %s (%s)", file.Name, file.FileID)
 remote := path.Join(dir, file.Name)
-o, err := f.newObjectWithInfo(remote, &file)
+o, err := f.newObjectWithInfo(ctx, remote, &file)
 if err != nil {
 return nil, err
 }
@@ -822,7 +824,7 @@ func (o *Object) Remote() string {
 }

 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 if t != hash.MD5 {
 return "", hash.ErrUnsupported
 }
@@ -839,12 +841,12 @@ func (o *Object) Size() int64 {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 return o.modTime
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 // fs.Debugf(nil, "SetModTime(%v)", modTime.String())
 opts := rest.Opts{
 Method: "PUT",
@@ -863,7 +865,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // fs.Debugf(nil, "Open(\"%v\")", o.remote)
 fs.FixRangeOption(options, o.size)
 opts := rest.Opts{
@@ -884,7 +886,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 }

 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 // fs.Debugf(nil, "Remove(\"%s\")", o.id)
 return o.fs.pacer.Call(func() (bool, error) {
 opts := rest.Opts{
@@ -905,9 +907,9 @@ func (o *Object) Storable() bool {
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 size := src.Size()
-modTime := src.ModTime()
+modTime := src.ModTime(ctx)
 // fs.Debugf(nil, "Update(\"%s\", \"%s\")", o.id, o.remote)

 // Open file for upload
@@ -1050,7 +1052,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 o.size = closeResponse.Size

 // Set the mod time now
-err = o.SetModTime(modTime)
+err = o.SetModTime(ctx, modTime)
 if err != nil {
 return err
 }
@@ -1071,11 +1073,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 return err
 }

-return o.readMetaData()
+return o.readMetaData(ctx)
 }

-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
-leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
+leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
 if err != nil {
 if err == fs.ErrorDirNotFound {
 return fs.ErrorObjectNotFound
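Entry points whose signatures do not take a context yet, such as NewFs and Size in the hunks above and below, seed one locally with context.Background() or context.TODO() and pass it down. A small self-contained sketch of that stop-gap follows (illustrative only; dirCache and newFs here are stand-ins rather than the rclone types).

package main

import (
	"context"
	"fmt"
)

// dirCache stands in for the directory cache; FindRoot now requires the
// caller's context, as in the patched backends.
type dirCache struct{}

func (dc *dirCache) FindRoot(ctx context.Context, create bool) error {
	if err := ctx.Err(); err != nil { // honour an already-cancelled context
		return err
	}
	fmt.Println("finding root, create =", create)
	return nil
}

// newFs has no context parameter of its own, so it seeds one at the boundary
// and hands it down, the same shape as the ctx := context.Background() lines
// added to NewFs in this commit.
func newFs(root string) error {
	ctx := context.Background()
	dc := &dirCache{}
	fmt.Println("opening", root)
	return dc.FindRoot(ctx, false)
}

func main() {
	if err := newFs("remote:path"); err != nil {
		fmt.Println("error:", err)
	}
}

The point of the stand-in is the shape: the boundary function owns the context it creates, while everything beneath it only accepts one from its caller.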
@@ -10,6 +10,7 @@ package pcloud

 import (
 "bytes"
+"context"
 "fmt"
 "io"
 "io/ioutil"
@@ -192,9 +193,9 @@ func restoreReservedChars(x string) string {
 }

 // readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
+func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
 // defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
-leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
+leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
 if err != nil {
 if err == fs.ErrorDirNotFound {
 return nil, fs.ErrorObjectNotFound
@@ -237,6 +238,7 @@ func errorHandler(resp *http.Response) error {

 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ctx := context.Background()
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -264,7 +266,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

 // Renew the token in the background
 f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-_, err := f.readMetaDataForPath("")
+_, err := f.readMetaDataForPath(ctx, "")
 return err
 })

@@ -272,7 +274,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 f.dirCache = dircache.New(root, rootID, f)

 // Find the current root
-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
 if err != nil {
 // Assume it is a file
 newRoot, remote := dircache.SplitPath(root)
@@ -280,12 +282,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
 tempF.root = newRoot
 // Make new Fs which is the parent
-err = tempF.dirCache.FindRoot(false)
+err = tempF.dirCache.FindRoot(ctx, false)
 if err != nil {
 // No root so return old f
 return f, nil
 }
-_, err := tempF.newObjectWithInfo(remote, nil)
+_, err := tempF.newObjectWithInfo(ctx, remote, nil)
 if err != nil {
 if err == fs.ErrorObjectNotFound {
 // File doesn't exist so return old f
@@ -307,7 +309,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
 o := &Object{
 fs: f,
 remote: remote,
@@ -317,7 +319,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
 // Set info
 err = o.setMetaData(info)
 } else {
-err = o.readMetaData() // reads info and meta, returning an error
+err = o.readMetaData(ctx) // reads info and meta, returning an error
 }
 if err != nil {
 return nil, err
@@ -327,12 +329,12 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)

 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-return f.newObjectWithInfo(remote, nil)
+return f.newObjectWithInfo(ctx, remote, nil)
 }

 // FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
 // Find the leaf in pathID
 found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
 if item.Name == leaf {
@@ -345,7 +347,7 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
 }

 // CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
 // fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
 var resp *http.Response
 var result api.ItemResult
@@ -448,12 +450,12 @@ func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn list
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
 if err != nil {
 return nil, err
 }
-directoryID, err := f.dirCache.FindDir(dir, false)
+directoryID, err := f.dirCache.FindDir(ctx, dir, false)
 if err != nil {
 return nil, err
 }
@@ -467,7 +469,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // FIXME more info from dir?
 entries = append(entries, d)
 } else {
-o, err := f.newObjectWithInfo(remote, info)
+o, err := f.newObjectWithInfo(ctx, remote, info)
 if err != nil {
 iErr = err
 return true
@@ -491,9 +493,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // Returns the object, leaf, directoryID and error
 //
 // Used to create new objects
-func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
+func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
 // Create the directory for the object if it doesn't exist
-leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
+leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
 if err != nil {
 return
 }
@@ -510,43 +512,43 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 remote := src.Remote()
 size := src.Size()
-modTime := src.ModTime()
+modTime := src.ModTime(ctx)

-o, _, _, err := f.createObject(remote, modTime, size)
+o, _, _, err := f.createObject(ctx, remote, modTime, size)
 if err != nil {
 return nil, err
 }
-return o, o.Update(in, src, options...)
+return o, o.Update(ctx, in, src, options...)
 }

 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
-err := f.dirCache.FindRoot(true)
+err := f.dirCache.FindRoot(ctx, true)
 if err != nil {
 return err
 }
 if dir != "" {
-_, err = f.dirCache.FindDir(dir, true)
+_, err = f.dirCache.FindDir(ctx, dir, true)
 }
 return err
 }

 // purgeCheck removes the root directory, if check is set then it
 // refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
 root := path.Join(f.root, dir)
 if root == "" {
 return errors.New("can't purge root directory")
 }
 dc := f.dirCache
-err := dc.FindRoot(false)
+err := dc.FindRoot(ctx, false)
 if err != nil {
 return err
 }
-rootID, err := dc.FindDir(dir, false)
+rootID, err := dc.FindDir(ctx, dir, false)
 if err != nil {
 return err
 }
@@ -580,8 +582,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
-return f.purgeCheck(dir, true)
+return f.purgeCheck(ctx, dir, true)
 }

 // Precision return the precision of this Fs
@@ -598,19 +600,19 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't copy - not same remote type")
 return nil, fs.ErrorCantCopy
 }
-err := srcObj.readMetaData()
+err := srcObj.readMetaData(ctx)
 if err != nil {
 return nil, err
 }

 // Create temporary object
-dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 if err != nil {
 return nil, err
 }
@@ -647,13 +649,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
-return f.purgeCheck("", false)
+return f.purgeCheck(ctx, "", false)
 }

 // CleanUp empties the trash
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
-err := f.dirCache.FindRoot(false)
+err := f.dirCache.FindRoot(ctx, false)
 if err != nil {
 return err
 }
@@ -681,7 +683,7 @@ func (f *Fs) CleanUp() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't move - not same remote type")
@@ -689,7 +691,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 }

 // Create temporary object
-dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 if err != nil {
 return nil, err
 }
@@ -729,7 +731,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 srcFs, ok := src.(*Fs)
 if !ok {
 fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -745,14 +747,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }

 // find the root src directory
-err := srcFs.dirCache.FindRoot(false)
+err := srcFs.dirCache.FindRoot(ctx, false)
 if err != nil {
 return err
 }

 // find the root dst directory
 if dstRemote != "" {
-err = f.dirCache.FindRoot(true)
+err = f.dirCache.FindRoot(ctx, true)
 if err != nil {
 return err
 }
@@ -768,14 +770,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 if dstRemote == "" {
 findPath = f.root
 }
-leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
+leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
 if err != nil {
 return err
 }

 // Check destination does not exist
 if dstRemote != "" {
-_, err = f.dirCache.FindDir(dstRemote, false)
+_, err = f.dirCache.FindDir(ctx, dstRemote, false)
 if err == fs.ErrorDirNotFound {
 // OK
 } else if err != nil {
@@ -786,7 +788,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }

 // Find ID of src
-srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
 if err != nil {
 return err
 }
@@ -822,7 +824,7 @@ func (f *Fs) DirCacheFlush() {
 }

 // About gets quota information
-func (f *Fs) About() (usage *fs.Usage, err error) {
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 opts := rest.Opts{
 Method: "POST",
 Path: "/userinfo",
@@ -871,7 +873,7 @@ func (o *Object) Remote() string {
 }

 // getHashes fetches the hashes into the object
-func (o *Object) getHashes() (err error) {
+func (o *Object) getHashes(ctx context.Context) (err error) {
 var resp *http.Response
 var result api.ChecksumFileResult
 opts := rest.Opts{
@@ -893,12 +895,12 @@ func (o *Object) getHashes() (err error) {
 }

 // Hash returns the SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 if t != hash.MD5 && t != hash.SHA1 {
 return "", hash.ErrUnsupported
 }
 if o.md5 == "" && o.sha1 == "" {
-err := o.getHashes()
+err := o.getHashes(ctx)
 if err != nil {
 return "", errors.Wrap(err, "failed to get hash")
 }
@@ -911,7 +913,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {

 // Size returns the size of an object in bytes
 func (o *Object) Size() int64 {
-err := o.readMetaData()
+err := o.readMetaData(context.TODO())
 if err != nil {
 fs.Logf(o, "Failed to read metadata: %v", err)
 return 0
@@ -940,11 +942,11 @@ func (o *Object) setHashes(hashes *api.Hashes) {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 if o.hasMetaData {
 return nil
 }
-info, err := o.fs.readMetaDataForPath(o.remote)
+info, err := o.fs.readMetaDataForPath(ctx, o.remote)
 if err != nil {
 //if apiErr, ok := err.(*api.Error); ok {
 // FIXME
@@ -962,8 +964,8 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
-err := o.readMetaData()
+err := o.readMetaData(ctx)
 if err != nil {
 fs.Logf(o, "Failed to read metadata: %v", err)
 return time.Now()
@@ -972,7 +974,7 @@ func (o *Object) ModTime() time.Time {
 }

 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 // Pcloud doesn't have a way of doing this so returning this
 // error will cause the file to be re-uploaded to set the time.
 return fs.ErrorCantSetModTime
@@ -1015,7 +1017,7 @@ func (o *Object) downloadURL() (URL string, err error) {
 }

 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 url, err := o.downloadURL()
 if err != nil {
 return nil, err
@@ -1041,16 +1043,16 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 o.fs.tokenRenewer.Start()
 defer o.fs.tokenRenewer.Stop()

 size := src.Size() // NB can upload without size
-modTime := src.ModTime()
+modTime := src.ModTime(ctx)
 remote := o.Remote()

 // Create the directory for the object if it doesn't exist
|
||||||
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
|
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1073,7 +1075,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
Method: "PUT",
|
Method: "PUT",
|
||||||
Path: "/uploadfile",
|
Path: "/uploadfile",
|
||||||
Body: in,
|
Body: in,
|
||||||
ContentType: fs.MimeType(o),
|
ContentType: fs.MimeType(ctx, o),
|
||||||
ContentLength: &size,
|
ContentLength: &size,
|
||||||
Parameters: url.Values{},
|
Parameters: url.Values{},
|
||||||
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
||||||
@ -1114,9 +1116,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
// sometimes pcloud leaves a half complete file on
|
// sometimes pcloud leaves a half complete file on
|
||||||
// error, so delete it if it exists
|
// error, so delete it if it exists
|
||||||
delObj, delErr := o.fs.NewObject(o.remote)
|
delObj, delErr := o.fs.NewObject(ctx, o.remote)
|
||||||
if delErr == nil && delObj != nil {
|
if delErr == nil && delObj != nil {
|
||||||
_ = delObj.Remove()
|
_ = delObj.Remove(ctx)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1128,7 +1130,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Remove an object
|
// Remove an object
|
||||||
func (o *Object) Remove() error {
|
func (o *Object) Remove(ctx context.Context) error {
|
||||||
opts := rest.Opts{
|
opts := rest.Opts{
|
||||||
Method: "POST",
|
Method: "POST",
|
||||||
Path: "/deletefile",
|
Path: "/deletefile",
|
||||||
|
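With the object methods above now taking a context.Context as their first parameter (Open, Update, Remove, Hash, ModTime and so on), a caller can attach a deadline or cancellation to a download and have it travel into the backend. The following is a minimal sketch, not part of this commit; the remote name "TestPcloud:", the object path, and the import path of the fs package are assumptions for illustration only.

package main

import (
	"context"
	"io"
	"log"
	"os"
	"time"

	"github.com/rclone/rclone/fs" // import path assumed for this sketch
)

func main() {
	// Creating the Fs is unchanged at this point in the series.
	f, err := fs.NewFs("TestPcloud:") // hypothetical configured remote
	if err != nil {
		log.Fatal(err)
	}

	// Give the whole download a deadline; the ctx is handed to the
	// backend through the new NewObject/Open signatures.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	obj, err := f.NewObject(ctx, "dir/file.txt") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	in, err := obj.Open(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()
	if _, err := io.Copy(os.Stdout, in); err != nil {
		log.Fatal(err)
	}
}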
backend/qingstor/qingstor.go
@@ -6,6 +6,7 @@
 package qingstor
 
 import (
+"context"
 "fmt"
 "io"
 "net/http"
@@ -407,12 +408,12 @@ func (f *Fs) Features() *fs.Features {
 }
 
 // Put created a new object
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 fsObj := &Object{
 fs: f,
 remote: src.Remote(),
 }
-return fsObj, fsObj.Update(in, src, options...)
+return fsObj, fsObj.Update(ctx, in, src, options...)
 }
 
 // Copy src to this remote using server side copy operations.
@@ -424,8 +425,8 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
-err := f.Mkdir("")
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+err := f.Mkdir(ctx, "")
 if err != nil {
 return nil, err
 }
@@ -452,12 +453,12 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 fs.Debugf(f, "Copy Failed, API Error: %v", err)
 return nil, err
 }
-return f.NewObject(remote)
+return f.NewObject(ctx, remote)
 }
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 return f.newObjectWithInfo(remote, nil)
 }
 
@@ -510,7 +511,7 @@ type listFn func(remote string, object *qs.KeyType, isDirectory bool) error
 // dir is the starting directory, "" for root
 //
 // Set recurse to read sub directories
-func (f *Fs) list(dir string, recurse bool, fn listFn) error {
+func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
 prefix := f.root
 if dir != "" {
 prefix += dir + "/"
@@ -620,9 +621,9 @@ func (f *Fs) markBucketOK() {
 }
 
 // listDir lists files and directories to out
-func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 // List the objects and directories
-err = f.list(dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
+err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
 entry, err := f.itemToDirEntry(remote, object, isDirectory)
 if err != nil {
 return err
@@ -670,11 +671,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 if f.bucket == "" {
 return f.listBuckets(dir)
 }
-return f.listDir(dir)
+return f.listDir(ctx, dir)
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -693,12 +694,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 if f.bucket == "" {
 return fs.ErrorListBucketRequired
 }
 list := walk.NewListRHelper(callback)
-err = f.list(dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
+err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
 entry, err := f.itemToDirEntry(remote, object, isDirectory)
 if err != nil {
 return err
@@ -734,7 +735,7 @@ func (f *Fs) dirExists() (bool, error) {
 }
 
 // Mkdir creates the bucket if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 f.bucketOKMu.Lock()
 defer f.bucketOKMu.Unlock()
 if f.bucketOK {
@@ -810,7 +811,7 @@ func (f *Fs) dirIsEmpty() (bool, error) {
 }
 
 // Rmdir delete a bucket
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 f.bucketOKMu.Lock()
 defer f.bucketOKMu.Unlock()
 if f.root != "" || dir != "" {
@@ -913,7 +914,7 @@ func (o *Object) readMetaData() (err error) {
 
 // ModTime returns the modification date of the file
 // It should return a best guess if one isn't available
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 err := o.readMetaData()
 if err != nil {
 fs.Logf(o, "Failed to read metadata, %v", err)
@@ -924,13 +925,13 @@ func (o *Object) ModTime() time.Time {
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 err := o.readMetaData()
 if err != nil {
 return err
 }
 o.lastModified = modTime
-mimeType := fs.MimeType(o)
+mimeType := fs.MimeType(ctx, o)
 
 if o.size >= maxSizeForCopy {
 fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
@@ -955,7 +956,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 }
 
 // Open opens the file for read. Call Close() on the returned io.ReadCloser
-func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
 bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
 if err != nil {
 return nil, err
@@ -982,16 +983,16 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 }
 
 // Update in to the object
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 // The maximum size of upload object is multipartUploadSize * MaxMultipleParts
-err := o.fs.Mkdir("")
+err := o.fs.Mkdir(ctx, "")
 if err != nil {
 return err
 }
 
 key := o.fs.root + o.remote
 // Guess the content type
-mimeType := fs.MimeType(src)
+mimeType := fs.MimeType(ctx, src)
 
 req := uploadInput{
 body: in,
@@ -1021,7 +1022,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }
 
 // Remove this object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
 if err != nil {
 return err
@@ -1041,7 +1042,7 @@ var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 if t != hash.MD5 {
 return "", hash.ErrUnsupported
 }
@@ -1078,7 +1079,7 @@ func (o *Object) Size() int64 {
 }
 
 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 err := o.readMetaData()
 if err != nil {
 fs.Logf(o, "Failed to read metadata: %v", err)
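The same pattern applies on the listing path: List and ListR now receive the caller's context and hand it down to the internal list helper. A rough caller-side sketch follows; it is not part of the commit, and the remote name, bucket, and import path are assumptions.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/rclone/rclone/fs" // import path assumed for this sketch
)

func main() {
	f, err := fs.NewFs("TestQingStor:bucket") // hypothetical configured remote
	if err != nil {
		log.Fatal(err)
	}

	// Bound the listing with a timeout via the new ctx argument to List.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	entries, err := f.List(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
}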
backend/s3/s3.go
@@ -14,6 +14,7 @@ What happens if you CTRL-C a multipart upload
 */
 
 import (
+"context"
 "encoding/base64"
 "encoding/hex"
 "fmt"
@@ -1109,7 +1110,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Return an Object from a path
 //
 //If it can't be found it returns the error ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object) (fs.Object, error) {
 o := &Object{
 fs: f,
 remote: remote,
@@ -1125,7 +1126,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error
 o.etag = aws.StringValue(info.ETag)
 o.bytes = aws.Int64Value(info.Size)
 } else {
-err := o.readMetaData() // reads info and meta, returning an error
+err := o.readMetaData(ctx) // reads info and meta, returning an error
 if err != nil {
 return nil, err
 }
@@ -1135,8 +1136,8 @@ func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-return f.newObjectWithInfo(remote, nil)
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+return f.newObjectWithInfo(ctx, remote, nil)
 }
 
 // Gets the bucket location
@@ -1192,7 +1193,7 @@ type listFn func(remote string, object *s3.Object, isDirectory bool) error
 // dir is the starting directory, "" for root
 //
 // Set recurse to read sub directories
-func (f *Fs) list(dir string, recurse bool, fn listFn) error {
+func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
 root := f.root
 if dir != "" {
 root += dir + "/"
@@ -1215,7 +1216,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
 var resp *s3.ListObjectsOutput
 var err error
 err = f.pacer.Call(func() (bool, error) {
-resp, err = f.c.ListObjects(&req)
+resp, err = f.c.ListObjectsWithContext(ctx, &req)
 return f.shouldRetry(err)
 })
 if err != nil {
@@ -1289,7 +1290,7 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
 }
 
 // Convert a list item into a DirEntry
-func (f *Fs) itemToDirEntry(remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
+func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
 if isDirectory {
 size := int64(0)
 if object.Size != nil {
@@ -1298,7 +1299,7 @@ func (f *Fs) itemToDirEntry(remote string, object *s3.Object, isDirectory bool)
 d := fs.NewDir(remote, time.Time{}).SetSize(size)
 return d, nil
 }
-o, err := f.newObjectWithInfo(remote, object)
+o, err := f.newObjectWithInfo(ctx, remote, object)
 if err != nil {
 return nil, err
 }
@@ -1316,10 +1317,10 @@ func (f *Fs) markBucketOK() {
 }
 
 // listDir lists files and directories to out
-func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 // List the objects and directories
-err = f.list(dir, false, func(remote string, object *s3.Object, isDirectory bool) error {
-entry, err := f.itemToDirEntry(remote, object, isDirectory)
+err = f.list(ctx, dir, false, func(remote string, object *s3.Object, isDirectory bool) error {
+entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
 if err != nil {
 return err
 }
@@ -1337,14 +1338,14 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
 }
 
 // listBuckets lists the buckets to out
-func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) listBuckets(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 if dir != "" {
 return nil, fs.ErrorListBucketRequired
 }
 req := s3.ListBucketsInput{}
 var resp *s3.ListBucketsOutput
 err = f.pacer.Call(func() (bool, error) {
-resp, err = f.c.ListBuckets(&req)
+resp, err = f.c.ListBucketsWithContext(ctx, &req)
 return f.shouldRetry(err)
 })
 if err != nil {
@@ -1366,11 +1367,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 if f.bucket == "" {
-return f.listBuckets(dir)
+return f.listBuckets(ctx, dir)
 }
-return f.listDir(dir)
+return f.listDir(ctx, dir)
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -1389,13 +1390,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 if f.bucket == "" {
 return fs.ErrorListBucketRequired
 }
 list := walk.NewListRHelper(callback)
-err = f.list(dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
-entry, err := f.itemToDirEntry(remote, object, isDirectory)
+err = f.list(ctx, dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
+entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
 if err != nil {
 return err
 }
@@ -1410,29 +1411,29 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 }
 
 // Put the Object into the bucket
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 // Temporary Object under construction
 fs := &Object{
 fs: f,
 remote: src.Remote(),
 }
-return fs, fs.Update(in, src, options...)
+return fs, fs.Update(ctx, in, src, options...)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+return f.Put(ctx, in, src, options...)
 }
 
 // Check if the bucket exists
 //
 // NB this can return incorrect results if called immediately after bucket deletion
-func (f *Fs) dirExists() (bool, error) {
+func (f *Fs) dirExists(ctx context.Context) (bool, error) {
 req := s3.HeadBucketInput{
 Bucket: &f.bucket,
 }
 err := f.pacer.Call(func() (bool, error) {
-_, err := f.c.HeadBucket(&req)
+_, err := f.c.HeadBucketWithContext(ctx, &req)
 return f.shouldRetry(err)
 })
 if err == nil {
@@ -1447,14 +1448,14 @@ func (f *Fs) dirExists() (bool, error) {
 }
 
 // Mkdir creates the bucket if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 f.bucketOKMu.Lock()
 defer f.bucketOKMu.Unlock()
 if f.bucketOK {
 return nil
 }
 if !f.bucketDeleted {
-exists, err := f.dirExists()
+exists, err := f.dirExists(ctx)
 if err == nil {
 f.bucketOK = exists
 }
@@ -1472,7 +1473,7 @@ func (f *Fs) Mkdir(dir string) error {
 }
 }
 err := f.pacer.Call(func() (bool, error) {
-_, err := f.c.CreateBucket(&req)
+_, err := f.c.CreateBucketWithContext(ctx, &req)
 return f.shouldRetry(err)
 })
 if err, ok := err.(awserr.Error); ok {
@@ -1491,7 +1492,7 @@ func (f *Fs) Mkdir(dir string) error {
 // Rmdir deletes the bucket if the fs is at the root
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 f.bucketOKMu.Lock()
 defer f.bucketOKMu.Unlock()
 if f.root != "" || dir != "" {
@@ -1501,7 +1502,7 @@ func (f *Fs) Rmdir(dir string) error {
 Bucket: &f.bucket,
 }
 err := f.pacer.Call(func() (bool, error) {
-_, err := f.c.DeleteBucket(&req)
+_, err := f.c.DeleteBucketWithContext(ctx, &req)
 return f.shouldRetry(err)
 })
 if err == nil {
@@ -1532,8 +1533,8 @@ func pathEscape(s string) string {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
-err := f.Mkdir("")
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+err := f.Mkdir(ctx, "")
 if err != nil {
 return nil, err
 }
@@ -1562,13 +1563,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 req.StorageClass = &f.opt.StorageClass
 }
 err = f.pacer.Call(func() (bool, error) {
-_, err = f.c.CopyObject(&req)
+_, err = f.c.CopyObjectWithContext(ctx, &req)
 return f.shouldRetry(err)
 })
 if err != nil {
 return nil, err
 }
-return f.NewObject(remote)
+return f.NewObject(ctx, remote)
 }
 
 // Hashes returns the supported hash sets.
@@ -1599,14 +1600,14 @@ func (o *Object) Remote() string {
 var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 if t != hash.MD5 {
 return "", hash.ErrUnsupported
 }
 hash := strings.Trim(strings.ToLower(o.etag), `"`)
 // Check the etag is a valid md5sum
 if !matchMd5.MatchString(hash) {
-err := o.readMetaData()
+err := o.readMetaData(ctx)
 if err != nil {
 return "", err
 }
@@ -1632,7 +1633,7 @@ func (o *Object) Size() int64 {
 // readMetaData gets the metadata if it hasn't already been fetched
 //
 // it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
 if o.meta != nil {
 return nil
 }
@@ -1644,7 +1645,7 @@ func (o *Object) readMetaData() (err error) {
 var resp *s3.HeadObjectOutput
 err = o.fs.pacer.Call(func() (bool, error) {
 var err error
-resp, err = o.fs.c.HeadObject(&req)
+resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
 return o.fs.shouldRetry(err)
 })
 if err != nil {
@@ -1678,11 +1679,11 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 if fs.Config.UseServerModTime {
 return o.lastModified
 }
-err := o.readMetaData()
+err := o.readMetaData(ctx)
 if err != nil {
 fs.Logf(o, "Failed to read metadata: %v", err)
 return time.Now()
@@ -1702,8 +1703,8 @@ func (o *Object) ModTime() time.Time {
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
-err := o.readMetaData()
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+err := o.readMetaData(ctx)
 if err != nil {
 return err
 }
@@ -1715,7 +1716,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 }
 
 // Guess the content type
-mimeType := fs.MimeType(o)
+mimeType := fs.MimeType(ctx, o)
 
 // Copy the object to itself to update the metadata
 key := o.fs.root + o.remote
@@ -1743,7 +1744,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 req.StorageClass = &o.fs.opt.StorageClass
 }
 err = o.fs.pacer.Call(func() (bool, error) {
-_, err := o.fs.c.CopyObject(&req)
+_, err := o.fs.c.CopyObjectWithContext(ctx, &req)
 return o.fs.shouldRetry(err)
 })
 return err
@@ -1755,7 +1756,7 @@ func (o *Object) Storable() bool {
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 key := o.fs.root + o.remote
 req := s3.GetObjectInput{
 Bucket: &o.fs.bucket,
@@ -1775,7 +1776,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 var resp *s3.GetObjectOutput
 err = o.fs.pacer.Call(func() (bool, error) {
 var err error
-resp, err = o.fs.c.GetObject(&req)
+resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
 return o.fs.shouldRetry(err)
 })
 if err, ok := err.(awserr.RequestFailure); ok {
@@ -1790,12 +1791,12 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 }
 
 // Update the Object from in with modTime and size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-err := o.fs.Mkdir("")
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+err := o.fs.Mkdir(ctx, "")
 if err != nil {
 return err
 }
-modTime := src.ModTime()
+modTime := src.ModTime(ctx)
 size := src.Size()
 
 multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
@@ -1830,7 +1831,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 // disable checksum isn't present.
 var md5sum string
 if !multipart || !o.fs.opt.DisableChecksum {
-hash, err := src.Hash(hash.MD5)
+hash, err := src.Hash(ctx, hash.MD5)
 if err == nil && matchMd5.MatchString(hash) {
 hashBytes, err := hex.DecodeString(hash)
 if err == nil {
@@ -1843,7 +1844,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }
 
 // Guess the content type
-mimeType := fs.MimeType(src)
+mimeType := fs.MimeType(ctx, src)
 
 key := o.fs.root + o.remote
 if multipart {
@@ -1866,7 +1867,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 req.StorageClass = &o.fs.opt.StorageClass
 }
 err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-_, err = uploader.Upload(&req)
+_, err = uploader.UploadWithContext(ctx, &req)
 return o.fs.shouldRetry(err)
 })
 if err != nil {
@@ -1915,6 +1916,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 if err != nil {
 return errors.Wrap(err, "s3 upload: new request")
 }
+httpReq = httpReq.WithContext(ctx)
 
 // set the headers we signed and the length
 httpReq.Header = headers
@@ -1942,27 +1944,27 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 
 // Read the metadata from the newly created object
 o.meta = nil // wipe old metadata
-err = o.readMetaData()
+err = o.readMetaData(ctx)
 return err
 }
 
 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 key := o.fs.root + o.remote
 req := s3.DeleteObjectInput{
 Bucket: &o.fs.bucket,
 Key: &key,
 }
 err := o.fs.pacer.Call(func() (bool, error) {
-_, err := o.fs.c.DeleteObject(&req)
+_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
 return o.fs.shouldRetry(err)
 })
 return err
 }
 
 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
-err := o.readMetaData()
+func (o *Object) MimeType(ctx context.Context) string {
+err := o.readMetaData(ctx)
 if err != nil {
 fs.Logf(o, "Failed to read metadata: %v", err)
 return ""
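In the s3 backend the context is not only threaded through rclone's own interfaces; each AWS SDK call is switched to its *WithContext variant (ListObjectsWithContext, HeadObjectWithContext, GetObjectWithContext, UploadWithContext, and so on) and the signed multipart request gets httpReq.WithContext(ctx), so a cancelled caller context can reach the in-flight HTTP request. A hedged caller-side sketch of deleting an object with a deadline (remote name, key, and import path are assumptions, not taken from the commit):

package main

import (
	"context"
	"log"
	"time"

	"github.com/rclone/rclone/fs" // import path assumed for this sketch
)

func main() {
	f, err := fs.NewFs("TestS3:bucket") // hypothetical configured remote
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	obj, err := f.NewObject(ctx, "path/to/object") // hypothetical key
	if err != nil {
		log.Fatal(err)
	}
	// Remove now takes the context, so the DeleteObjectWithContext call
	// underneath is subject to the same deadline.
	if err := obj.Remove(ctx); err != nil {
		log.Fatal(err)
	}
}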
backend/sftp/sftp.go
@@ -321,6 +321,7 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ctx := context.Background()
 // Parse config into Options struct
 opt := new(Options)
 err := configstruct.Set(m, opt)
@@ -419,12 +420,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
 }
 
-return NewFsWithConnection(name, root, opt, sshConfig)
+return NewFsWithConnection(ctx, name, root, opt, sshConfig)
 }
 
 // NewFsWithConnection creates a new Fs object from the name and root and a ssh.ClientConfig. It connects to
 // the host specified in the ssh.ClientConfig
-func NewFsWithConnection(name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
+func NewFsWithConnection(ctx context.Context, name string, root string, opt *Options, sshConfig *ssh.ClientConfig) (fs.Fs, error) {
 f := &Fs{
 name: name,
 root: root,
@@ -450,7 +451,7 @@ func NewFsWithConnection(name string, root string, opt *Options, sshConfig *ssh.
 if f.root == "." {
 f.root = ""
 }
-_, err := f.NewObject(remote)
+_, err := f.NewObject(ctx, remote)
 if err != nil {
 if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
 // File doesn't exist so return old f
@@ -491,7 +492,7 @@ func (f *Fs) Precision() time.Duration {
 }
 
 // NewObject creates a new remote sftp file object
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 o := &Object{
 fs: f,
 remote: remote,
@@ -536,7 +537,7 @@ func (f *Fs) dirExists(dir string) (bool, error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 root := path.Join(f.root, dir)
 ok, err := f.dirExists(root)
 if err != nil {
@@ -587,8 +588,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 return entries, nil
 }
 
-// Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime()>
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+// Put data from <in> into a new remote sftp file object described by <src.Remote()> and <src.ModTime(ctx)>
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 err := f.mkParentDir(src.Remote())
 if err != nil {
 return nil, errors.Wrap(err, "Put mkParentDir failed")
@@ -598,7 +599,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 fs: f,
 remote: src.Remote(),
 }
-err = o.Update(in, src, options...)
+err = o.Update(ctx, in, src, options...)
 if err != nil {
 return nil, err
 }
@@ -606,8 +607,8 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+return f.Put(ctx, in, src, options...)
 }
 
 // mkParentDir makes the parent of remote if necessary and any
@@ -649,16 +650,16 @@ func (f *Fs) mkdir(dirPath string) error {
 }
 
 // Mkdir makes the root directory of the Fs object
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 root := path.Join(f.root, dir)
 return f.mkdir(root)
 }
 
 // Rmdir removes the root directory of the Fs object
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 // Check to see if directory is empty as some servers will
 // delete recursively with RemoveDirectory
-entries, err := f.List(dir)
+entries, err := f.List(ctx, dir)
 if err != nil {
 return errors.Wrap(err, "Rmdir")
 }
@@ -677,7 +678,7 @@ func (f *Fs) Rmdir(dir string) error {
 }
 
 // Move renames a remote sftp file object
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't move - not same remote type")
@@ -699,7 +700,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 if err != nil {
 return nil, errors.Wrap(err, "Move Rename failed")
 }
-dstObj, err := f.NewObject(remote)
+dstObj, err := f.NewObject(ctx, remote)
 if err != nil {
 return nil, errors.Wrap(err, "Move NewObject failed")
 }
@@ -714,7 +715,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 srcFs, ok := src.(*Fs)
 if !ok {
 fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -868,7 +869,7 @@ func (o *Object) Remote() string {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *Object) Hash(r hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
 var hashCmd string
 if r == hash.MD5 {
 if o.md5sum != nil {
@@ -973,7 +974,7 @@ func (o *Object) Size() int64 {
 }
 
 // ModTime returns the modification time of the remote sftp file
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 return o.modTime
 }
 
@@ -1020,7 +1021,7 @@ func (o *Object) stat() error {
 // SetModTime sets the modification and access time to the specified time
 //
 // it also updates the info field
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 c, err := o.fs.getSftpConnection()
 if err != nil {
 return errors.Wrap(err, "SetModTime")
@@ -1091,7 +1092,7 @@ func (file *objectReader) Close() (err error) {
 }
 
 // Open a remote sftp file object for reading. Seek is supported
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 var offset, limit int64 = 0, -1
 for _, option := range options {
 switch x := option.(type) {
@@ -1125,7 +1126,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 }
 
 // Update a remote sftp file using the data <in> and ModTime from <src>
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 // Clear the hash cache since we are about to update the object
 o.md5sum = nil
 o.sha1sum = nil
@@ -1163,7 +1164,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 remove()
 return errors.Wrap(err, "Update Close failed")
 }
-err = o.SetModTime(src.ModTime())
+err = o.SetModTime(ctx, src.ModTime(ctx))
 if err != nil {
 return errors.Wrap(err, "Update SetModTime failed")
 }
@@ -1171,7 +1172,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }
 
 // Remove a remote sftp file object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 c, err := o.fs.getSftpConnection()
 if err != nil {
 return errors.Wrap(err, "Remove")
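Renames go through the same change: Move and DirMove receive the caller's context before any sftp round trips are made, and the sftp NewFs keeps its old signature for now by creating a context.Background() internally. As a brief, hedged sketch of a rename through the new signature (remote name, paths, and import path are assumptions; whether a given backend supports Move is checked via the optional fs.Mover interface):

package main

import (
	"context"
	"log"

	"github.com/rclone/rclone/fs" // import path assumed for this sketch
)

func main() {
	f, err := fs.NewFs("TestSftp:home") // hypothetical configured remote
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	src, err := f.NewObject(ctx, "old-name.txt") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	// Move is an optional interface; only call it if the backend implements it.
	mover, ok := f.(fs.Mover)
	if !ok {
		log.Fatal("backend does not support server-side Move")
	}
	dst, err := mover.Move(ctx, src, "new-name.txt") // hypothetical destination
	if err != nil {
		log.Fatal(err)
	}
	log.Println("moved to", dst.Remote())
}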
@@ -4,6 +4,7 @@ package swift
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"path"
@@ -508,7 +509,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, er
 
 // NewObject finds the Object at remote. If it can't be found it
 // returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, nil)
 }
 
@@ -652,7 +653,7 @@ func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	if f.container == "" {
 		return f.listContainers(dir)
 	}
@@ -675,7 +676,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 	if f.container == "" {
 		return errors.New("container needed for recursive list")
 	}
@@ -692,7 +693,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 }
 
 // About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	var containers []swift.Container
 	var err error
 	err = f.pacer.Call(func() (bool, error) {
@@ -719,23 +720,23 @@ func (f *Fs) About() (*fs.Usage, error) {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 	// Temporary Object under construction
 	fs := &Object{
 		fs:      f,
 		remote:  src.Remote(),
 		headers: swift.Headers{}, // Empty object headers to stop readMetaData being called
 	}
-	return fs, fs.Update(in, src, options...)
+	return fs, fs.Update(ctx, in, src, options...)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }
 
 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	f.containerOKMu.Lock()
 	defer f.containerOKMu.Unlock()
 	if f.containerOK {
@@ -773,7 +774,7 @@ func (f *Fs) Mkdir(dir string) error {
 // Rmdir deletes the container if the fs is at the root
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	f.containerOKMu.Lock()
 	defer f.containerOKMu.Unlock()
 	if f.root != "" || dir != "" {
@@ -798,12 +799,12 @@ func (f *Fs) Precision() time.Duration {
 // Purge deletes all the files and directories
 //
 // Implemented here so we can make sure we delete directory markers
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	// Delete all the files including the directory markers
 	toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
 	delErr := make(chan error, 1)
 	go func() {
-		delErr <- operations.DeleteFiles(toBeDeleted)
+		delErr <- operations.DeleteFiles(ctx, toBeDeleted)
 	}()
 	err := f.list("", true, func(entry fs.DirEntry) error {
 		if o, ok := entry.(*Object); ok {
@@ -819,7 +820,7 @@ func (f *Fs) Purge() error {
 	if err != nil {
 		return err
 	}
-	return f.Rmdir("")
+	return f.Rmdir(ctx, "")
 }
 
 // Copy src to this remote using server side copy operations.
@@ -831,8 +832,8 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
-	err := f.Mkdir("")
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	err := f.Mkdir(ctx, "")
 	if err != nil {
 		return nil, err
 	}
@@ -850,7 +851,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, err
 	}
-	return f.NewObject(remote)
+	return f.NewObject(ctx, remote)
 }
 
 // Hashes returns the supported hash sets.
@@ -879,7 +880,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
@@ -976,7 +977,7 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	if fs.Config.UseServerModTime {
 		return o.lastModified
 	}
@@ -994,7 +995,7 @@ func (o *Object) ModTime() time.Time {
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	err := o.readMetaData()
 	if err != nil {
 		return err
@@ -1026,7 +1027,7 @@ func (o *Object) Storable() bool {
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	headers := fs.OpenOptionHeaders(options)
 	_, isRanging := headers["Range"]
 	err = o.fs.pacer.Call(func() (bool, error) {
@@ -1170,16 +1171,16 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 // Update the object with the contents of the io.Reader, modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	if o.fs.container == "" {
 		return fserrors.FatalError(errors.New("container name needed in remote"))
 	}
-	err := o.fs.Mkdir("")
+	err := o.fs.Mkdir(ctx, "")
 	if err != nil {
 		return err
 	}
 	size := src.Size()
-	modTime := src.ModTime()
+	modTime := src.ModTime(ctx)
 
 	// Note whether this is a dynamic large object before starting
 	isDynamicLargeObject, err := o.isDynamicLargeObject()
@@ -1190,7 +1191,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	// Set the mtime
 	m := swift.Metadata{}
 	m.SetModTime(modTime)
-	contentType := fs.MimeType(src)
+	contentType := fs.MimeType(ctx, src)
 	headers := m.ObjectHeaders()
 	uniquePrefix := ""
 	if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
@@ -1233,7 +1234,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }
 
 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	isDynamicLargeObject, err := o.isDynamicLargeObject()
 	if err != nil {
 		return err
@@ -1257,7 +1258,7 @@ func (o *Object) Remove() error {
 }
 
 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 	return o.contentType
 }
 
@@ -1,6 +1,7 @@
 package union
 
 import (
+	"context"
 	"fmt"
 	"io"
 	"path"
@@ -89,8 +90,8 @@ func (f *Fs) Features() *fs.Features {
 }
 
 // Rmdir removes the root directory of the Fs object
-func (f *Fs) Rmdir(dir string) error {
-	return f.wr.Rmdir(dir)
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.wr.Rmdir(ctx, dir)
 }
 
 // Hashes returns hash.HashNone to indicate remote hashing is unavailable
@@ -99,8 +100,8 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // Mkdir makes the root directory of the Fs object
-func (f *Fs) Mkdir(dir string) error {
-	return f.wr.Mkdir(dir)
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	return f.wr.Mkdir(ctx, dir)
 }
 
 // Purge all files in the root and the root directory
@@ -109,8 +110,8 @@ func (f *Fs) Mkdir(dir string) error {
 // quicker than just running Remove() on the result of List()
 //
 // Return an error if it doesn't exist
-func (f *Fs) Purge() error {
-	return f.wr.Features().Purge()
+func (f *Fs) Purge(ctx context.Context) error {
+	return f.wr.Features().Purge(ctx)
 }
 
 // Copy src to this remote using server side copy operations.
@@ -122,12 +123,12 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	if src.Fs() != f.wr {
 		fs.Debugf(src, "Can't copy - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
-	o, err := f.wr.Features().Copy(src, remote)
+	o, err := f.wr.Features().Copy(ctx, src, remote)
 	if err != nil {
 		return nil, err
 	}
@@ -143,12 +144,12 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	if src.Fs() != f.wr {
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
-	o, err := f.wr.Features().Move(src, remote)
+	o, err := f.wr.Features().Move(ctx, src, remote)
 	if err != nil {
 		return nil, err
 	}
@@ -163,13 +164,13 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
 		return fs.ErrorCantDirMove
 	}
-	return f.wr.Features().DirMove(srcFs.wr, srcRemote, dstRemote)
+	return f.wr.Features().DirMove(ctx, srcFs.wr, srcRemote, dstRemote)
 }
 
 // ChangeNotify calls the passed function with a path
@@ -181,14 +182,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 // The ChangeNotify implementation must empty the channel
 // regularly. When the channel gets closed, the implementation
 // should stop polling and release resources.
-func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch <-chan time.Duration) {
 	var remoteChans []chan time.Duration
 
 	for _, remote := range f.remotes {
 		if ChangeNotify := remote.Features().ChangeNotify; ChangeNotify != nil {
 			ch := make(chan time.Duration)
 			remoteChans = append(remoteChans, ch)
-			ChangeNotify(fn, ch)
+			ChangeNotify(ctx, fn, ch)
 		}
 	}
 
@@ -219,8 +220,8 @@ func (f *Fs) DirCacheFlush() {
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	o, err := f.wr.Features().PutStream(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	o, err := f.wr.Features().PutStream(ctx, in, src, options...)
 	if err != nil {
 		return nil, err
 	}
@@ -228,8 +229,8 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
 }
 
 // About gets quota information from the Fs
-func (f *Fs) About() (*fs.Usage, error) {
-	return f.wr.Features().About()
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
+	return f.wr.Features().About(ctx)
 }
 
 // Put in to the remote path with the modTime given of the given size
@@ -237,8 +238,8 @@ func (f *Fs) About() (*fs.Usage, error) {
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	o, err := f.wr.Put(in, src, options...)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	o, err := f.wr.Put(ctx, in, src, options...)
 	if err != nil {
 		return nil, err
 	}
@@ -254,11 +255,11 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	set := make(map[string]fs.DirEntry)
 	found := false
 	for _, remote := range f.remotes {
-		var remoteEntries, err = remote.List(dir)
+		var remoteEntries, err = remote.List(ctx, dir)
 		if err == fs.ErrorDirNotFound {
 			continue
 		}
@@ -283,10 +284,10 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 }
 
 // NewObject creates a new remote union file object based on the first Object it finds (reverse remote order)
-func (f *Fs) NewObject(path string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, path string) (fs.Object, error) {
 	for i := range f.remotes {
 		var remote = f.remotes[len(f.remotes)-i-1]
-		var obj, err = remote.NewObject(path)
+		var obj, err = remote.NewObject(ctx, path)
 		if err == fs.ErrorObjectNotFound {
 			continue
 		}
@@ -9,6 +9,7 @@ package webdav
 
 import (
 	"bytes"
+	"context"
 	"encoding/xml"
 	"fmt"
 	"io"
@@ -282,6 +283,7 @@ func (o *Object) filePath() string {
 
 // NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	ctx := context.Background()
 	// Parse config into Options struct
 	opt := new(Options)
 	err := configstruct.Set(m, opt)
@@ -343,7 +345,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		if f.root == "." {
 			f.root = ""
 		}
-		_, err := f.NewObject(remote)
+		_, err := f.NewObject(ctx, remote)
 		if err != nil {
 			if errors.Cause(err) == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
 				// File doesn't exist so return old f
@@ -432,7 +434,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Prop) (fs.Object, error)
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, nil)
 }
 
@@ -558,7 +560,7 @@ func (f *Fs) listAll(dir string, directoriesOnly bool, filesOnly bool, depth str
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	var iErr error
 	_, err = f.listAll(dir, false, false, defaultDepth, func(remote string, isDir bool, info *api.Prop) bool {
 		if isDir {
@@ -605,19 +607,19 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	o := f.createObject(src.Remote(), src.ModTime(), src.Size())
-	return o, o.Update(in, src, options...)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
+	return o, o.Update(ctx, in, src, options...)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }
 
 // mkParentDir makes the parent of the native path dirPath if
 // necessary and any directories above that
-func (f *Fs) mkParentDir(dirPath string) error {
+func (f *Fs) mkParentDir(ctx context.Context, dirPath string) error {
 	// defer log.Trace(dirPath, "")("")
 	// chop off trailing / if it exists
 	if strings.HasSuffix(dirPath, "/") {
@@ -627,7 +629,7 @@ func (f *Fs) mkParentDir(dirPath string) error {
 	if parent == "." {
 		parent = ""
 	}
-	return f.mkdir(parent)
+	return f.mkdir(ctx, parent)
 }
 
 // low level mkdir, only makes the directory, doesn't attempt to create parents
@@ -660,13 +662,13 @@ func (f *Fs) _mkdir(dirPath string) error {
 }
 
 // mkdir makes the directory and parents using native paths
-func (f *Fs) mkdir(dirPath string) error {
+func (f *Fs) mkdir(ctx context.Context, dirPath string) error {
 	// defer log.Trace(dirPath, "")("")
 	err := f._mkdir(dirPath)
 	if apiErr, ok := err.(*api.Error); ok {
 		// parent does not exist so create it first then try again
 		if apiErr.StatusCode == http.StatusConflict {
-			err = f.mkParentDir(dirPath)
+			err = f.mkParentDir(ctx, dirPath)
 			if err == nil {
 				err = f._mkdir(dirPath)
 			}
@@ -676,9 +678,9 @@ func (f *Fs) mkdir(dirPath string) error {
 }
 
 // Mkdir creates the directory if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	dirPath := f.dirPath(dir)
-	return f.mkdir(dirPath)
+	return f.mkdir(ctx, dirPath)
 }
 
 // dirNotEmpty returns true if the directory exists and is not Empty
@@ -723,7 +725,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the root folder
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	return f.purgeCheck(dir, true)
 }
 
@@ -741,7 +743,7 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy/fs.ErrorCantMove
-func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object, error) {
+func (f *Fs) copyOrMove(ctx context.Context, src fs.Object, remote string, method string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
@@ -751,7 +753,7 @@ func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object,
 		return nil, fs.ErrorCantMove
 	}
 	dstPath := f.filePath(remote)
-	err := f.mkParentDir(dstPath)
+	err := f.mkParentDir(ctx, dstPath)
 	if err != nil {
 		return nil, errors.Wrap(err, "Copy mkParentDir failed")
 	}
@@ -770,7 +772,7 @@ func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object,
 		},
 	}
 	if f.useOCMtime {
-		opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9)
+		opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9)
 	}
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err = f.srv.Call(&opts)
@@ -779,7 +781,7 @@ func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object,
 	if err != nil {
 		return nil, errors.Wrap(err, "Copy call failed")
 	}
-	dstObj, err := f.NewObject(remote)
+	dstObj, err := f.NewObject(ctx, remote)
 	if err != nil {
 		return nil, errors.Wrap(err, "Copy NewObject failed")
 	}
@@ -795,8 +797,8 @@ func (f *Fs) copyOrMove(src fs.Object, remote string, method string) (fs.Object,
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
-	return f.copyOrMove(src, remote, "COPY")
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	return f.copyOrMove(ctx, src, remote, "COPY")
 }
 
 // Purge deletes all the files and the container
@@ -804,7 +806,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	return f.purgeCheck("", false)
 }
 
@@ -817,8 +819,8 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
-	return f.copyOrMove(src, remote, "MOVE")
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	return f.copyOrMove(ctx, src, remote, "MOVE")
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
@@ -829,7 +831,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -848,7 +850,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 	}
 
 	// Make sure the parent directory exists
-	err = f.mkParentDir(dstPath)
+	err = f.mkParentDir(ctx, dstPath)
 	if err != nil {
 		return errors.Wrap(err, "DirMove mkParentDir dst failed")
 	}
@@ -887,7 +889,7 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 // About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	opts := rest.Opts{
 		Method: "PROPFIND",
 		Path:   "",
@@ -949,7 +951,7 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if o.fs.hasChecksums {
 		switch t {
 		case hash.SHA1:
@@ -1002,7 +1004,7 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
@@ -1012,7 +1014,7 @@ func (o *Object) ModTime() time.Time {
 }
 
 // SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	return fs.ErrorCantSetModTime
 }
 
@@ -1022,7 +1024,7 @@ func (o *Object) Storable() bool {
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	var resp *http.Response
 	opts := rest.Opts{
 		Method: "GET",
@@ -1044,8 +1046,8 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 // If existing is set then it updates the object rather than creating a new one
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	err = o.fs.mkParentDir(o.filePath())
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+	err = o.fs.mkParentDir(ctx, o.filePath())
 	if err != nil {
 		return errors.Wrap(err, "Update mkParentDir failed")
 	}
@@ -1058,21 +1060,21 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		Body:          in,
 		NoResponse:    true,
 		ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
-		ContentType:   fs.MimeType(src),
+		ContentType:   fs.MimeType(ctx, src),
 	}
 	if o.fs.useOCMtime || o.fs.hasChecksums {
 		opts.ExtraHeaders = map[string]string{}
 		if o.fs.useOCMtime {
-			opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime().UnixNano())/1E9)
+			opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1E9)
 		}
 		if o.fs.hasChecksums {
 			// Set an upload checksum - prefer SHA1
 			//
 			// This is used as an upload integrity test. If we set
 			// only SHA1 here, owncloud will calculate the MD5 too.
-			if sha1, _ := src.Hash(hash.SHA1); sha1 != "" {
+			if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
 				opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
-			} else if md5, _ := src.Hash(hash.MD5); md5 != "" {
+			} else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
 				opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
 			}
 		}
@@ -1089,7 +1091,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		// finished - ncw
 		time.Sleep(1 * time.Second)
 		// Remove failed upload
-		_ = o.Remove()
+		_ = o.Remove(ctx)
 		return err
 	}
 	// read metadata from remote
@@ -1098,7 +1100,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }
 
 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	opts := rest.Opts{
 		Method: "DELETE",
 		Path:   o.filePath(),
@@ -1,6 +1,7 @@
 package yandex
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -330,7 +331,7 @@ func (f *Fs) itemToDirEntry(remote string, object *api.ResourceInfoResponse) (fs
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	root := f.dirPath(dir)
 
 	var limit uint64 = 1000 // max number of objects per request
@@ -410,7 +411,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.ResourceInfoResponse) (f
 
 // NewObject finds the Object at remote. If it can't be found it
 // returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 	return f.newObjectWithInfo(remote, nil)
 }
 
@@ -434,14 +435,14 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	o := f.createObject(src.Remote(), src.ModTime(), src.Size())
-	return o, o.Update(in, src, options...)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	o := f.createObject(src.Remote(), src.ModTime(ctx), src.Size())
+	return o, o.Update(ctx, in, src, options...)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }
 
 // CreateDir makes a directory
@@ -518,7 +519,7 @@ func (f *Fs) mkParentDirs(resPath string) error {
 }
 
 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	path := f.filePath(dir)
 	return f.mkDirs(path)
 }
@@ -621,7 +622,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
 // Rmdir deletes the container
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	return f.purgeCheck(dir, true)
 }
 
@@ -630,7 +631,7 @@ func (f *Fs) Rmdir(dir string) error {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	return f.purgeCheck("", false)
 }
 
@@ -681,7 +682,7 @@ func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't copy - not same remote type")
@@ -699,7 +700,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 		return nil, errors.Wrap(err, "couldn't copy file")
 	}
 
-	return f.NewObject(remote)
+	return f.NewObject(ctx, remote)
 }
 
 // Move src to this remote using server side move operations.
@@ -711,7 +712,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj, ok := src.(*Object)
 	if !ok {
 		fs.Debugf(src, "Can't move - not same remote type")
@@ -729,7 +730,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 		return nil, errors.Wrap(err, "couldn't move file")
 	}
 
-	return f.NewObject(remote)
+	return f.NewObject(ctx, remote)
 }
 
 // DirMove moves src, srcRemote to this remote at dstRemote
@@ -740,7 +741,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs, ok := src.(*Fs)
 	if !ok {
 		fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -783,7 +784,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }
 
 // PublicLink generates a public link to the remote path (usually readable by anyone)
-func (f *Fs) PublicLink(remote string) (link string, err error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
 	var path string
 	if f.opt.Unlink {
 		path = "/resources/unpublish"
@@ -830,7 +831,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
 }
 
 // CleanUp permanently deletes all trashed files/folders
-func (f *Fs) CleanUp() (err error) {
+func (f *Fs) CleanUp(ctx context.Context) (err error) {
 	var resp *http.Response
 	opts := rest.Opts{
 		Method: "DELETE",
@@ -846,7 +847,7 @@ func (f *Fs) CleanUp() (err error) {
 }
 
 // About gets quota information
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	opts := rest.Opts{
 		Method: "GET",
 		Path:   "/",
@@ -941,7 +942,7 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	err := o.readMetaData()
 	if err != nil {
 		fs.Logf(o, "Failed to read metadata: %v", err)
@@ -961,7 +962,7 @@ func (o *Object) Size() int64 {
 }
 
 // Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
 	if t != hash.MD5 {
 		return "", hash.ErrUnsupported
 	}
@@ -998,7 +999,7 @@ func (o *Object) setCustomProperty(property string, value string) (err error) {
 // SetModTime sets the modification time of the local fs object
 //
 // Commits the datastore
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	// set custom_property 'rclone_modified' of object to modTime
 	err := o.setCustomProperty("rclone_modified", modTime.Format(time.RFC3339Nano))
 	if err != nil {
@@ -1009,7 +1010,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	// prepare download
 	var resp *http.Response
 	var dl api.AsyncInfo
@@ -1090,9 +1091,9 @@ func (o *Object) upload(in io.Reader, overwrite bool, mimeType string) (err erro
 // Copy the reader into the object updating modTime and size
 //
 // The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	in1 := readers.NewCountingReader(in)
-	modTime := src.ModTime()
+	modTime := src.ModTime(ctx)
 	remote := o.filePath()
 
 	//create full path to file before upload.
@@ -1102,7 +1103,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	}
 
 	//upload file
-	err = o.upload(in1, true, fs.MimeType(src))
+	err = o.upload(in1, true, fs.MimeType(ctx, src))
 	if err != nil {
 		return err
 	}
@@ -1112,18 +1113,18 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	o.md5sum = "" // according to unit tests after put the md5 is empty.
 	o.size = int64(in1.BytesRead()) // better solution o.readMetaData() ?
 	//and set modTime of uploaded file
-	err = o.SetModTime(modTime)
+	err = o.SetModTime(ctx, modTime)
 
 	return err
 }
 
 // Remove an object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	return o.fs.delete(o.filePath(), false)
 }
 
 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 	return o.mimeType
 }
 
@@ -1,6 +1,7 @@
 package about
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -91,7 +92,7 @@ Use the --json flag for a computer readable output, eg
 			if doAbout == nil {
 				return errors.Errorf("%v doesn't support about", f)
 			}
-			u, err := doAbout()
+			u, err := doAbout(context.Background())
 			if err != nil {
 				return errors.Wrap(err, "About call failed")
 			}
@@ -1,6 +1,7 @@
 package cat
 
 import (
+	"context"
 	"io"
 	"io/ioutil"
 	"log"
@@ -74,7 +75,7 @@ Note that if offset is negative it will count from the end, so
 			w = ioutil.Discard
 		}
 		cmd.Run(false, false, command, func() error {
-			return operations.Cat(fsrc, w, offset, count)
+			return operations.Cat(context.Background(), fsrc, w, offset, count)
 		})
 	},
 }
@@ -1,6 +1,8 @@
 package check
 
 import (
+	"context"
+
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
@@ -43,9 +45,9 @@ destination that are not in the source will not trigger an error.
 		fsrc, fdst := cmd.NewFsSrcDst(args)
 		cmd.Run(false, false, command, func() error {
 			if download {
-				return operations.CheckDownload(fdst, fsrc, oneway)
+				return operations.CheckDownload(context.Background(), fdst, fsrc, oneway)
 			}
-			return operations.Check(fdst, fsrc, oneway)
+			return operations.Check(context.Background(), fdst, fsrc, oneway)
 		})
 	},
 }
@@ -1,6 +1,8 @@
 package cleanup
 
 import (
+	"context"
+
 	"github.com/ncw/rclone/cmd"
 	"github.com/ncw/rclone/fs/operations"
 	"github.com/spf13/cobra"
@@ -21,7 +23,7 @@ versions. Not supported by all remotes.
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(true, false, command, func() error {
-			return operations.CleanUp(fsrc)
+			return operations.CleanUp(context.Background(), fsrc)
 		})
 	},
 }
@ -1,6 +1,8 @@
|
|||||||
package copy
|
package copy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
"github.com/ncw/rclone/cmd"
|
"github.com/ncw/rclone/cmd"
|
||||||
"github.com/ncw/rclone/fs/operations"
|
"github.com/ncw/rclone/fs/operations"
|
||||||
"github.com/ncw/rclone/fs/sync"
|
"github.com/ncw/rclone/fs/sync"
|
||||||
@ -74,9 +76,9 @@ changed recently very efficiently like this:
|
|||||||
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
|
fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
|
||||||
cmd.Run(true, true, command, func() error {
|
cmd.Run(true, true, command, func() error {
|
||||||
if srcFileName == "" {
|
if srcFileName == "" {
|
||||||
return sync.CopyDir(fdst, fsrc, createEmptySrcDirs)
|
return sync.CopyDir(context.Background(), fdst, fsrc, createEmptySrcDirs)
|
||||||
}
|
}
|
||||||
return operations.CopyFile(fdst, fsrc, srcFileName, srcFileName)
|
return operations.CopyFile(context.Background(), fdst, fsrc, srcFileName, srcFileName)
|
||||||
})
|
})
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -1,6 +1,8 @@
 package copyto
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/ncw/rclone/fs/sync"
@@ -48,9 +50,9 @@ destination.
 fsrc, srcFileName, fdst, dstFileName := cmd.NewFsSrcDstFiles(args)
 cmd.Run(true, true, command, func() error {
 if srcFileName == "" {
-return sync.CopyDir(fdst, fsrc, false)
+return sync.CopyDir(context.Background(), fdst, fsrc, false)
 }
-return operations.CopyFile(fdst, fsrc, dstFileName, srcFileName)
+return operations.CopyFile(context.Background(), fdst, fsrc, dstFileName, srcFileName)
 })
 },
 }
@@ -1,6 +1,8 @@
 package copyurl
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/spf13/cobra"
@@ -22,7 +24,7 @@ without saving it in tmp storage.
 fsdst, dstFileName := cmd.NewFsDstFile(args[1:])
 
 cmd.Run(true, true, command, func() error {
-_, err := operations.CopyURL(fsdst, dstFileName, args[0])
+_, err := operations.CopyURL(context.Background(), fsdst, dstFileName, args[0])
 return err
 })
 },
@@ -1,6 +1,8 @@
 package cryptcheck
 
 import (
+"context"
+
 "github.com/ncw/rclone/backend/crypt"
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs"
@@ -55,13 +57,13 @@ destination that are not in the source will not trigger an error.
 cmd.CheckArgs(2, 2, command, args)
 fsrc, fdst := cmd.NewFsSrcDst(args)
 cmd.Run(false, true, command, func() error {
-return cryptCheck(fdst, fsrc)
+return cryptCheck(context.Background(), fdst, fsrc)
 })
 },
 }
 
 // cryptCheck checks the integrity of a crypted remote
-func cryptCheck(fdst, fsrc fs.Fs) error {
+func cryptCheck(ctx context.Context, fdst, fsrc fs.Fs) error {
 // Check to see fcrypt is a crypt
 fcrypt, ok := fdst.(*crypt.Fs)
 if !ok {
@@ -79,10 +81,10 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
 //
 // it returns true if differences were found
 // it also returns whether it couldn't be hashed
-checkIdentical := func(dst, src fs.Object) (differ bool, noHash bool) {
+checkIdentical := func(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) {
 cryptDst := dst.(*crypt.Object)
 underlyingDst := cryptDst.UnWrap()
-underlyingHash, err := underlyingDst.Hash(hashType)
+underlyingHash, err := underlyingDst.Hash(ctx, hashType)
 if err != nil {
 fs.CountError(err)
 fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
@@ -91,7 +93,7 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
 if underlyingHash == "" {
 return false, true
 }
-cryptHash, err := fcrypt.ComputeHash(cryptDst, src, hashType)
+cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
 if err != nil {
 fs.CountError(err)
 fs.Errorf(dst, "Error computing hash: %v", err)
@@ -110,5 +112,5 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
 return false, false
 }
 
-return operations.CheckFn(fcrypt, fsrc, checkIdentical, oneway)
+return operations.CheckFn(ctx, fcrypt, fsrc, checkIdentical, oneway)
 }
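
The cryptcheck change above shows the pattern this commit applies everywhere: the cobra handler starts from context.Background(), the helper takes ctx context.Context as its first parameter, and every interface call receives that ctx. A minimal, self-contained sketch of the same shape follows; the hasher interface and all names in it are illustrative only, not rclone APIs.

    package main

    import (
        "context"
        "fmt"
    )

    // hasher stands in for any interface whose methods now take a Context
    // as their first argument (hypothetical, for illustration only).
    type hasher interface {
        Hash(ctx context.Context, name string) (string, error)
    }

    type fakeHasher struct{}

    func (fakeHasher) Hash(ctx context.Context, name string) (string, error) {
        // A real implementation would check ctx for cancellation before doing work.
        if err := ctx.Err(); err != nil {
            return "", err
        }
        return "deadbeef-" + name, nil
    }

    // checkOne mirrors the shape of cryptCheck above: it receives the context
    // from its caller and threads it into every interface call it makes.
    func checkOne(ctx context.Context, h hasher, name string) error {
        sum, err := h.Hash(ctx, name)
        if err != nil {
            return err
        }
        fmt.Println(name, sum)
        return nil
    }

    func main() {
        // Top-level entry points (the cobra command handlers in this diff)
        // start from context.Background() and pass it down.
        ctx := context.Background()
        _ = checkOne(ctx, fakeHasher{}, "file1")
    }
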
@@ -1,6 +1,7 @@
 package dbhashsum
 
 import (
+"context"
 "os"
 
 "github.com/ncw/rclone/cmd"
@@ -25,7 +26,7 @@ The output is in the same format as md5sum and sha1sum.
 cmd.CheckArgs(1, 1, command, args)
 fsrc := cmd.NewFsSrc(args)
 cmd.Run(false, false, command, func() error {
-return operations.DropboxHashSum(fsrc, os.Stdout)
+return operations.DropboxHashSum(context.Background(), fsrc, os.Stdout)
 })
 },
 }
@@ -1,6 +1,7 @@
 package dedupe
 
 import (
+"context"
 "log"
 
 "github.com/ncw/rclone/cmd"
@@ -112,7 +113,7 @@ Or
 }
 fdst := cmd.NewFsSrc(args)
 cmd.Run(false, false, command, func() error {
-return operations.Deduplicate(fdst, dedupeMode)
+return operations.Deduplicate(context.Background(), fdst, dedupeMode)
 })
 },
 }
@@ -1,6 +1,8 @@
 package delete
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/spf13/cobra"
@@ -39,7 +41,7 @@ delete all files bigger than 100MBytes.
 cmd.CheckArgs(1, 1, command, args)
 fsrc := cmd.NewFsSrc(args)
 cmd.Run(true, false, command, func() error {
-return operations.Delete(fsrc)
+return operations.Delete(context.Background(), fsrc)
 })
 },
 }
@@ -1,6 +1,8 @@
 package deletefile
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/pkg/errors"
@@ -26,11 +28,11 @@ it will always be removed.
 if fileName == "" {
 return errors.Errorf("%s is a directory or doesn't exist", args[0])
 }
-fileObj, err := fs.NewObject(fileName)
+fileObj, err := fs.NewObject(context.Background(), fileName)
 if err != nil {
 return err
 }
-return operations.DeleteFile(fileObj)
+return operations.DeleteFile(context.Background(), fileObj)
 })
 },
 }
@@ -1,6 +1,7 @@
 package hashsum
 
 import (
+"context"
 "errors"
 "fmt"
 "os"
@@ -54,7 +55,7 @@ Then
 }
 fsrc := cmd.NewFsSrc(args[1:])
 cmd.Run(false, false, command, func() error {
-return operations.HashLister(ht, fsrc, os.Stdout)
+return operations.HashLister(context.Background(), ht, fsrc, os.Stdout)
 })
 return nil
 },
@@ -5,6 +5,7 @@ package info
 
 import (
 "bytes"
+"context"
 "fmt"
 "io"
 "sort"
@@ -61,13 +62,14 @@ a bit of go code for each one.
 for i := range args {
 f := cmd.NewFsDir(args[i : i+1])
 cmd.Run(false, false, command, func() error {
-return readInfo(f)
+return readInfo(context.Background(), f)
 })
 }
 },
 }
 
 type results struct {
+ctx context.Context
 f fs.Fs
 mu sync.Mutex
 stringNeedsEscaping map[string]position
@@ -78,8 +80,9 @@ type results struct {
 canStream bool
 }
 
-func newResults(f fs.Fs) *results {
+func newResults(ctx context.Context, f fs.Fs) *results {
 return &results{
+ctx: ctx,
 f: f,
 stringNeedsEscaping: make(map[string]position),
 }
@@ -117,7 +120,7 @@ func (r *results) Print() {
 func (r *results) writeFile(path string) (fs.Object, error) {
 contents := fstest.RandomString(50)
 src := object.NewStaticObjectInfo(path, time.Now(), int64(len(contents)), true, nil, r.f)
-return r.f.Put(bytes.NewBufferString(contents), src)
+return r.f.Put(r.ctx, bytes.NewBufferString(contents), src)
 }
 
 // check whether normalization is enforced and check whether it is
@@ -131,11 +134,11 @@ func (r *results) checkUTF8Normalization() {
 return
 }
 r.canWriteUnnormalized = true
-_, err = r.f.NewObject(unnormalized)
+_, err = r.f.NewObject(r.ctx, unnormalized)
 if err == nil {
 r.canReadUnnormalized = true
 }
-_, err = r.f.NewObject(normalized)
+_, err = r.f.NewObject(r.ctx, normalized)
 if err == nil {
 r.canReadRenormalized = true
 }
@@ -163,7 +166,7 @@ func (r *results) checkStringPositions(s string) {
 } else {
 fs.Infof(r.f, "Writing %s position file 0x%0X OK", pos.String(), s)
 }
-obj, getErr := r.f.NewObject(path)
+obj, getErr := r.f.NewObject(r.ctx, path)
 if getErr != nil {
 fs.Infof(r.f, "Getting %s position file 0x%0X Error: %s", pos.String(), s, getErr)
 } else {
@@ -262,7 +265,7 @@ func (r *results) checkStreaming() {
 in := io.TeeReader(buf, hashIn)
 
 objIn := object.NewStaticObjectInfo("checkStreamingTest", time.Now(), -1, true, nil, r.f)
-objR, err := putter(in, objIn)
+objR, err := putter(r.ctx, in, objIn)
 if err != nil {
 fs.Infof(r.f, "Streamed file failed to upload (%v)", err)
 r.canStream = false
@@ -272,7 +275,7 @@ func (r *results) checkStreaming() {
 hashes := hashIn.Sums()
 types := objR.Fs().Hashes().Array()
 for _, Hash := range types {
-sum, err := objR.Hash(Hash)
+sum, err := objR.Hash(r.ctx, Hash)
 if err != nil {
 fs.Infof(r.f, "Streamed file failed when getting hash %v (%v)", Hash, err)
 r.canStream = false
@@ -292,12 +295,12 @@ func (r *results) checkStreaming() {
 r.canStream = true
 }
 
-func readInfo(f fs.Fs) error {
+func readInfo(ctx context.Context, f fs.Fs) error {
-err := f.Mkdir("")
+err := f.Mkdir(ctx, "")
 if err != nil {
 return errors.Wrap(err, "couldn't mkdir")
 }
-r := newResults(f)
+r := newResults(ctx, f)
 if checkControl {
 r.checkControls()
 }
@@ -1,6 +1,7 @@
 package link
 
 import (
+"context"
 "fmt"
 
 "github.com/ncw/rclone/cmd"
@@ -30,7 +31,7 @@ without account.
 cmd.CheckArgs(1, 1, command, args)
 fsrc, remote := cmd.NewFsFile(args[0])
 cmd.Run(false, false, command, func() error {
-link, err := operations.PublicLink(fsrc, remote)
+link, err := operations.PublicLink(context.Background(), fsrc, remote)
 if err != nil {
 return err
 }
@@ -1,6 +1,7 @@
 package ls
 
 import (
+"context"
 "os"
 
 "github.com/ncw/rclone/cmd"
@@ -33,7 +34,7 @@ Eg
 cmd.CheckArgs(1, 1, command, args)
 fsrc := cmd.NewFsSrc(args)
 cmd.Run(false, false, command, func() error {
-return operations.List(fsrc, os.Stdout)
+return operations.List(context.Background(), fsrc, os.Stdout)
 })
 },
 }
@@ -1,6 +1,7 @@
 package lsd
 
 import (
+"context"
 "os"
 
 "github.com/ncw/rclone/cmd"
@@ -52,7 +53,7 @@ If you just want the directory names use "rclone lsf --dirs-only".
 }
 fsrc := cmd.NewFsSrc(args)
 cmd.Run(false, false, command, func() error {
-return operations.ListDir(fsrc, os.Stdout)
+return operations.ListDir(context.Background(), fsrc, os.Stdout)
 })
 },
 }
@@ -1,6 +1,7 @@
 package lsf
 
 import (
+"context"
 "fmt"
 "io"
 "os"
@@ -150,14 +151,14 @@ those only (without traversing the whole directory structure):
 if csv && !separatorFlagSupplied {
 separator = ","
 }
-return Lsf(fsrc, os.Stdout)
+return Lsf(context.Background(), fsrc, os.Stdout)
 })
 },
 }
 
 // Lsf lists all the objects in the path with modification time, size
 // and path in specific format.
-func Lsf(fsrc fs.Fs, out io.Writer) error {
+func Lsf(ctx context.Context, fsrc fs.Fs, out io.Writer) error {
 var list operations.ListFormat
 list.SetSeparator(separator)
 list.SetCSV(csv)
@@ -199,7 +200,7 @@ func Lsf(fsrc fs.Fs, out io.Writer) error {
 }
 }
 
-return operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
+return operations.ListJSON(ctx, fsrc, "", &opt, func(item *operations.ListJSONItem) error {
 _, _ = fmt.Fprintln(out, list.Format(item))
 return nil
 })
@@ -2,6 +2,7 @@ package lsf
 
 import (
 "bytes"
+"context"
 "testing"
 
 _ "github.com/ncw/rclone/backend/local"
@@ -19,7 +20,7 @@ func TestDefaultLsf(t *testing.T) {
 f, err := fs.NewFs("testfiles")
 require.NoError(t, err)
 
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `file1
 file2
@@ -36,7 +37,7 @@ func TestRecurseFlag(t *testing.T) {
 require.NoError(t, err)
 
 recurse = true
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `file1
 file2
@@ -58,7 +59,7 @@ func TestDirSlashFlag(t *testing.T) {
 
 dirSlash = true
 format = "p"
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `file1
 file2
@@ -68,7 +69,7 @@ subdir/
 
 buf = new(bytes.Buffer)
 dirSlash = false
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `file1
 file2
@@ -84,7 +85,7 @@ func TestFormat(t *testing.T) {
 
 buf := new(bytes.Buffer)
 format = "p"
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `file1
 file2
@@ -94,7 +95,7 @@ subdir
 
 buf = new(bytes.Buffer)
 format = "s"
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `0
 321
@@ -104,7 +105,7 @@ subdir
 
 buf = new(bytes.Buffer)
 format = "hp"
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `d41d8cd98f00b204e9800998ecf8427e;file1
 409d6c19451dd39d4a94e42d2ff2c834;file2
@@ -115,7 +116,7 @@ subdir
 buf = new(bytes.Buffer)
 format = "p"
 filesOnly = true
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `file1
 file2
@@ -126,7 +127,7 @@ file3
 buf = new(bytes.Buffer)
 format = "p"
 dirsOnly = true
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `subdir
 `, buf.String())
@@ -134,20 +135,20 @@ file3
 
 buf = new(bytes.Buffer)
 format = "t"
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 
-items, _ := list.DirSorted(f, true, "")
+items, _ := list.DirSorted(context.Background(), f, true, "")
 var expectedOutput string
 for _, item := range items {
-expectedOutput += item.ModTime().Format("2006-01-02 15:04:05") + "\n"
+expectedOutput += item.ModTime(context.Background()).Format("2006-01-02 15:04:05") + "\n"
 }
 
 assert.Equal(t, expectedOutput, buf.String())
 
 buf = new(bytes.Buffer)
 format = "sp"
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `0;file1
 321;file2
@@ -164,7 +165,7 @@ func TestSeparator(t *testing.T) {
 format = "ps"
 
 buf := new(bytes.Buffer)
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `file1;0
 file2;321
@@ -174,7 +175,7 @@ subdir;-1
 
 separator = "__SEP__"
 buf = new(bytes.Buffer)
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 assert.Equal(t, `file1__SEP__0
 file2__SEP__321
@@ -195,17 +196,17 @@ func TestWholeLsf(t *testing.T) {
 dirSlash = true
 
 buf := new(bytes.Buffer)
-err = Lsf(f, buf)
+err = Lsf(context.Background(), f, buf)
 require.NoError(t, err)
 
-items, _ := list.DirSorted(f, true, "")
+items, _ := list.DirSorted(context.Background(), f, true, "")
-itemsInSubdir, _ := list.DirSorted(f, true, "subdir")
+itemsInSubdir, _ := list.DirSorted(context.Background(), f, true, "subdir")
 var expectedOutput []string
 for _, item := range items {
-expectedOutput = append(expectedOutput, item.ModTime().Format("2006-01-02 15:04:05"))
+expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format("2006-01-02 15:04:05"))
 }
 for _, item := range itemsInSubdir {
-expectedOutput = append(expectedOutput, item.ModTime().Format("2006-01-02 15:04:05"))
+expectedOutput = append(expectedOutput, item.ModTime(context.Background()).Format("2006-01-02 15:04:05"))
 }
 
 assert.Equal(t, `file1_+_0_+_`+expectedOutput[0]+`
@@ -1,6 +1,7 @@
 package lsjson
 
 import (
+"context"
 "encoding/json"
 "fmt"
 "os"
@@ -90,7 +91,7 @@ can be processed line by line as each item is written one to a line.
 cmd.Run(false, false, command, func() error {
 fmt.Println("[")
 first := true
-err := operations.ListJSON(fsrc, "", &opt, func(item *operations.ListJSONItem) error {
+err := operations.ListJSON(context.Background(), fsrc, "", &opt, func(item *operations.ListJSONItem) error {
 out, err := json.Marshal(item)
 if err != nil {
 return errors.Wrap(err, "failed to marshal list object")
@@ -1,6 +1,7 @@
 package lsl
 
 import (
+"context"
 "os"
 
 "github.com/ncw/rclone/cmd"
@@ -33,7 +34,7 @@ Eg
 cmd.CheckArgs(1, 1, command, args)
 fsrc := cmd.NewFsSrc(args)
 cmd.Run(false, false, command, func() error {
-return operations.ListLong(fsrc, os.Stdout)
+return operations.ListLong(context.Background(), fsrc, os.Stdout)
 })
 },
 }
@@ -1,6 +1,7 @@
 package md5sum
 
 import (
+"context"
 "os"
 
 "github.com/ncw/rclone/cmd"
@@ -23,7 +24,7 @@ is in the same format as the standard md5sum tool produces.
 cmd.CheckArgs(1, 1, command, args)
 fsrc := cmd.NewFsSrc(args)
 cmd.Run(false, false, command, func() error {
-return operations.Md5sum(fsrc, os.Stdout)
+return operations.Md5sum(context.Background(), fsrc, os.Stdout)
 })
 },
 }
@@ -1,6 +1,7 @@
 package memtest
 
 import (
+"context"
 "runtime"
 "sync"
 
@@ -22,7 +23,8 @@ var commandDefintion = &cobra.Command{
 cmd.CheckArgs(1, 1, command, args)
 fsrc := cmd.NewFsSrc(args)
 cmd.Run(false, false, command, func() error {
-objects, _, err := operations.Count(fsrc)
+ctx := context.Background()
+objects, _, err := operations.Count(ctx, fsrc)
 if err != nil {
 return err
 }
@@ -31,7 +33,7 @@ var commandDefintion = &cobra.Command{
 runtime.GC()
 runtime.ReadMemStats(&before)
 var mu sync.Mutex
-err = operations.ListFn(fsrc, func(o fs.Object) {
+err = operations.ListFn(ctx, fsrc, func(o fs.Object) {
 mu.Lock()
 objs = append(objs, o)
 mu.Unlock()
@@ -1,6 +1,8 @@
 package mkdir
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/spf13/cobra"
@@ -17,7 +19,7 @@ var commandDefintion = &cobra.Command{
 cmd.CheckArgs(1, 1, command, args)
 fdst := cmd.NewFsDir(args)
 cmd.Run(true, false, command, func() error {
-return operations.Mkdir(fdst, "")
+return operations.Mkdir(context.Background(), fdst, "")
 })
 },
 }
@@ -1,6 +1,7 @@
 package mounttest
 
 import (
+"context"
 "os"
 "testing"
 "time"
@@ -172,7 +173,7 @@ func TestDirCacheFlush(t *testing.T) {
 run.readLocal(t, localDm, "")
 assert.Equal(t, dm, localDm, "expected vs fuse mount")
 
-err := run.fremote.Mkdir("dir/subdir")
+err := run.fremote.Mkdir(context.Background(), "dir/subdir")
 require.NoError(t, err)
 
 root, err := run.vfs.Root()
@@ -208,7 +209,7 @@ func TestDirCacheFlushOnDirRename(t *testing.T) {
 assert.Equal(t, dm, localDm, "expected vs fuse mount")
 
 // expect remotely created directory to not show up
-err := run.fremote.Mkdir("dir/subdir")
+err := run.fremote.Mkdir(context.Background(), "dir/subdir")
 require.NoError(t, err)
 run.readLocal(t, localDm, "")
 assert.Equal(t, dm, localDm, "expected vs fuse mount")
@@ -3,6 +3,7 @@
 package mounttest
 
 import (
+"context"
 "flag"
 "fmt"
 "io/ioutil"
@@ -119,7 +120,7 @@ func newRun() *Run {
 log.Fatalf("Failed to open remote %q: %v", *fstest.RemoteName, err)
 }
 
-err = r.fremote.Mkdir("")
+err = r.fremote.Mkdir(context.Background(), "")
 if err != nil {
 log.Fatalf("Failed to open mkdir %q: %v", *fstest.RemoteName, err)
 }
@@ -211,7 +212,7 @@ func (r *Run) cacheMode(cacheMode vfs.CacheMode) {
 r.vfs.WaitForWriters(30 * time.Second)
 // Empty and remake the remote
 r.cleanRemote()
-err := r.fremote.Mkdir("")
+err := r.fremote.Mkdir(context.Background(), "")
 if err != nil {
 log.Fatalf("Failed to open mkdir %q: %v", *fstest.RemoteName, err)
 }
@@ -296,7 +297,7 @@ func (r *Run) readLocal(t *testing.T, dir dirMap, filePath string) {
 
 // reads the remote tree into dir
 func (r *Run) readRemote(t *testing.T, dir dirMap, filepath string) {
-objs, dirs, err := walk.GetAll(r.fremote, filepath, true, 1)
+objs, dirs, err := walk.GetAll(context.Background(), r.fremote, filepath, true, 1)
 if err == fs.ErrorDirNotFound {
 return
 }
@@ -1,6 +1,8 @@
 package move
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/ncw/rclone/fs/sync"
@@ -54,9 +56,9 @@ can speed transfers up greatly.
 fsrc, srcFileName, fdst := cmd.NewFsSrcFileDst(args)
 cmd.Run(true, true, command, func() error {
 if srcFileName == "" {
-return sync.MoveDir(fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs)
+return sync.MoveDir(context.Background(), fdst, fsrc, deleteEmptySrcDirs, createEmptySrcDirs)
 }
-return operations.MoveFile(fdst, fsrc, srcFileName, srcFileName)
+return operations.MoveFile(context.Background(), fdst, fsrc, srcFileName, srcFileName)
 })
 },
 }
@@ -1,6 +1,8 @@
 package moveto
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/ncw/rclone/fs/sync"
@@ -52,9 +54,9 @@ transfer.
 
 cmd.Run(true, true, command, func() error {
 if srcFileName == "" {
-return sync.MoveDir(fdst, fsrc, false, false)
+return sync.MoveDir(context.Background(), fdst, fsrc, false, false)
 }
-return operations.MoveFile(fdst, fsrc, dstFileName, srcFileName)
+return operations.MoveFile(context.Background(), fdst, fsrc, dstFileName, srcFileName)
 })
 },
 }
@@ -5,6 +5,7 @@
 package ncdu
 
 import (
+"context"
 "fmt"
 "path"
 "sort"
@@ -423,6 +424,7 @@ func (u *UI) removeEntry(pos int) {
 
 // delete the entry at the current position
 func (u *UI) delete() {
+ctx := context.Background()
 dirPos := u.sortPerm[u.dirPosMap[u.path].entry]
 entry := u.entries[dirPos]
 u.boxMenu = []string{"cancel", "confirm"}
@@ -431,7 +433,7 @@ func (u *UI) delete() {
 if o != 1 {
 return "Aborted!", nil
 }
-err := operations.DeleteFile(obj)
+err := operations.DeleteFile(ctx, obj)
 if err != nil {
 return "", err
 }
@@ -446,7 +448,7 @@ func (u *UI) delete() {
 if o != 1 {
 return "Aborted!", nil
 }
-err := operations.Purge(f, entry.String())
+err := operations.Purge(ctx, f, entry.String())
 if err != nil {
 return "", err
 }
@@ -636,7 +638,7 @@ func (u *UI) Show() error {
 
 // scan the disk in the background
 u.listing = true
-rootChan, errChan, updated := scan.Scan(u.f)
+rootChan, errChan, updated := scan.Scan(context.Background(), u.f)
 
 // Poll the events into a channel
 events := make(chan termbox.Event)
@@ -2,6 +2,7 @@
 package scan
 
 import (
+"context"
 "path"
 "sync"
 
@@ -160,13 +161,13 @@ func (d *Dir) AttrI(i int) (size int64, count int64, isDir bool, readable bool)
 
 // Scan the Fs passed in, returning a root directory channel and an
 // error channel
-func Scan(f fs.Fs) (chan *Dir, chan error, chan struct{}) {
+func Scan(ctx context.Context, f fs.Fs) (chan *Dir, chan error, chan struct{}) {
 root := make(chan *Dir, 1)
 errChan := make(chan error, 1)
 updated := make(chan struct{}, 1)
 go func() {
 parents := map[string]*Dir{}
-err := walk.Walk(f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
+err := walk.Walk(ctx, f, "", false, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
 if err != nil {
 return err // FIXME mark directory as errored instead of aborting
 }
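
Passing ctx into walk.Walk above is what makes the ncdu background scan stoppable: once every layer accepts a context, cancelling it is enough to end the walk. A rough, self-contained sketch of that cancellation pattern follows; the scan function here is illustrative only, not the rclone one.

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // scan stands in for a producer like Scan above: it walks in a background
    // goroutine and stops as soon as the context is cancelled.
    func scan(ctx context.Context) <-chan string {
        out := make(chan string)
        go func() {
            defer close(out)
            for i := 0; ; i++ {
                select {
                case <-ctx.Done():
                    return // caller cancelled: stop walking
                case out <- fmt.Sprintf("entry-%d", i):
                }
            }
        }()
        return out
    }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        entries := scan(ctx)
        for i := 0; i < 3; i++ {
            fmt.Println(<-entries)
        }
        cancel() // stopping the scan is now just cancelling the context
        time.Sleep(10 * time.Millisecond)
    }
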
@@ -1,6 +1,8 @@
 package purge
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/spf13/cobra"
@@ -22,7 +24,7 @@ you want to selectively delete files.
 cmd.CheckArgs(1, 1, command, args)
 fdst := cmd.NewFsDir(args)
 cmd.Run(true, false, command, func() error {
-return operations.Purge(fdst, "")
+return operations.Purge(context.Background(), fdst, "")
 })
 },
 }
@@ -2,6 +2,7 @@ package rc
 
 import (
 "bytes"
+"context"
 "encoding/json"
 "fmt"
 "io/ioutil"
@@ -116,7 +117,7 @@ func doCall(path string, in rc.Params) (out rc.Params, err error) {
 if call == nil {
 return nil, errors.Errorf("method %q not found", path)
 }
-return call.Fn(in)
+return call.Fn(context.Background(), in)
 }
 
 // Do HTTP request
@@ -1,6 +1,7 @@
 package rcat
 
 import (
+"context"
 "log"
 "os"
 "time"
@@ -50,7 +51,7 @@ a lot of data, you're better off caching locally and then
 
 fdst, dstFileName := cmd.NewFsDstFile(args)
 cmd.Run(false, false, command, func() error {
-_, err := operations.Rcat(fdst, dstFileName, os.Stdin, time.Now())
+_, err := operations.Rcat(context.Background(), fdst, dstFileName, os.Stdin, time.Now())
 return err
 })
 },
@@ -1,6 +1,8 @@
 package rmdir
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/spf13/cobra"
@@ -20,7 +22,7 @@ objects in it, use purge for that.`,
 cmd.CheckArgs(1, 1, command, args)
 fdst := cmd.NewFsDir(args)
 cmd.Run(true, false, command, func() error {
-return operations.Rmdir(fdst, "")
+return operations.Rmdir(context.Background(), fdst, "")
 })
 },
 }
@@ -1,6 +1,8 @@
 package rmdir
 
 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/spf13/cobra"
@@ -32,7 +34,7 @@ empty directories in.
 cmd.CheckArgs(1, 1, command, args)
 fdst := cmd.NewFsDir(args)
 cmd.Run(true, false, command, func() error {
-return operations.Rmdirs(fdst, "", leaveRoot)
+return operations.Rmdirs(context.Background(), fdst, "", leaveRoot)
 })
 },
 }
@@ -1,6 +1,7 @@
 package dlna
 
 import (
+"context"
 "fmt"
 "io/ioutil"
 "net/http"
@@ -39,7 +40,7 @@ func TestInit(t *testing.T) {
 config.LoadConfig()
 
 f, err := fs.NewFs("testdata/files")
-l, _ := f.List("")
+l, _ := f.List(context.Background(), "")
 fmt.Println(l)
 require.NoError(t, err)
 
@@ -8,6 +8,7 @@
 package ftp
 
 import (
+"context"
 "fmt"
 "os"
 "os/exec"
@@ -41,7 +42,7 @@ func TestFTP(t *testing.T) {
 assert.NoError(t, err)
 defer clean()
 
-err = fremote.Mkdir("")
+err = fremote.Mkdir(context.Background(), "")
 assert.NoError(t, err)
 
 // Start the server
@@ -161,7 +161,7 @@ func (s *server) serveFile(w http.ResponseWriter, r *http.Request, remote string
 w.Header().Set("Content-Length", strconv.FormatInt(node.Size(), 10))
 
 // Set content type
-mimeType := fs.MimeType(obj)
+mimeType := fs.MimeType(r.Context(), obj)
 if mimeType == "application/octet-stream" && path.Ext(remote) == "" {
 // Leave header blank so http server guesses
 } else {
@@ -28,7 +28,7 @@ func Object(w http.ResponseWriter, r *http.Request, o fs.Object) {
 }
 
 // Set content type
-mimeType := fs.MimeType(o)
+mimeType := fs.MimeType(r.Context(), o)
 if mimeType == "application/octet-stream" && path.Ext(o.Remote()) == "" {
 // Leave header blank so http server guesses
 } else {
@@ -69,7 +69,7 @@ func Object(w http.ResponseWriter, r *http.Request, o fs.Object) {
 }
 w.Header().Set("Content-Length", strconv.FormatInt(size, 10))
 
-file, err := o.Open(options...)
+file, err := o.Open(r.Context(), options...)
 if err != nil {
 fs.Debugf(o, "Get request open error: %v", err)
 http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
@@ -247,7 +247,7 @@ func (s *server) handler(w http.ResponseWriter, r *http.Request) {
 
 // get the remote
 func (s *server) serveObject(w http.ResponseWriter, r *http.Request, remote string) {
-o, err := s.f.NewObject(remote)
+o, err := s.f.NewObject(r.Context(), remote)
 if err != nil {
 fs.Debugf(remote, "%s request error: %v", r.Method, err)
 http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
@@ -260,7 +260,7 @@ func (s *server) serveObject(w http.ResponseWriter, r *http.Request, remote stri
 func (s *server) postObject(w http.ResponseWriter, r *http.Request, remote string) {
 if appendOnly {
 // make sure the file does not exist yet
-_, err := s.f.NewObject(remote)
+_, err := s.f.NewObject(r.Context(), remote)
 if err == nil {
 fs.Errorf(remote, "Post request: file already exists, refusing to overwrite in append-only mode")
 http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
@@ -269,7 +269,7 @@ func (s *server) postObject(w http.ResponseWriter, r *http.Request, remote strin
 }
 }
 
-_, err := operations.RcatSize(s.f, remote, r.Body, r.ContentLength, time.Now())
+_, err := operations.RcatSize(r.Context(), s.f, remote, r.Body, r.ContentLength, time.Now())
 if err != nil {
 accounting.Stats.Error(err)
 fs.Errorf(remote, "Post request rcat error: %v", err)
@@ -291,14 +291,14 @@ func (s *server) deleteObject(w http.ResponseWriter, r *http.Request, remote str
 }
 }
 
-o, err := s.f.NewObject(remote)
+o, err := s.f.NewObject(r.Context(), remote)
 if err != nil {
 fs.Debugf(remote, "Delete request error: %v", err)
 http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
 return
 }
 
-if err := o.Remove(); err != nil {
+if err := o.Remove(r.Context()); err != nil {
 fs.Errorf(remote, "Delete request remove error: %v", err)
 if err == fs.ErrorObjectNotFound {
 http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
@@ -342,7 +342,7 @@ func (s *server) listObjects(w http.ResponseWriter, r *http.Request, remote stri
 ls := listItems{}
 
 // if remote supports ListR use that directly, otherwise use recursive Walk
-err := walk.ListR(s.f, remote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+err := walk.ListR(r.Context(), s.f, remote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
 for _, entry := range entries {
 ls.add(entry)
 }
@@ -378,7 +378,7 @@ func (s *server) createRepo(w http.ResponseWriter, r *http.Request, remote strin
 return
 }
 
-err := s.f.Mkdir(remote)
+err := s.f.Mkdir(r.Context(), remote)
 if err != nil {
 fs.Errorf(remote, "Create repo failed to Mkdir: %v", err)
 http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
@@ -387,7 +387,7 @@ func (s *server) createRepo(w http.ResponseWriter, r *http.Request, remote strin
 
 for _, name := range []string{"data", "index", "keys", "locks", "snapshots"} {
 dirRemote := path.Join(remote, name)
-err := s.f.Mkdir(dirRemote)
+err := s.f.Mkdir(r.Context(), dirRemote)
 if err != nil {
 fs.Errorf(dirRemote, "Create repo failed to Mkdir: %v", err)
 http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
@@ -6,6 +6,7 @@
 package restic
 
 import (
+"context"
 "os"
 "os/exec"
 "testing"
@@ -38,7 +39,7 @@ func TestRestic(t *testing.T) {
 assert.NoError(t, err)
 defer clean()
 
-err = fremote.Mkdir("")
+err = fremote.Mkdir(context.Background(), "")
 assert.NoError(t, err)
 
 // Start the server
@@ -3,6 +3,7 @@
 package sftp
 
 import (
+"context"
 "fmt"
 "io"
 "net"
@@ -53,7 +54,7 @@ type conn struct {
 
 // execCommand implements an extrememly limited number of commands to
 // interoperate with the rclone sftp backend
-func (c *conn) execCommand(out io.Writer, command string) (err error) {
+func (c *conn) execCommand(ctx context.Context, out io.Writer, command string) (err error) {
 binary, args := command, ""
 space := strings.Index(command, " ")
 if space >= 0 {
@@ -68,7 +69,7 @@ func (c *conn) execCommand(out io.Writer, command string) (err error) {
 if about == nil {
 return errors.New("df not supported")
 }
-usage, err := about()
+usage, err := about(ctx)
 if err != nil {
 return errors.Wrap(err, "About failed")
 }
@@ -108,7 +109,7 @@ func (c *conn) execCommand(out io.Writer, command string) (err error) {
 if !ok {
 return errors.New("unexpected non file")
 }
-hash, err := o.Hash(ht)
+hash, err := o.Hash(ctx, ht)
 if err != nil {
 return errors.Wrap(err, "hash failed")
 }
@@ -230,7 +231,7 @@ func (c *conn) handleChannel(newChannel ssh.NewChannel) {
 }
 } else {
 var rc = uint32(0)
-err := c.execCommand(channel, command.Command)
+err := c.execCommand(context.TODO(), channel, command.Command)
 if err != nil {
 rc = 1
 _, errPrint := fmt.Fprintf(channel.Stderr(), "%v\n", err)
@@ -8,6 +8,7 @@
 package sftp
 
 import (
+"context"
 "os"
 "os/exec"
 "strings"
@@ -43,7 +44,7 @@ func TestSftp(t *testing.T) {
 assert.NoError(t, err)
 defer clean()
 
-err = fremote.Mkdir("")
+err = fremote.Mkdir(context.Background(), "")
 assert.NoError(t, err)
 
 opt := DefaultOpt
@@ -284,7 +284,7 @@ func (fi FileInfo) ETag(ctx context.Context) (etag string, err error) {
 if !ok {
 return "", webdav.ErrNotImplemented
 }
-hash, err := o.Hash(hashType)
+hash, err := o.Hash(ctx, hashType)
 if err != nil || hash == "" {
 return "", webdav.ErrNotImplemented
 }
@@ -302,7 +302,7 @@ func (fi FileInfo) ContentType(ctx context.Context) (contentType string, err err
 entry := node.DirEntry()
 switch x := entry.(type) {
 case fs.Object:
-return fs.MimeType(x), nil
+return fs.MimeType(ctx, x), nil
 case fs.Directory:
 return "inode/directory", nil
 }
@@ -8,6 +8,7 @@
 package webdav
 
 import (
+"context"
 "flag"
 "io/ioutil"
 "net/http"
@@ -50,7 +51,7 @@ func TestWebDav(t *testing.T) {
 assert.NoError(t, err)
 defer clean()
 
-err = fremote.Mkdir("")
+err = fremote.Mkdir(context.Background(), "")
 assert.NoError(t, err)
 
 // Start the server
|
@@ -1,6 +1,8 @@
 package settier

 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/operations"
 "github.com/pkg/errors"
@@ -48,7 +50,7 @@ Or just provide remote directory and all files in directory will be tiered
 return errors.Errorf("Remote %s does not support settier", fsrc.Name())
 }

-return operations.SetTier(fsrc, tier)
+return operations.SetTier(context.Background(), fsrc, tier)
 })
 },
 }
@@ -1,6 +1,7 @@
 package sha1sum

 import (
+"context"
 "os"

 "github.com/ncw/rclone/cmd"
@@ -23,7 +24,7 @@ is in the same format as the standard sha1sum tool produces.
 cmd.CheckArgs(1, 1, command, args)
 fsrc := cmd.NewFsSrc(args)
 cmd.Run(false, false, command, func() error {
-return operations.Sha1sum(fsrc, os.Stdout)
+return operations.Sha1sum(context.Background(), fsrc, os.Stdout)
 })
 },
 }
@@ -1,6 +1,7 @@
 package size

 import (
+"context"
 "encoding/json"
 "fmt"
 "os"
@@ -31,7 +32,7 @@ var commandDefinition = &cobra.Command{
 Bytes int64 `json:"bytes"`
 }

-results.Count, results.Bytes, err = operations.Count(fsrc)
+results.Count, results.Bytes, err = operations.Count(context.Background(), fsrc)
 if err != nil {
 return err
 }
@@ -1,6 +1,8 @@
 package sync

 import (
+"context"
+
 "github.com/ncw/rclone/cmd"
 "github.com/ncw/rclone/fs/sync"
 "github.com/spf13/cobra"
@@ -44,7 +46,7 @@ go there.
 cmd.CheckArgs(2, 2, command, args)
 fsrc, fdst := cmd.NewFsSrcDst(args)
 cmd.Run(true, true, command, func() error {
-return sync.Sync(fdst, fsrc, createEmptySrcDirs)
+return sync.Sync(context.Background(), fdst, fsrc, createEmptySrcDirs)
 })
 },
 }
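All four command hunks above (settier, sha1sum, size, sync) make the same move: a CLI entry point has no request-scoped context of its own, so it creates the root context with context.Background() and hands it to the operations layer. A minimal sketch of that pattern follows; doSomething stands in for any of the operations.* calls and is not a real rclone function.

package main

import (
	"context"
	"fmt"
)

// doSomething stands in for an operations call that now takes a context.
func doSomething(ctx context.Context, name string) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // stop early if the caller cancelled
	default:
		fmt.Println("working on", name)
		return nil
	}
}

func main() {
	// A top-level caller owns no request context, so it creates the root one.
	ctx := context.Background()
	if err := doSomething(ctx, "remote:path"); err != nil {
		fmt.Println("error:", err)
	}
}

From here on, cancelling the context at the top is enough to signal everything beneath it.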
@@ -2,6 +2,7 @@ package touch
 import (
 "bytes"
+"context"
 "time"

 "github.com/ncw/rclone/cmd"
@@ -33,13 +34,13 @@ var commandDefintion = &cobra.Command{
 cmd.CheckArgs(1, 1, command, args)
 fsrc, srcFileName := cmd.NewFsDstFile(args)
 cmd.Run(true, false, command, func() error {
-return Touch(fsrc, srcFileName)
+return Touch(context.Background(), fsrc, srcFileName)
 })
 },
 }

 //Touch create new file or change file modification time.
-func Touch(fsrc fs.Fs, srcFileName string) error {
+func Touch(ctx context.Context, fsrc fs.Fs, srcFileName string) error {
 timeAtr := time.Now()
 if timeAsArgument != "" {
 layout := defaultLayout
@@ -52,19 +53,19 @@ func Touch(fsrc fs.Fs, srcFileName string) error {
 }
 timeAtr = timeAtrFromFlags
 }
-file, err := fsrc.NewObject(srcFileName)
+file, err := fsrc.NewObject(ctx, srcFileName)
 if err != nil {
 if !notCreateNewFile {
 var buffer []byte
 src := object.NewStaticObjectInfo(srcFileName, timeAtr, int64(len(buffer)), true, nil, fsrc)
-_, err = fsrc.Put(bytes.NewBuffer(buffer), src)
+_, err = fsrc.Put(ctx, bytes.NewBuffer(buffer), src)
 if err != nil {
 return err
 }
 }
 return nil
 }
-err = file.SetModTime(timeAtr)
+err = file.SetModTime(ctx, timeAtr)
 if err != nil {
 return errors.Wrap(err, "touch: couldn't set mod time")
 }
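Because Touch now takes ctx as its first argument, a caller can put one deadline or cancellation around the whole operation (the NewObject lookup, the Put of the empty file, and SetModTime). A sketch of such a caller; the cmd/touch import path is assumed from the repository layout, and "remote:path" is a placeholder for a configured remote.

package main

import (
	"context"
	"log"
	"time"

	"github.com/ncw/rclone/cmd/touch"
	"github.com/ncw/rclone/fs"
)

// touchWithTimeout bounds the whole touch operation with a deadline; the
// context is threaded into NewObject, Put and SetModTime by Touch itself.
func touchWithTimeout(fsrc fs.Fs, name string, d time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	return touch.Touch(ctx, fsrc, name)
}

func main() {
	fsrc, err := fs.NewFs("remote:path") // assumes a configured remote
	if err != nil {
		log.Fatal(err)
	}
	if err := touchWithTimeout(fsrc, "newFile", 30*time.Second); err != nil {
		log.Fatal(err)
	}
}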
@@ -1,6 +1,7 @@
 package touch

 import (
+"context"
 "testing"
 "time"

@@ -34,9 +35,9 @@ func TestTouchOneFile(t *testing.T) {
 r := fstest.NewRun(t)
 defer r.Finalise()

-err := Touch(r.Fremote, "newFile")
+err := Touch(context.Background(), r.Fremote, "newFile")
 require.NoError(t, err)
-_, err = r.Fremote.NewObject("newFile")
+_, err = r.Fremote.NewObject(context.Background(), "newFile")
 require.NoError(t, err)
 }

@@ -45,9 +46,9 @@ func TestTouchWithNoCreateFlag(t *testing.T) {
 defer r.Finalise()

 notCreateNewFile = true
-err := Touch(r.Fremote, "newFile")
+err := Touch(context.Background(), r.Fremote, "newFile")
 require.NoError(t, err)
-_, err = r.Fremote.NewObject("newFile")
+_, err = r.Fremote.NewObject(context.Background(), "newFile")
 require.Error(t, err)
 notCreateNewFile = false
 }
@@ -58,7 +59,7 @@ func TestTouchWithTimestamp(t *testing.T) {

 timeAsArgument = "060102"
 srcFileName := "oldFile"
-err := Touch(r.Fremote, srcFileName)
+err := Touch(context.Background(), r.Fremote, srcFileName)
 require.NoError(t, err)
 checkFile(t, r.Fremote, srcFileName, "")
 }
@@ -69,7 +70,7 @@ func TestTouchWithLognerTimestamp(t *testing.T) {

 timeAsArgument = "2006-01-02T15:04:05"
 srcFileName := "oldFile"
-err := Touch(r.Fremote, srcFileName)
+err := Touch(context.Background(), r.Fremote, srcFileName)
 require.NoError(t, err)
 checkFile(t, r.Fremote, srcFileName, "")
 }
@@ -80,11 +81,11 @@ func TestTouchUpdateTimestamp(t *testing.T) {

 srcFileName := "a"
 content := "aaa"
-file1 := r.WriteObject(srcFileName, content, t1)
+file1 := r.WriteObject(context.Background(), srcFileName, content, t1)
 fstest.CheckItems(t, r.Fremote, file1)

 timeAsArgument = "121212"
-err := Touch(r.Fremote, "a")
+err := Touch(context.Background(), r.Fremote, "a")
 require.NoError(t, err)
 checkFile(t, r.Fremote, srcFileName, content)
 }
@@ -95,12 +96,12 @@ func TestTouchUpdateTimestampWithCFlag(t *testing.T) {

 srcFileName := "a"
 content := "aaa"
-file1 := r.WriteObject(srcFileName, content, t1)
+file1 := r.WriteObject(context.Background(), srcFileName, content, t1)
 fstest.CheckItems(t, r.Fremote, file1)

 notCreateNewFile = true
 timeAsArgument = "121212"
-err := Touch(r.Fremote, "a")
+err := Touch(context.Background(), r.Fremote, "a")
 require.NoError(t, err)
 checkFile(t, r.Fremote, srcFileName, content)
 notCreateNewFile = false
@@ -111,7 +112,7 @@ func TestTouchCreateMultipleDirAndFile(t *testing.T) {
 defer r.Finalise()

 longPath := "a/b/c.txt"
-err := Touch(r.Fremote, longPath)
+err := Touch(context.Background(), r.Fremote, longPath)
 require.NoError(t, err)
 file1 := fstest.NewItem("a/b/c.txt", "", t1)
 fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{file1}, []string{"a", "a/b"}, fs.ModTimeNotSupported)
@@ -1,6 +1,7 @@
 package tree

 import (
+"context"
 "fmt"
 "io"
 "os"
@@ -117,7 +118,7 @@ short options as they conflict with rclone's short options.

 // Tree lists fsrc to outFile using the Options passed in
 func Tree(fsrc fs.Fs, outFile io.Writer, opts *tree.Options) error {
-dirs, err := walk.NewDirTree(fsrc, "", false, opts.DeepLevel)
+dirs, err := walk.NewDirTree(context.Background(), fsrc, "", false, opts.DeepLevel)
 if err != nil {
 return err
 }
@@ -165,7 +166,7 @@ func (to *FileInfo) Mode() os.FileMode {

 // ModTime is modification time
 func (to *FileInfo) ModTime() time.Time {
-return to.entry.ModTime()
+return to.entry.ModTime(context.Background())
 }

 // IsDir is abbreviation for Mode().IsDir()
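The FileInfo.ModTime hunk above shows the awkward edge of this change: os.FileInfo-style interfaces have no context parameter, so an adapter wrapping a now context-aware directory entry has to supply its own context.Background(). A self-contained sketch of that adapter shape, using stand-in types rather than the real fs interfaces:

package main

import (
	"context"
	"fmt"
	"time"
)

// entry is a stand-in for fs.DirEntry after this change: ModTime wants a context.
type entry interface {
	Remote() string
	ModTime(ctx context.Context) time.Time
}

// fileInfo adapts a context-aware entry towards a context-free,
// os.FileInfo-like shape, which is why it conjures a context of its own.
type fileInfo struct {
	e entry
}

func (fi fileInfo) Name() string       { return fi.e.Remote() }
func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime(context.Background()) }

// staticEntry is a trivial entry used only to exercise the adapter.
type staticEntry struct {
	name string
	t    time.Time
}

func (s staticEntry) Remote() string                        { return s.name }
func (s staticEntry) ModTime(ctx context.Context) time.Time { return s.t }

func main() {
	fi := fileInfo{e: staticEntry{name: "file.txt", t: time.Now()}}
	fmt.Println(fi.Name(), fi.ModTime())
}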
@@ -2,6 +2,7 @@ package accounting
 import (
 "bytes"
+"context"
 "fmt"
 "strings"
 "sync"
@@ -101,7 +102,7 @@ func NewStats() *StatsInfo {
 }

 // RemoteStats returns stats for rc
-func (s *StatsInfo) RemoteStats(in rc.Params) (out rc.Params, err error) {
+func (s *StatsInfo) RemoteStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 out = make(rc.Params)
 s.mu.RLock()
 dt := time.Now().Sub(s.start)
@@ -132,7 +132,7 @@ func SetBwLimit(bandwidth fs.SizeSuffix) {
 func init() {
 rc.Add(rc.Call{
 Path: "core/bwlimit",
-Fn: func(in rc.Params) (out rc.Params, err error) {
+Fn: func(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 ibwlimit, ok := in["rate"]
 if !ok {
 return out, errors.Errorf("parameter rate not found")
@@ -1,6 +1,7 @@
 package chunkedreader

 import (
+"context"
 "errors"
 "io"
 "sync"
@@ -19,6 +20,7 @@ var (
 //
 // A initialChunkSize of <= 0 will disable chunked reading.
 type ChunkedReader struct {
+ctx context.Context
 mu sync.Mutex // protects following fields
 o fs.Object // source to read from
 rc io.ReadCloser // reader for the current open chunk
@@ -37,7 +39,7 @@ type ChunkedReader struct {
 // If maxChunkSize is greater than initialChunkSize, the chunk size will be
 // doubled after each chunk read with a maximun of maxChunkSize.
 // A Seek or RangeSeek will reset the chunk size to it's initial value
-func New(o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader {
+func New(ctx context.Context, o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader {
 if initialChunkSize <= 0 {
 initialChunkSize = -1
 }
@@ -45,6 +47,7 @@ func New(o fs.Object, initialChunkSize int64, maxChunkSize int64) *ChunkedReader
 maxChunkSize = initialChunkSize
 }
 return &ChunkedReader{
+ctx: ctx,
 o: o,
 offset: -1,
 chunkSize: initialChunkSize,
@@ -129,14 +132,14 @@ func (cr *ChunkedReader) Close() error {

 // Seek the file - for details see io.Seeker
 func (cr *ChunkedReader) Seek(offset int64, whence int) (int64, error) {
-return cr.RangeSeek(offset, whence, -1)
+return cr.RangeSeek(context.TODO(), offset, whence, -1)
 }

 // RangeSeek the file - for details see RangeSeeker
 //
 // The specified length will only apply to the next chunk opened.
 // RangeSeek will not reopen the source until Read is called.
-func (cr *ChunkedReader) RangeSeek(offset int64, whence int, length int64) (int64, error) {
+func (cr *ChunkedReader) RangeSeek(ctx context.Context, offset int64, whence int, length int64) (int64, error) {
 cr.mu.Lock()
 defer cr.mu.Unlock()

@@ -196,7 +199,7 @@ func (cr *ChunkedReader) openRange() error {
 }

 if rs, ok := cr.rc.(fs.RangeSeeker); ok {
-n, err := rs.RangeSeek(offset, io.SeekStart, length)
+n, err := rs.RangeSeek(cr.ctx, offset, io.SeekStart, length)
 if err == nil && n == offset {
 cr.offset = offset
 return nil
@@ -212,12 +215,12 @@ func (cr *ChunkedReader) openRange() error {
 var err error
 if length <= 0 {
 if offset == 0 {
-rc, err = cr.o.Open()
+rc, err = cr.o.Open(cr.ctx)
 } else {
-rc, err = cr.o.Open(&fs.RangeOption{Start: offset, End: -1})
+rc, err = cr.o.Open(cr.ctx, &fs.RangeOption{Start: offset, End: -1})
 }
 } else {
-rc, err = cr.o.Open(&fs.RangeOption{Start: offset, End: offset + length - 1})
+rc, err = cr.o.Open(cr.ctx, &fs.RangeOption{Start: offset, End: offset + length - 1})
 }
 if err != nil {
 return err
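ChunkedReader illustrates the other common pattern in this commit: io.Reader and io.Seeker have fixed signatures, so the constructor captures the caller's context in the struct and reuses cr.ctx whenever the source object has to be reopened, while Seek falls back to context.TODO(). A small stand-alone sketch of that store-and-reuse pattern (not the real ChunkedReader, just the shape):

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
)

// chunked stores the constructor's context because io.Reader cannot carry
// one, and reuses it every time the underlying source is (re)opened.
type chunked struct {
	ctx  context.Context
	open func(ctx context.Context) (io.Reader, error)
	r    io.Reader
}

func newChunked(ctx context.Context, open func(ctx context.Context) (io.Reader, error)) *chunked {
	return &chunked{ctx: ctx, open: open}
}

func (c *chunked) Read(p []byte) (int, error) {
	if c.r == nil {
		r, err := c.open(c.ctx) // reuse the stored context here
		if err != nil {
			return 0, err
		}
		c.r = r
	}
	return c.r.Read(p)
}

func main() {
	c := newChunked(context.Background(), func(ctx context.Context) (io.Reader, error) {
		return strings.NewReader("hello"), nil
	})
	buf := make([]byte, 5)
	n, _ := c.Read(buf)
	fmt.Println(string(buf[:n]))
}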
@@ -1,6 +1,7 @@
 package chunkedreader

 import (
+"context"
 "fmt"
 "io"
 "math/rand"
@@ -38,13 +39,13 @@ func testRead(content []byte, mode mockobject.SeekMode) func(*testing.T) {
 }

 t.Run(fmt.Sprintf("Chunksize_%d_%d", cs, csMax), func(t *testing.T) {
-cr := New(o, cs, csMax)
+cr := New(context.Background(), o, cs, csMax)

 for _, offset := range offsets {
 for _, limit := range limits {
 what := fmt.Sprintf("offset %d, limit %d", offset, limit)

-p, err := cr.RangeSeek(offset, io.SeekStart, limit)
+p, err := cr.RangeSeek(context.Background(), offset, io.SeekStart, limit)
 if offset >= cl {
 require.Error(t, err, what)
 return
@@ -78,27 +79,27 @@ func TestErrorAfterClose(t *testing.T) {
 o := mockobject.New("test.bin").WithContent(content, mockobject.SeekModeNone)

 // Close
-cr := New(o, 0, 0)
+cr := New(context.Background(), o, 0, 0)
 require.NoError(t, cr.Close())
 require.Error(t, cr.Close())

 // Read
-cr = New(o, 0, 0)
+cr = New(context.Background(), o, 0, 0)
 require.NoError(t, cr.Close())
 var buf [1]byte
 _, err := cr.Read(buf[:])
 require.Error(t, err)

 // Seek
-cr = New(o, 0, 0)
+cr = New(context.Background(), o, 0, 0)
 require.NoError(t, cr.Close())
 _, err = cr.Seek(1, io.SeekCurrent)
 require.Error(t, err)

 // RangeSeek
-cr = New(o, 0, 0)
+cr = New(context.Background(), o, 0, 0)
 require.NoError(t, cr.Close())
-_, err = cr.RangeSeek(1, io.SeekCurrent, 0)
+_, err = cr.RangeSeek(context.Background(), 1, io.SeekCurrent, 0)
 require.Error(t, err)
 }

@@ -1,6 +1,8 @@
 package config

 import (
+"context"
+
 "github.com/ncw/rclone/fs"
 "github.com/ncw/rclone/fs/rc"
 )
@@ -23,7 +25,7 @@ See the [config dump command](/commands/rclone_config_dump/) command for more in
 }

 // Return the config file dump
-func rcDump(in rc.Params) (out rc.Params, err error) {
+func rcDump(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 return DumpRcBlob(), nil
 }

@@ -43,7 +45,7 @@ See the [config dump command](/commands/rclone_config_dump/) command for more in
 }

 // Return the config file get
-func rcGet(in rc.Params) (out rc.Params, err error) {
+func rcGet(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 name, err := in.GetString("name")
 if err != nil {
 return nil, err
@@ -67,7 +69,7 @@ See the [listremotes command](/commands/rclone_listremotes/) command for more in
 }

 // Return the a list of remotes in the config file
-func rcListRemotes(in rc.Params) (out rc.Params, err error) {
+func rcListRemotes(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 var remotes = []string{}
 for _, remote := range getConfigData().GetSectionList() {
 remotes = append(remotes, remote)
@@ -94,7 +96,7 @@ See the [config providers command](/commands/rclone_config_providers/) command f
 }

 // Return the config file providers
-func rcProviders(in rc.Params) (out rc.Params, err error) {
+func rcProviders(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 out = rc.Params{
 "providers": fs.Registry,
 }
@@ -111,8 +113,8 @@ func init() {
 rc.Add(rc.Call{
 Path: "config/" + name,
 AuthRequired: true,
-Fn: func(in rc.Params) (rc.Params, error) {
+Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) {
-return rcConfig(in, name)
+return rcConfig(ctx, in, name)
 },
 Title: name + " the config for a remote.",
 Help: `This takes the following parameters
@@ -126,7 +128,7 @@ See the [config ` + name + ` command](/commands/rclone_config_` + name + `/) com
 }

 // Manipulate the config file
-func rcConfig(in rc.Params, what string) (out rc.Params, err error) {
+func rcConfig(ctx context.Context, in rc.Params, what string) (out rc.Params, err error) {
 name, err := in.GetString("name")
 if err != nil {
 return nil, err
@@ -167,7 +169,7 @@ See the [config delete command](/commands/rclone_config_delete/) command for mor
 }

 // Return the config file delete
-func rcDelete(in rc.Params) (out rc.Params, err error) {
+func rcDelete(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 name, err := in.GetString("name")
 if err != nil {
 return nil, err
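The rc changes above give every remote-control handler the request's context as its first parameter, so cancellation of the HTTP request can reach whatever the handler calls. A sketch of registering a handler under the new Fn signature; the "example/echo" path and its behaviour are invented for illustration, and only rc.Add, rc.Call and Params.GetString are taken from the diff:

package example

import (
	"context"

	"github.com/ncw/rclone/fs/rc"
)

func init() {
	rc.Add(rc.Call{
		Path:  "example/echo", // hypothetical path, for illustration only
		Title: "Echo back the name parameter",
		Help:  "Takes a single parameter: name",
		Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) {
			// ctx is the request's context; long-running work below this
			// point should watch ctx.Done() and give up when it fires.
			name, err := in.GetString("name")
			if err != nil {
				return nil, err
			}
			return rc.Params{"name": name}, nil
		},
	})
}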
@@ -1,6 +1,7 @@
 package config

 import (
+"context"
 "testing"

 _ "github.com/ncw/rclone/backend/local"
@@ -24,7 +25,7 @@ func TestRc(t *testing.T) {
 "test_key": "sausage",
 },
 }
-out, err := call.Fn(in)
+out, err := call.Fn(context.Background(), in)
 require.NoError(t, err)
 require.Nil(t, out)
 assert.Equal(t, "local", FileGet(testName, "type"))
@@ -37,7 +38,7 @@ func TestRc(t *testing.T) {
 call := rc.Calls.Get("config/dump")
 assert.NotNil(t, call)
 in := rc.Params{}
-out, err := call.Fn(in)
+out, err := call.Fn(context.Background(), in)
 require.NoError(t, err)
 require.NotNil(t, out)

@@ -54,7 +55,7 @@ func TestRc(t *testing.T) {
 in := rc.Params{
 "name": testName,
 }
-out, err := call.Fn(in)
+out, err := call.Fn(context.Background(), in)
 require.NoError(t, err)
 require.NotNil(t, out)

@@ -66,7 +67,7 @@ func TestRc(t *testing.T) {
 call := rc.Calls.Get("config/listremotes")
 assert.NotNil(t, call)
 in := rc.Params{}
-out, err := call.Fn(in)
+out, err := call.Fn(context.Background(), in)
 require.NoError(t, err)
 require.NotNil(t, out)

@@ -87,7 +88,7 @@ func TestRc(t *testing.T) {
 "test_key2": "cabbage",
 },
 }
-out, err := call.Fn(in)
+out, err := call.Fn(context.Background(), in)
 require.NoError(t, err)
 assert.Nil(t, out)

@@ -106,7 +107,7 @@ func TestRc(t *testing.T) {
 "test_key2": "cabbage",
 },
 }
-out, err := call.Fn(in)
+out, err := call.Fn(context.Background(), in)
 require.NoError(t, err)
 assert.Nil(t, out)

@@ -121,7 +122,7 @@ func TestRc(t *testing.T) {
 in = rc.Params{
 "name": testName,
 }
-out, err = call.Fn(in)
+out, err = call.Fn(context.Background(), in)
 require.NoError(t, err)
 assert.Nil(t, out)
 assert.Equal(t, "", FileGet(testName, "type"))
@@ -132,7 +133,7 @@ func TestRcProviders(t *testing.T) {
 call := rc.Calls.Get("config/providers")
 assert.NotNil(t, call)
 in := rc.Params{}
-out, err := call.Fn(in)
+out, err := call.Fn(context.Background(), in)
 require.NoError(t, err)
 require.NotNil(t, out)
 var registry []*fs.RegInfo
fs/dir.go
@@ -1,6 +1,9 @@
 package fs

-import "time"
+import (
+"context"
+"time"
+)

 // Dir describes an unspecialized directory for directory/container/bucket lists
 type Dir struct {
@@ -22,10 +25,10 @@ func NewDir(remote string, modTime time.Time) *Dir {
 }

 // NewDirCopy creates an unspecialized copy of the Directory object passed in
-func NewDirCopy(d Directory) *Dir {
+func NewDirCopy(ctx context.Context, d Directory) *Dir {
 return &Dir{
 remote: d.Remote(),
-modTime: d.ModTime(),
+modTime: d.ModTime(ctx),
 size: d.Size(),
 items: d.Items(),
 id: d.ID(),
@@ -61,7 +64,7 @@ func (d *Dir) SetID(id string) *Dir {

 // ModTime returns the modification date of the file
 // It should return a best guess if one isn't available
-func (d *Dir) ModTime() time.Time {
+func (d *Dir) ModTime(ctx context.Context) time.Time {
 if !d.modTime.IsZero() {
 return d.modTime
 }
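fs/dir.go shows the interface side of the change: Directory.ModTime and NewDirCopy now take the caller's context even though a cached Dir rarely needs it, so every implementation has a uniform signature. A small usage sketch, assuming d is an fs.Directory obtained from a listing:

package example

import (
	"context"
	"fmt"

	"github.com/ncw/rclone/fs"
)

// describeDir is a sketch of the new signatures: NewDirCopy and
// Directory.ModTime both take the caller's context now.
func describeDir(ctx context.Context, d fs.Directory) {
	copied := fs.NewDirCopy(ctx, d) // snapshots remote, modTime, size, items, id
	fmt.Printf("%s (modified %v)\n", copied.Remote(), copied.ModTime(ctx))
}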
@@ -3,6 +3,7 @@ package filter
 import (
 "bufio"
+"context"
 "fmt"
 "log"
 "os"
@@ -399,12 +400,12 @@ func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool {

 // IncludeDirectory returns a function which checks whether this
 // directory should be included in the sync or not.
-func (f *Filter) IncludeDirectory(fs fs.Fs) func(string) (bool, error) {
+func (f *Filter) IncludeDirectory(ctx context.Context, fs fs.Fs) func(string) (bool, error) {
 return func(remote string) (bool, error) {
 remote = strings.Trim(remote, "/")
 // first check if we need to remove directory based on
 // the exclude file
-excl, err := f.DirContainsExcludeFile(fs, remote)
+excl, err := f.DirContainsExcludeFile(ctx, fs, remote)
 if err != nil {
 return false, err
 }
@@ -431,9 +432,9 @@ func (f *Filter) IncludeDirectory(fs fs.Fs) func(string) (bool, error) {
 // DirContainsExcludeFile checks if exclude file is present in a
 // directroy. If fs is nil, it works properly if ExcludeFile is an
 // empty string (for testing).
-func (f *Filter) DirContainsExcludeFile(fremote fs.Fs, remote string) (bool, error) {
+func (f *Filter) DirContainsExcludeFile(ctx context.Context, fremote fs.Fs, remote string) (bool, error) {
 if len(f.Opt.ExcludeFile) > 0 {
-exists, err := fs.FileExists(fremote, path.Join(remote, f.Opt.ExcludeFile))
+exists, err := fs.FileExists(ctx, fremote, path.Join(remote, f.Opt.ExcludeFile))
 if err != nil {
 return false, err
 }
@@ -470,11 +471,11 @@ func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
 // IncludeObject returns whether this object should be included into
 // the sync or not. This is a convenience function to avoid calling
 // o.ModTime(), which is an expensive operation.
-func (f *Filter) IncludeObject(o fs.Object) bool {
+func (f *Filter) IncludeObject(ctx context.Context, o fs.Object) bool {
 var modTime time.Time

 if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() {
-modTime = o.ModTime()
+modTime = o.ModTime(ctx)
 } else {
 modTime = time.Unix(0, 0)
 }
@@ -534,8 +535,8 @@ func (f *Filter) HaveFilesFrom() bool {
 var errFilesFromNotSet = errors.New("--files-from not set so can't use Filter.ListR")

 // MakeListR makes function to return all the files set using --files-from
-func (f *Filter) MakeListR(NewObject func(remote string) (fs.Object, error)) fs.ListRFn {
+func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Context, remote string) (fs.Object, error)) fs.ListRFn {
-return func(dir string, callback fs.ListRCallback) error {
+return func(ctx context.Context, dir string, callback fs.ListRCallback) error {
 if !f.HaveFilesFrom() {
 return errFilesFromNotSet
 }
@@ -547,7 +548,7 @@ func (f *Filter) MakeListR(NewObject func(remote string) (fs.Object, error)) fs.
 g.Go(func() (err error) {
 var entries = make(fs.DirEntries, 1)
 for remote := range remotes {
-entries[0], err = NewObject(remote)
+entries[0], err = NewObject(ctx, remote)
 if err == fs.ErrorObjectNotFound {
 // Skip files that are not found
 } else if err != nil {
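The filter changes exist because deciding whether to include an entry can itself touch the remote: IncludeObject may call o.ModTime(ctx) and IncludeDirectory may probe for the exclude file. A sketch of a caller passing the listing's context through; keepIncluded is a hypothetical helper, not part of rclone:

package example

import (
	"context"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/filter"
)

// keepIncluded shows the new call shape: the listing's context is handed to
// IncludeObject because the filter may need o.ModTime(ctx) from the remote.
func keepIncluded(ctx context.Context, flt *filter.Filter, objs []fs.Object) []fs.Object {
	var out []fs.Object
	for _, o := range objs {
		if flt.IncludeObject(ctx, o) {
			out = append(out, o)
		}
	}
	return out
}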
Some files were not shown because too many files have changed in this diff.