Add context propagation to rclone

- Change rclone/fs interfaces to accept context.Context
- Update interface implementations to use context.Context
- Change top-level usage to propagate context to lower-level functions

Context propagation is needed for stopping transfers and passing other
request-scoped values.
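
The pattern repeated throughout the diff below is the same: ctx context.Context becomes the first parameter of each interface method, implementations either check it or forward it to the wrapped backend, and top-level callers create one context and thread it down unchanged. A minimal sketch of that pattern, using a hypothetical Lister interface and memLister type rather than rclone's real fs interfaces:

```go
// Sketch only: Lister, memLister and listAll are illustrative names,
// not rclone APIs.
package main

import (
	"context"
	"fmt"
)

// Before: List(dir string) ([]string, error)
// After:  the context is always the first parameter.
type Lister interface {
	List(ctx context.Context, dir string) ([]string, error)
}

type memLister struct{}

func (memLister) List(ctx context.Context, dir string) ([]string, error) {
	// Implementations can honour cancellation before doing work.
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	return []string{dir + "/a", dir + "/b"}, nil
}

// Top-level callers create the context once and pass it down unchanged.
func listAll(ctx context.Context, l Lister, dirs []string) error {
	for _, d := range dirs {
		entries, err := l.List(ctx, d)
		if err != nil {
			return err
		}
		fmt.Println(entries)
	}
	return nil
}

func main() {
	ctx := context.Background()
	_ = listAll(ctx, memLister{}, []string{"x", "y"})
}
```

Call sites that have no caller-supplied context (for example the ChangeNotify subscription in NewFs below) pass context.Background() instead.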
Author: Aleksandar Jankovic
Date: 2019-06-17 10:34:30 +02:00
Committed by: Nick Craig-Wood
Parent: a2c317b46e
Commit: f78cd1e043
156 changed files with 2570 additions and 2380 deletions

backend/cache/cache.go (140 changed lines)

@@ -509,7 +509,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
pollInterval := make(chan time.Duration, 1)
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
doChangeNotify(f.receiveChangeNotify, pollInterval)
doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
}
f.features = (&fs.Features{
@@ -600,7 +600,7 @@ is used on top of the cache.
return f, fsErr
}
func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
@@ -627,7 +627,7 @@ func (f *Fs) unwrapRemote(remote string) string {
return remote
}
func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
@@ -672,7 +672,7 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
return out, nil
}
func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
type chunkRange struct {
start, end int64
}
@@ -777,18 +777,18 @@ func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
for _, pair := range files {
file, remote := pair[0], pair[1]
var status fileStatus
o, err := f.NewObject(remote)
o, err := f.NewObject(ctx, remote)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
co := o.(*Object)
err = co.refreshFromSource(true)
err = co.refreshFromSource(ctx, true)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
handle := NewObjectHandle(co, f)
handle := NewObjectHandle(ctx, co, f)
handle.UseMemory = false
handle.scaleWorkers(1)
walkChunkRanges(crs, co.Size(), func(chunk int64) {
@@ -874,7 +874,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
// ChangeNotify can subscribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e VFS) to clear out whenever something changes
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
f.parentsForgetMu.Lock()
defer f.parentsForgetMu.Unlock()
fs.Debugf(f, "subscribing to ChangeNotify")
@@ -921,7 +921,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration {
}
// NewObject finds the Object at remote.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
var err error
fs.Debugf(f, "new object '%s'", remote)
@@ -940,16 +940,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// search for entry in source or temp fs
var obj fs.Object
if f.opt.TempWritePath != "" {
obj, err = f.tempFs.NewObject(remote)
obj, err = f.tempFs.NewObject(ctx, remote)
// not found in temp fs
if err != nil {
fs.Debugf(remote, "find: not found in local cache fs")
obj, err = f.Fs.NewObject(remote)
obj, err = f.Fs.NewObject(ctx, remote)
} else {
fs.Debugf(obj, "find: found in local cache fs")
}
} else {
obj, err = f.Fs.NewObject(remote)
obj, err = f.Fs.NewObject(ctx, remote)
}
// not found in either fs
@@ -959,13 +959,13 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
}
// cache the new entry
co = ObjectFromOriginal(f, obj).persist()
co = ObjectFromOriginal(ctx, f, obj).persist()
fs.Debugf(co, "find: cached object")
return co, nil
}
// List the objects and directories in dir into entries
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "list '%s'", dir)
cd := ShallowDirectory(f, dir)
@@ -995,12 +995,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)
for _, queuedRemote := range queuedEntries {
queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
if err != nil {
fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
continue
}
co := ObjectFromOriginal(f, queuedEntry).persist()
co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
fs.Debugf(co, "list: cached temp object")
cachedEntries = append(cachedEntries, co)
}
@@ -1008,7 +1008,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
}
// search from the source
sourceEntries, err := f.Fs.List(dir)
sourceEntries, err := f.Fs.List(ctx, dir)
if err != nil {
return nil, err
}
@@ -1046,11 +1046,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
continue
}
co := ObjectFromOriginal(f, o).persist()
co := ObjectFromOriginal(ctx, f, o).persist()
cachedEntries = append(cachedEntries, co)
fs.Debugf(dir, "list: cached object: %v", co)
case fs.Directory:
cdd := DirectoryFromOriginal(f, o)
cdd := DirectoryFromOriginal(ctx, f, o)
// check if the dir isn't expired and add it in cache if it isn't
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
batchDirectories = append(batchDirectories, cdd)
@@ -1080,8 +1080,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
return cachedEntries, nil
}
func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
entries, err := f.List(dir)
func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
entries, err := f.List(ctx, dir)
if err != nil {
return err
}
@@ -1089,7 +1089,7 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory)
if ok {
err := f.recurse(innerDir.Remote(), list)
err := f.recurse(ctx, innerDir.Remote(), list)
if err != nil {
return err
}
@@ -1106,21 +1106,21 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "list recursively from '%s'", dir)
// we check if the source FS supports ListR
// if it does, we'll use that to get all the entries, cache them and return
do := f.Fs.Features().ListR
if do != nil {
return do(dir, func(entries fs.DirEntries) error {
return do(ctx, dir, func(entries fs.DirEntries) error {
// we got called back with a set of entries so let's cache them and call the original callback
for _, entry := range entries {
switch o := entry.(type) {
case fs.Object:
_ = f.cache.AddObject(ObjectFromOriginal(f, o))
_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
case fs.Directory:
_ = f.cache.AddDir(DirectoryFromOriginal(f, o))
_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
return errors.Errorf("Unknown object type %T", entry)
}
@@ -1133,7 +1133,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
// if we're here, we're gonna do a standard recursive traversal and cache everything
list := walk.NewListRHelper(callback)
err = f.recurse(dir, list)
err = f.recurse(ctx, dir, list)
if err != nil {
return err
}
@@ -1142,9 +1142,9 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
}
// Mkdir makes the directory (container, bucket)
func (f *Fs) Mkdir(dir string) error {
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fs.Debugf(f, "mkdir '%s'", dir)
err := f.Fs.Mkdir(dir)
err := f.Fs.Mkdir(ctx, dir)
if err != nil {
return err
}
@@ -1172,7 +1172,7 @@ func (f *Fs) Mkdir(dir string) error {
}
// Rmdir removes the directory (container, bucket) if empty
func (f *Fs) Rmdir(dir string) error {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
fs.Debugf(f, "rmdir '%s'", dir)
if f.opt.TempWritePath != "" {
@@ -1182,9 +1182,9 @@ func (f *Fs) Rmdir(dir string) error {
// we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step
_, err := f.UnWrap().List(dir)
_, err := f.UnWrap().List(ctx, dir)
if err == nil {
err := f.Fs.Rmdir(dir)
err := f.Fs.Rmdir(ctx, dir)
if err != nil {
return err
}
@@ -1192,10 +1192,10 @@ func (f *Fs) Rmdir(dir string) error {
}
var queuedEntries []*Object
err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(f, oo)
co := ObjectFromOriginal(ctx, f, oo)
queuedEntries = append(queuedEntries, co)
}
}
@@ -1212,7 +1212,7 @@ func (f *Fs) Rmdir(dir string) error {
}
}
} else {
err := f.Fs.Rmdir(dir)
err := f.Fs.Rmdir(ctx, dir)
if err != nil {
return err
}
@@ -1243,7 +1243,7 @@ func (f *Fs) Rmdir(dir string) error {
// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)
do := f.Fs.Features().DirMove
@@ -1265,8 +1265,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
f.backgroundRunner.pause()
defer f.backgroundRunner.play()
_, errInWrap := srcFs.UnWrap().List(srcRemote)
_, errInTemp := f.tempFs.List(srcRemote)
_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
_, errInTemp := f.tempFs.List(ctx, srcRemote)
// not found in either fs
if errInWrap != nil && errInTemp != nil {
return fs.ErrorDirNotFound
@@ -1275,7 +1275,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
// we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step
if errInWrap == nil {
err := do(srcFs.UnWrap(), srcRemote, dstRemote)
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil {
return err
}
@@ -1288,10 +1288,10 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}
var queuedEntries []*Object
err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
co := ObjectFromOriginal(f, oo)
co := ObjectFromOriginal(ctx, f, oo)
queuedEntries = append(queuedEntries, co)
if co.tempFileStartedUpload() {
fs.Errorf(co, "can't move - upload has already started. need to finish that")
@@ -1312,16 +1312,16 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
return fs.ErrorCantDirMove
}
err = do(f.tempFs, srcRemote, dstRemote)
err = do(ctx, f.tempFs, srcRemote, dstRemote)
if err != nil {
return err
}
err = f.cache.ReconcileTempUploads(f)
err = f.cache.ReconcileTempUploads(ctx, f)
if err != nil {
return err
}
} else {
err := do(srcFs.UnWrap(), srcRemote, dstRemote)
err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil {
return err
}
@@ -1427,10 +1427,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}
}
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
// put in to the remote path
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
var err error
var obj fs.Object
@@ -1441,7 +1441,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
_ = f.cache.ExpireDir(parentCd)
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
obj, err = f.tempFs.Put(in, src, options...)
obj, err = f.tempFs.Put(ctx, in, src, options...)
if err != nil {
fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
return nil, err
@@ -1456,14 +1456,14 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
// if cache writes is enabled write it first through cache
} else if f.opt.StoreWrites {
f.cacheReader(in, src, func(inn io.Reader) {
obj, err = put(inn, src, options...)
obj, err = put(ctx, inn, src, options...)
})
if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
}
// last option: save it directly in remote fs
} else {
obj, err = put(in, src, options...)
obj, err = put(ctx, in, src, options...)
if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs")
}
@@ -1475,7 +1475,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
}
// cache the new file
cachedObj := ObjectFromOriginal(f, obj)
cachedObj := ObjectFromOriginal(ctx, f, obj)
// deleting cached chunks and info to be replaced with new ones
_ = f.cache.RemoveObject(cachedObj.abs())
@@ -1498,33 +1498,33 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
}
// Put in to the remote path with the modTime given of the given size
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
fs.Debugf(f, "put data at '%s'", src.Remote())
return f.put(in, src, options, f.Fs.Put)
return f.put(ctx, in, src, options, f.Fs.Put)
}
// PutUnchecked uploads the object
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutUnchecked
if do == nil {
return nil, errors.New("can't PutUnchecked")
}
fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
return f.put(in, src, options, do)
return f.put(ctx, in, src, options, do)
}
// PutStream uploads the object
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
do := f.Fs.Features().PutStream
if do == nil {
return nil, errors.New("can't PutStream")
}
fs.Debugf(f, "put data streaming in '%s'", src.Remote())
return f.put(in, src, options, do)
return f.put(ctx, in, src, options, do)
}
// Copy src to this remote using server side copy operations.
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
do := f.Fs.Features().Copy
@@ -1544,7 +1544,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
return nil, fs.ErrorCantCopy
}
// refresh from source or abort
if err := srcObj.refreshFromSource(false); err != nil {
if err := srcObj.refreshFromSource(ctx, false); err != nil {
fs.Errorf(f, "can't copy %v - %v", src, err)
return nil, fs.ErrorCantCopy
}
@@ -1563,7 +1563,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
}
obj, err := do(srcObj.Object, remote)
obj, err := do(ctx, srcObj.Object, remote)
if err != nil {
fs.Errorf(srcObj, "error moving in cache: %v", err)
return nil, err
@@ -1571,7 +1571,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(obj, "copy: file copied")
// persist new
co := ObjectFromOriginal(f, obj).persist()
co := ObjectFromOriginal(ctx, f, obj).persist()
fs.Debugf(co, "copy: added to cache")
// expire the destination path
parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
@@ -1598,7 +1598,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}
// Move src to this remote using server side move operations.
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
// if source fs doesn't support move abort
@@ -1619,7 +1619,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
return nil, fs.ErrorCantMove
}
// refresh from source or abort
if err := srcObj.refreshFromSource(false); err != nil {
if err := srcObj.refreshFromSource(ctx, false); err != nil {
fs.Errorf(f, "can't move %v - %v", src, err)
return nil, fs.ErrorCantMove
}
@@ -1655,7 +1655,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(srcObj, "move: queued file moved to %v", remote)
}
obj, err := do(srcObj.Object, remote)
obj, err := do(ctx, srcObj.Object, remote)
if err != nil {
fs.Errorf(srcObj, "error moving: %v", err)
return nil, err
@@ -1680,7 +1680,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// advertise to ChangeNotify if wrapped doesn't do that
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
// persist new
cachedObj := ObjectFromOriginal(f, obj).persist()
cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
fs.Debugf(cachedObj, "move: added to cache")
// expire new parent
parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
@@ -1702,7 +1702,7 @@ func (f *Fs) Hashes() hash.Set {
}
// Purge all files in the root and the root directory
func (f *Fs) Purge() error {
func (f *Fs) Purge(ctx context.Context) error {
fs.Infof(f, "purging cache")
f.cache.Purge()
@@ -1711,7 +1711,7 @@ func (f *Fs) Purge() error {
return nil
}
err := do()
err := do(ctx)
if err != nil {
return err
}
@@ -1720,7 +1720,7 @@ func (f *Fs) Purge() error {
}
// CleanUp the trash in the Fs
func (f *Fs) CleanUp() error {
func (f *Fs) CleanUp(ctx context.Context) error {
f.CleanUpCache(false)
do := f.Fs.Features().CleanUp
@@ -1728,16 +1728,16 @@ func (f *Fs) CleanUp() error {
return nil
}
return do()
return do(ctx)
}
// About gets quota information from the Fs
func (f *Fs) About() (*fs.Usage, error) {
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
do := f.Fs.Features().About
if do == nil {
return nil, errors.New("About not supported")
}
return do()
return do(ctx)
}
// Stats returns stats about the cache storage
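
Why the threading matters for stopping transfers: once every layer accepts a ctx, a cancellation or deadline set at the top level can interrupt a long-running copy loop deep in the stack. A generic sketch of that idea (not rclone code; slowReader and copyWithContext are illustrative), assuming the copy loop checks the context between chunks:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"time"
)

// slowReader produces an endless stream of zero bytes, standing in
// for a slow remote download.
type slowReader struct{}

func (slowReader) Read(p []byte) (int, error) {
	time.Sleep(10 * time.Millisecond)
	return len(p), nil
}

// copyWithContext copies src to dst in chunks and aborts as soon as
// the context is cancelled or its deadline passes.
func copyWithContext(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
	buf := make([]byte, 32*1024)
	var written int64
	for {
		if err := ctx.Err(); err != nil {
			return written, err // context.Canceled or context.DeadlineExceeded
		}
		n, err := src.Read(buf)
		if n > 0 {
			w, werr := dst.Write(buf[:n])
			written += int64(w)
			if werr != nil {
				return written, werr
			}
		}
		if err == io.EOF {
			return written, nil
		}
		if err != nil {
			return written, err
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	n, err := copyWithContext(ctx, io.Discard, slowReader{})
	fmt.Println(n, err) // stops after ~50ms with context.DeadlineExceeded
}
```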