Mirror of https://github.com/rclone/rclone.git, synced 2025-08-19 09:52:05 +02:00
Fix excessive retries missing --max-duration timeout - fixes #4504
This change checks the context whenever rclone might retry, and doesn't retry if the current context has an error. This fixes the pathological behaviour of `--max-duration` refusing to exit because all the context deadline exceeded errors were being retried. This unfortunately meant changing the shouldRetry logic in every backend and doing a lot of context propagation. See: https://forum.rclone.org/t/add-flag-to-exit-immediately-when-max-duration-reached/22723
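To illustrate the behaviour the change is after, here is a minimal, self-contained Go sketch — not rclone's actual code; the retry loop and the failing backend call are simplified stand-ins, and only the context check mirrors the new shouldRetry logic shown in the hunks below (which, judging by the API calls, come from the Google Cloud Storage backend):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// shouldRetry mirrors the new pattern: never retry once the context has
// been cancelled or its deadline (e.g. from --max-duration) has passed.
func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
	if ctxErr := ctx.Err(); ctxErr != nil {
		return false, ctxErr
	}
	return err != nil, err // simplified: retry any non-nil error
}

func main() {
	// Stand-in for the deadline that --max-duration places on the context.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// Stand-in for a pacer.Call style retry loop around a failing call.
	var err error
	for attempt := 1; attempt <= 10; attempt++ {
		err = errors.New("temporary backend error")
		again, retErr := shouldRetry(ctx, err)
		if !again {
			err = retErr
			break
		}
		time.Sleep(20 * time.Millisecond)
	}
	// Prints "context deadline exceeded" after a few attempts instead of
	// grinding through all ten retries.
	fmt.Println(err)
}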
@@ -329,7 +329,10 @@ func (f *Fs) Features() *fs.Features {
 }
 
 // shouldRetry determines whether a given err rates being retried
-func shouldRetry(err error) (again bool, errOut error) {
+func shouldRetry(ctx context.Context, err error) (again bool, errOut error) {
+	if fserrors.ContextError(ctx, &err) {
+		return false, err
+	}
 	again = false
 	if err != nil {
 		if fserrors.ShouldRetry(err) {
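The short-circuit at the top of the new shouldRetry relies on fserrors.ContextError. As a rough approximation of what such a helper does — an assumption for illustration, not rclone's exact implementation — it can be sketched as:

package fserrorsketch

import "context"

// ContextError (approximate sketch): if ctx has been cancelled or its
// deadline has passed, make sure *perr carries an error and report true
// so that callers stop retrying.
func ContextError(ctx context.Context, perr *error) bool {
	if ctxErr := ctx.Err(); ctxErr != nil {
		if *perr == nil {
			*perr = ctxErr
		}
		return true
	}
	return false
}

With that check in place, every return shouldRetry(ctx, err) in the hunks below turns a context cancellation or deadline into an immediate stop rather than another low-level retry.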
@@ -455,7 +458,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
 		err = f.pacer.Call(func() (bool, error) {
 			_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err == nil {
 			newRoot := path.Dir(f.root)
@@ -521,7 +524,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
 		var objects *storage.Objects
 		err = f.pacer.Call(func() (bool, error) {
 			objects, err = list.Context(ctx).Do()
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err != nil {
 			if gErr, ok := err.(*googleapi.Error); ok {
@@ -624,7 +627,7 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
 		var buckets *storage.Buckets
 		err = f.pacer.Call(func() (bool, error) {
 			buckets, err = listBuckets.Context(ctx).Do()
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return nil, err
@@ -750,7 +753,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
 		// service account that only has the "Storage Object Admin" role. See #2193 for details.
 		err = f.pacer.Call(func() (bool, error) {
 			_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err == nil {
 			// Bucket already exists
@@ -785,7 +788,7 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
 				insertBucket.PredefinedAcl(f.opt.BucketACL)
 			}
 			_, err = insertBucket.Context(ctx).Do()
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 	}, nil)
 }
@@ -802,7 +805,7 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
 	return f.cache.Remove(bucket, func() error {
 		return f.pacer.Call(func() (bool, error) {
 			err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 	})
 }
@@ -848,7 +851,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	for {
 		err = f.pacer.Call(func() (bool, error) {
 			rewriteResponse, err = rewriteRequest.Context(ctx).Do()
-			return shouldRetry(err)
+			return shouldRetry(ctx, err)
 		})
 		if err != nil {
 			return nil, err
@@ -941,7 +944,7 @@ func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, er
 	bucket, bucketPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
 		object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		if gErr, ok := err.(*googleapi.Error); ok {
@@ -1012,7 +1015,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error)
 			copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
 		}
 		newObject, err = copyObject.Context(ctx).Do()
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return err
@@ -1043,7 +1046,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 				_ = res.Body.Close() // ignore error
 			}
 		}
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return nil, err
@@ -1109,7 +1112,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
 		}
 		newObject, err = insertObject.Context(ctx).Do()
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	if err != nil {
 		return err
@@ -1124,7 +1127,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
 	bucket, bucketPath := o.split()
 	err = o.fs.pacer.Call(func() (bool, error) {
 		err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
-		return shouldRetry(err)
+		return shouldRetry(ctx, err)
 	})
 	return err
 }