dropbox: fix return status when full to be fatal error

This will stop the sync, but won't stop a mount.

Fixes #7334
This commit is contained in:
Nick Craig-Wood 2023-09-24 17:33:06 +01:00
parent 2d1c2b1f76
commit 3fd5905175
2 changed files with 28 additions and 23 deletions

View File

@@ -11,7 +11,6 @@ import (
"fmt" "fmt"
"github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files" "github.com/dropbox/dropbox-sdk-go-unofficial/v6/dropbox/files"
"github.com/rclone/rclone/fs/fserrors"
) )
// finishBatch commits the batch, returning a batch status to poll or maybe complete // finishBatch commits the batch, returning a batch status to poll or maybe complete
@@ -21,14 +20,10 @@ func (f *Fs) finishBatch(ctx context.Context, items []*files.UploadSessionFinish
} }
err = f.pacer.Call(func() (bool, error) { err = f.pacer.Call(func() (bool, error) {
complete, err = f.srv.UploadSessionFinishBatchV2(arg) complete, err = f.srv.UploadSessionFinishBatchV2(arg)
// If error is insufficient space then don't retry if retry, err := shouldRetryExclude(ctx, err); !retry {
if e, ok := err.(files.UploadSessionFinishAPIError); ok { return retry, err
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
} }
// after the first chunk is uploaded, we retry everything // after the first chunk is uploaded, we retry everything except the excluded errors
return err != nil, err return err != nil, err
}) })
if err != nil { if err != nil {

View File

@@ -316,32 +316,46 @@ func (f *Fs) Features() *fs.Features {
return f.features return f.features
} }
// shouldRetry returns a boolean as to whether this err deserves to be // Some specific errors which should be excluded from retries
// retried. It returns the err as a convenience func shouldRetryExclude(ctx context.Context, err error) (bool, error) {
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
if err == nil { if err == nil {
return false, err return false, err
} }
errString := err.Error() if fserrors.ContextError(ctx, &err) {
return false, err
}
// First check for specific errors // First check for specific errors
//
// These come back from the SDK in a whole host of different
// error types, but there doesn't seem to be a consistent way
// of reading the error cause, so here we just check using the
// error string which isn't perfect but does the job.
errString := err.Error()
if strings.Contains(errString, "insufficient_space") { if strings.Contains(errString, "insufficient_space") {
return false, fserrors.FatalError(err) return false, fserrors.FatalError(err)
} else if strings.Contains(errString, "malformed_path") { } else if strings.Contains(errString, "malformed_path") {
return false, fserrors.NoRetryError(err) return false, fserrors.NoRetryError(err)
} }
return true, err
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if retry, err := shouldRetryExclude(ctx, err); !retry {
return retry, err
}
// Then handle any official Retry-After header from Dropbox's SDK // Then handle any official Retry-After header from Dropbox's SDK
switch e := err.(type) { switch e := err.(type) {
case auth.RateLimitAPIError: case auth.RateLimitAPIError:
if e.RateLimitError.RetryAfter > 0 { if e.RateLimitError.RetryAfter > 0 {
fs.Logf(errString, "Too many requests or write operations. Trying again in %d seconds.", e.RateLimitError.RetryAfter) fs.Logf(nil, "Error %v. Too many requests or write operations. Trying again in %d seconds.", err, e.RateLimitError.RetryAfter)
err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second) err = pacer.RetryAfterError(err, time.Duration(e.RateLimitError.RetryAfter)*time.Second)
} }
return true, err return true, err
} }
// Keep old behavior for backward compatibility // Keep old behavior for backward compatibility
errString := err.Error()
if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" { if strings.Contains(errString, "too_many_write_operations") || strings.Contains(errString, "too_many_requests") || errString == "" {
return true, err return true, err
} }
@@ -1692,14 +1706,10 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
err = o.fs.pacer.Call(func() (bool, error) { err = o.fs.pacer.Call(func() (bool, error) {
entry, err = o.fs.srv.UploadSessionFinish(args, nil) entry, err = o.fs.srv.UploadSessionFinish(args, nil)
// If error is insufficient space then don't retry if retry, err := shouldRetryExclude(ctx, err); !retry {
if e, ok := err.(files.UploadSessionFinishAPIError); ok { return retry, err
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
err = fserrors.NoRetryError(err)
return false, err
}
} }
// after the first chunk is uploaded, we retry everything // after the first chunk is uploaded, we retry everything except the excluded errors
return err != nil, err return err != nil, err
}) })
if err != nil { if err != nil {