Implement --low-level-retries flag - fixes #266

Nick Craig-Wood 2016-01-12 17:38:28 +00:00
parent 3890105cdc
commit 867a26fe4f
5 changed files with 53 additions and 32 deletions

View File

@@ -363,6 +363,22 @@ Log all of rclone's output to FILE. This is not active by default.
This can be useful for tracking down problems with syncs in
combination with the `-v` flag.

### --low-level-retries NUMBER ###

This controls the number of low level retries rclone does.

A low level retry is used to retry a failing operation - typically one
HTTP request. This might be uploading a chunk of a big file for
example. You will see low level retries in the log with the `-v`
flag.

This shouldn't need to be changed from the default in normal
operations, however if you get a lot of low level retries you may wish
to reduce the value so rclone moves on to a high level retry (see the
`--retries` flag) quicker.

Disable low level retries with `--low-level-retries 1`.
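
For example, `rclone sync -v --low-level-retries 3 --retries 5 /path/to/local remote:backup` gives up after three failed low level attempts and falls back to the high level retry mechanism instead (the remote name and paths here are purely illustrative).
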
### --modify-window=TIME ###
When checking whether a file has been modified, this is the maximum

View File

@@ -61,26 +61,27 @@ var (
// Config is the global config
Config = &ConfigInfo{}
// Flags
verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
ignoreExisting = pflag.BoolP("ignore-existing", "", false, "Skip all files that exist on destination")
dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
dumpHeaders = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
dumpBodies = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
skipVerify = pflag.BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
AskPassword = pflag.BoolP("ask-password", "", true, "Allow prompt for password for encrypted configuration.")
deleteBefore = pflag.BoolP("delete-before", "", false, "When synchronizing, delete files on destination before transfering")
deleteDuring = pflag.BoolP("delete-during", "", false, "When synchronizing, delete files during transfer (default)")
deleteAfter = pflag.BoolP("delete-after", "", false, "When synchronizing, delete files on destination after transfering")
lowLevelRetries = pflag.IntP("low-level-retries", "", 10, "Number of low level retries to do.")
bwLimit SizeSuffix
// Key to use for password en/decryption.
// When nil, no encryption will be used for saving.
@@ -197,6 +198,7 @@ type ConfigInfo struct {
DeleteBefore bool // Delete before checking
DeleteDuring bool // Delete during checking/transfer
DeleteAfter bool // Delete after successful transfer.
LowLevelRetries int
}
// Transport returns an http.RoundTripper with the correct timeouts
@@ -285,6 +287,7 @@ func LoadConfig() {
Config.DumpHeaders = *dumpHeaders
Config.DumpBodies = *dumpBodies
Config.InsecureSkipVerify = *skipVerify
Config.LowLevelRetries = *lowLevelRetries
ConfigPath = *configFile
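
To make the flag plumbing above concrete, here is a minimal standalone sketch of the same pattern - register an integer flag with pflag, then copy the parsed value into a config struct after parsing. The `configInfo` type and `main` function are illustrative stand-ins, not rclone's real packages; only the flag registration itself matches the commit.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// configInfo is a stand-in for the ConfigInfo struct extended above.
type configInfo struct {
	LowLevelRetries int
}

// The flag is registered at package init time with the same name and
// default (10) as in the commit.
var lowLevelRetries = pflag.IntP("low-level-retries", "", 10, "Number of low level retries to do.")

func main() {
	pflag.Parse()

	// Mirrors Config.LowLevelRetries = *lowLevelRetries in LoadConfig().
	config := &configInfo{LowLevelRetries: *lowLevelRetries}
	fmt.Printf("low level retries set to %d\n", config.LowLevelRetries)
}
```
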

View File

@@ -185,7 +185,7 @@ func removeFailedCopy(dst Object) bool {
// call Copy() with dst nil on a pre-existing file then some filing
// systems (eg Drive) may duplicate the file.
func Copy(f Fs, dst, src Object) {
const maxTries = 10
maxTries := Config.LowLevelRetries
tries := 0
doUpdate := dst != nil
var err, inErr error
@@ -231,7 +231,7 @@ tryAgain:
// Retry if err returned a retry error
if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
tries++
Log(src, "Received error: %v - retrying %d/%d", err, tries, maxTries)
Log(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
if removeFailedCopy(dst) {
// If we removed dst, then nil it out and note we are not updating
dst = nil
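
The change above makes the retry budget configurable but keeps the loop's shape: run the operation, and retry only while the error opts in and the budget is not exhausted. Below is a rough, self-contained sketch of that shape; the `retrier` interface, `tempError` type, `doChunk` operation and the hard-coded `maxTries` are illustrative stand-ins, not rclone's actual types.

```go
package main

import (
	"errors"
	"fmt"
)

// retrier mirrors the shape of the Retry interface that Copy checks for.
type retrier interface {
	Retry() bool
}

// tempError is a retryable error for the purposes of this sketch.
type tempError struct{ error }

func (tempError) Retry() bool { return true }

// doChunk stands in for one low level operation, e.g. uploading one chunk.
func doChunk(attempt int) error {
	if attempt < 3 {
		return tempError{errors.New("connection reset")}
	}
	return nil
}

func main() {
	maxTries := 10 // would come from Config.LowLevelRetries
	tries := 0
	var err error
	for {
		err = doChunk(tries)
		// Retry only while the error opts in and the budget is not used up.
		if r, ok := err.(retrier); ok && r.Retry() && tries < maxTries {
			tries++
			fmt.Printf("received error: %v - low level retry %d/%d\n", err, tries, maxTries)
			continue
		}
		break
	}
	fmt.Println("finished with err =", err)
}
```
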

View File

@@ -39,12 +39,13 @@ import (
// Globals
var (
RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
SubDir = flag.Bool("subdir", false, "Set to test with a sub directory")
Verbose = flag.Bool("verbose", false, "Set to enable logging")
DumpHeaders = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)")
DumpBodies = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)")
Individual = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
)
// Some times used in the tests
@@ -103,7 +104,7 @@ func newRun() *Run {
fs.Config.Quiet = !*Verbose
fs.Config.DumpHeaders = *DumpHeaders
fs.Config.DumpBodies = *DumpBodies
fs.Config.LowLevelRetries = *LowLevelRetries
var err error
r.fremote, r.cleanRemote, err = fstest.RandomRemote(*RemoteName, *SubDir)
if err != nil {
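
A compressed sketch of the same wiring - a test binary exposing its own -low-level-retries flag and copying it into the shared config before tests run. The `configInfo` type, `newRun` helper and test are reduced, illustrative stand-ins for rclone's test harness.

```go
package example

import (
	"flag"
	"testing"
)

// configInfo stands in for rclone's fs.ConfigInfo in this sketch.
type configInfo struct {
	LowLevelRetries int
}

var config = &configInfo{}

// The test binary gets its own flag, defaulting to 10 like the global one.
var lowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")

// newRun copies parsed flag values into the shared config, mirroring the
// assignment added to newRun() above.
func newRun() *configInfo {
	config.LowLevelRetries = *lowLevelRetries
	return config
}

func TestLowLevelRetriesWiring(t *testing.T) {
	cfg := newRun()
	if cfg.LowLevelRetries < 1 {
		t.Fatalf("want at least 1 low level retry, got %d", cfg.LowLevelRetries)
	}
}
```
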

View File

@@ -61,7 +61,7 @@ func New() *Pacer {
minSleep: 10 * time.Millisecond,
maxSleep: 2 * time.Second,
decayConstant: 2,
retries: 10,
retries: fs.Config.LowLevelRetries,
pacer: make(chan struct{}, 1),
}
p.sleepTime = p.minSleep
@@ -231,7 +231,7 @@ func (p *Pacer) acdPacer(retry bool) {
if p.sleepTime < p.minSleep {
p.sleepTime = p.minSleep
}
fs.Debug("pacer", "Rate limited, sleeping for %v (%d retries)", p.sleepTime, consecutiveRetries)
fs.Debug("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, consecutiveRetries)
}
}
@@ -256,13 +256,14 @@ func (p *Pacer) endCall(retry bool) {
// call implements Call but with settable retries
func (p *Pacer) call(fn Paced, retries int) (err error) {
var retry bool
for i := 0; i < retries; i++ {
for i := 1; i <= retries; i++ {
p.beginCall()
retry, err = fn()
p.endCall(retry)
if !retry {
break
}
fs.Debug("pacer", "low level retry %d/%d", i, retries)
}
if retry {
err = fs.RetryError(err)
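
Pulled out of the pacer, the retry accounting now looks roughly like the sketch below. The `paced` function type and the `wrapRetry` helper are simplified stand-ins for the fs package's Paced and RetryError, and the begin/end pacing calls are omitted.

```go
package main

import (
	"errors"
	"fmt"
)

// paced reports whether the call should be retried, along with any error.
type paced func() (retry bool, err error)

// wrapRetry stands in for fs.RetryError, marking the error for a high level retry.
func wrapRetry(err error) error {
	return fmt.Errorf("low level retries exhausted: %w", err)
}

// call runs fn up to retries times. Counting from 1 makes the log read
// "low level retry 1/10" on the first failed attempt, as in the commit.
func call(fn paced, retries int) (err error) {
	var retry bool
	for i := 1; i <= retries; i++ {
		retry, err = fn()
		if !retry {
			break
		}
		fmt.Printf("pacer: low level retry %d/%d\n", i, retries)
	}
	if retry {
		err = wrapRetry(err)
	}
	return err
}

func main() {
	attempts := 0
	err := call(func() (bool, error) {
		attempts++
		if attempts < 4 {
			return true, errors.New("rate limited")
		}
		return false, nil
	}, 10)
	fmt.Printf("succeeded after %d attempts, err = %v\n", attempts, err)
}
```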