Implement --low-level-retries flag - fixes #266
parent 3890105cdc
commit 867a26fe4f
@@ -363,6 +363,22 @@ Log all of rclone's output to FILE. This is not active by default.
 This can be useful for tracking down problems with syncs in
 combination with the `-v` flag.
 
+### --low-level-retries NUMBER ###
+
+This controls the number of low level retries rclone does.
+
+A low level retry is used to retry a failing operation - typically one
+HTTP request. This might be uploading a chunk of a big file for
+example. You will see low level retries in the log with the `-v`
+flag.
+
+This shouldn't need to be changed from the default in normal
+operations, however if you get a lot of low level retries you may wish
+to reduce the value so rclone moves on to a high level retry (see the
+`--retries` flag) quicker.
+
+Disable low level retries with `--low-level-retries 1`.
+
 ### --modify-window=TIME ###
 
 When checking whether a file has been modified, this is the maximum
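To make the documented behaviour concrete, here is a minimal, self-contained Go sketch of a low level retry loop bounded by a configurable count. It only illustrates the idea described above, not rclone's implementation; `doHTTPRequest` and the target URL are hypothetical.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

// lowLevelRetries plays the role of the --low-level-retries value:
// how many times one failing operation (e.g. a single HTTP request)
// is attempted before giving up and leaving it to a high level retry.
const lowLevelRetries = 10

// doHTTPRequest is a hypothetical stand-in for one low level operation,
// such as uploading a chunk of a big file.
func doHTTPRequest(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 500 {
		return fmt.Errorf("server error: %s", resp.Status)
	}
	return nil
}

func main() {
	var err error
	for try := 1; try <= lowLevelRetries; try++ {
		if err = doHTTPRequest("https://example.com/chunk/1"); err == nil {
			break
		}
		log.Printf("Received error: %v - low level retry %d/%d", err, try, lowLevelRetries)
	}
	if err != nil {
		log.Fatalf("giving up after %d low level retries: %v", lowLevelRetries, err)
	}
}
```

Using a count of 1 in a loop like this performs a single attempt, which matches the documentation's note that `--low-level-retries 1` disables low level retries.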
fs/config.go
@@ -61,26 +61,27 @@ var (
 	// Config is the global config
 	Config = &ConfigInfo{}
 	// Flags
 	verbose = pflag.BoolP("verbose", "v", false, "Print lots more stuff")
 	quiet = pflag.BoolP("quiet", "q", false, "Print as little stuff as possible")
 	modifyWindow = pflag.DurationP("modify-window", "", time.Nanosecond, "Max time diff to be considered the same")
 	checkers = pflag.IntP("checkers", "", 8, "Number of checkers to run in parallel.")
 	transfers = pflag.IntP("transfers", "", 4, "Number of file transfers to run in parallel.")
 	configFile = pflag.StringP("config", "", ConfigPath, "Config file.")
 	checkSum = pflag.BoolP("checksum", "c", false, "Skip based on checksum & size, not mod-time & size")
 	sizeOnly = pflag.BoolP("size-only", "", false, "Skip based on size only, not mod-time or checksum")
 	ignoreExisting = pflag.BoolP("ignore-existing", "", false, "Skip all files that exist on destination")
 	dryRun = pflag.BoolP("dry-run", "n", false, "Do a trial run with no permanent changes")
 	connectTimeout = pflag.DurationP("contimeout", "", 60*time.Second, "Connect timeout")
 	timeout = pflag.DurationP("timeout", "", 5*60*time.Second, "IO idle timeout")
 	dumpHeaders = pflag.BoolP("dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
 	dumpBodies = pflag.BoolP("dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
 	skipVerify = pflag.BoolP("no-check-certificate", "", false, "Do not verify the server SSL certificate. Insecure.")
 	AskPassword = pflag.BoolP("ask-password", "", true, "Allow prompt for password for encrypted configuration.")
 	deleteBefore = pflag.BoolP("delete-before", "", false, "When synchronizing, delete files on destination before transfering")
 	deleteDuring = pflag.BoolP("delete-during", "", false, "When synchronizing, delete files during transfer (default)")
 	deleteAfter = pflag.BoolP("delete-after", "", false, "When synchronizing, delete files on destination after transfering")
-	bwLimit SizeSuffix
+	lowLevelRetries = pflag.IntP("low-level-retries", "", 10, "Number of low level retries to do.")
+	bwLimit SizeSuffix
 
 	// Key to use for password en/decryption.
 	// When nil, no encryption will be used for saving.
@@ -197,6 +198,7 @@ type ConfigInfo struct {
 	DeleteBefore bool // Delete before checking
 	DeleteDuring bool // Delete during checking/transfer
 	DeleteAfter bool // Delete after successful transfer.
+	LowLevelRetries int
 }
 
 // Transport returns an http.RoundTripper with the correct timeouts
@@ -285,6 +287,7 @@ func LoadConfig() {
 	Config.DumpHeaders = *dumpHeaders
 	Config.DumpBodies = *dumpBodies
 	Config.InsecureSkipVerify = *skipVerify
+	Config.LowLevelRetries = *lowLevelRetries
 
 	ConfigPath = *configFile
 
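The three hunks above follow a common pflag wiring pattern: declare the flag, add a matching field to the config struct, then copy the parsed value across during config load. Below is a minimal sketch of that pattern with illustrative names, not rclone's real ones.

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// configInfo stands in for a global configuration struct.
type configInfo struct {
	LowLevelRetries int
}

var (
	config = &configInfo{}

	// The flag is declared once at package level.
	lowLevelRetries = pflag.IntP("low-level-retries", "", 10, "Number of low level retries to do.")
)

// loadConfig copies parsed flag values into the config struct.
func loadConfig() {
	config.LowLevelRetries = *lowLevelRetries
}

func main() {
	pflag.Parse()
	loadConfig()
	fmt.Println("low level retries:", config.LowLevelRetries)
}
```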
@@ -185,7 +185,7 @@ func removeFailedCopy(dst Object) bool {
 // call Copy() with dst nil on a pre-existing file then some filing
 // systems (eg Drive) may duplicate the file.
 func Copy(f Fs, dst, src Object) {
-	const maxTries = 10
+	maxTries := Config.LowLevelRetries
 	tries := 0
 	doUpdate := dst != nil
 	var err, inErr error
@@ -231,7 +231,7 @@ tryAgain:
 	// Retry if err returned a retry error
 	if r, ok := err.(Retry); ok && r.Retry() && tries < maxTries {
 		tries++
-		Log(src, "Received error: %v - retrying %d/%d", err, tries, maxTries)
+		Log(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
 		if removeFailedCopy(dst) {
 			// If we removed dst, then nil it out and note we are not updating
 			dst = nil
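The Copy hunks above replace a hard-coded retry constant with the configured value and reword the log line. A rough sketch of that retry shape follows, using a hypothetical `copyOnce` operation and a sentinel retryable error; it is not rclone's Copy.

```go
package main

import (
	"errors"
	"log"
)

// errRetryable marks failures that are worth a low level retry.
var errRetryable = errors.New("retryable failure")

// copyOnce is a hypothetical single attempt at copying an object.
func copyOnce() error { return errRetryable }

// copyWithRetries keeps retrying retryable failures until maxTries
// low level retries have been used, then returns the last error.
func copyWithRetries(maxTries int) error {
	tries := 0
	for {
		err := copyOnce()
		if err == nil {
			return nil
		}
		if !errors.Is(err, errRetryable) || tries >= maxTries {
			return err
		}
		tries++
		log.Printf("Received error: %v - low level retry %d/%d", err, tries, maxTries)
	}
}

func main() {
	// In rclone the bound would come from Config.LowLevelRetries.
	if err := copyWithRetries(3); err != nil {
		log.Printf("copy failed: %v", err)
	}
}
```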
@@ -39,12 +39,13 @@ import (
 
 // Globals
 var (
 	RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
 	SubDir = flag.Bool("subdir", false, "Set to test with a sub directory")
 	Verbose = flag.Bool("verbose", false, "Set to enable logging")
 	DumpHeaders = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)")
 	DumpBodies = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)")
 	Individual = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
+	LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
 )
 
 // Some times used in the tests
@@ -103,7 +104,7 @@ func newRun() *Run {
 	fs.Config.Quiet = !*Verbose
 	fs.Config.DumpHeaders = *DumpHeaders
 	fs.Config.DumpBodies = *DumpBodies
+	fs.Config.LowLevelRetries = *LowLevelRetries
 	var err error
 	r.fremote, r.cleanRemote, err = fstest.RandomRemote(*RemoteName, *SubDir)
 	if err != nil {
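The test harness hunks mirror the same idea: expose the knob as a test flag and copy it into the package configuration before any remote is created. A small illustrative sketch, with placeholder names rather than the real harness types:

```go
package main

import (
	"flag"
	"fmt"
)

// harnessConfig stands in for the package-level config the tests adjust.
type harnessConfig struct {
	LowLevelRetries int
}

var (
	cfg = &harnessConfig{}

	// Test flag, defaulting to the same value as the main flag.
	lowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
)

func main() {
	flag.Parse()
	// Copy the flag into the config before running anything that retries.
	cfg.LowLevelRetries = *lowLevelRetries
	fmt.Printf("tests will allow %d low level retries\n", cfg.LowLevelRetries)
}
```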
@@ -61,7 +61,7 @@ func New() *Pacer {
 		minSleep: 10 * time.Millisecond,
 		maxSleep: 2 * time.Second,
 		decayConstant: 2,
-		retries: 10,
+		retries: fs.Config.LowLevelRetries,
 		pacer: make(chan struct{}, 1),
 	}
 	p.sleepTime = p.minSleep
@@ -231,7 +231,7 @@ func (p *Pacer) acdPacer(retry bool) {
 		if p.sleepTime < p.minSleep {
 			p.sleepTime = p.minSleep
 		}
-		fs.Debug("pacer", "Rate limited, sleeping for %v (%d retries)", p.sleepTime, consecutiveRetries)
+		fs.Debug("pacer", "Rate limited, sleeping for %v (%d consecutive low level retries)", p.sleepTime, consecutiveRetries)
 	}
 }
 
@@ -256,13 +256,14 @@ func (p *Pacer) endCall(retry bool) {
 // call implements Call but with settable retries
 func (p *Pacer) call(fn Paced, retries int) (err error) {
 	var retry bool
-	for i := 0; i < retries; i++ {
+	for i := 1; i <= retries; i++ {
 		p.beginCall()
 		retry, err = fn()
 		p.endCall(retry)
 		if !retry {
 			break
 		}
+		fs.Debug("pacer", "low level retry %d/%d", i, retries)
 	}
 	if retry {
 		err = fs.RetryError(err)
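Finally, the last pacer hunk changes the loop from `i := 0; i < retries` to `i := 1; i <= retries` so the new debug line can report a natural `retry i/retries` count. Below is a self-contained sketch of that corrected loop; the `paced` callback type here is hypothetical, standing in for the pacer's function type.

```go
package main

import "log"

// paced is a hypothetical callback: it reports whether the call
// should be retried and any error it produced.
type paced func() (retry bool, err error)

// call runs fn up to retries times, logging each low level retry.
func call(fn paced, retries int) (err error) {
	var retry bool
	for i := 1; i <= retries; i++ {
		retry, err = fn()
		if !retry {
			break
		}
		log.Printf("low level retry %d/%d", i, retries)
	}
	return err
}

func main() {
	attempts := 0
	err := call(func() (bool, error) {
		attempts++
		return attempts < 3, nil // succeed on the third attempt
	}, 10)
	log.Printf("done after %d attempts, err = %v", attempts, err)
}
```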