rclone: implement exit codes - #1136

ishuah 2017-11-15 08:32:00 +03:00
parent 2423fa40e2
commit aec2265be0
12 changed files with 140 additions and 66 deletions

View File

@ -16,6 +16,7 @@ import (
"runtime/pprof"
"time"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@ -31,6 +32,23 @@ var (
dataRateUnit = fs.StringP("stats-unit", "", "bytes", "Show data rate in stats as either 'bits' or 'bytes'/s")
version bool
retries = fs.IntP("retries", "", 3, "Retry operations this many times if they fail")
// Errors
errorCommandNotFound = errors.New("command not found")
errorUncategorized = errors.New("uncategorized error")
errorNotEnoughArguments = errors.New("not enough arguments")
errorTooManyArguments = errors.New("too many arguments")
errorUsageError = errors.New("usage error")
)
const (
exitCodeSuccess = iota
exitCodeUsageError
exitCodeUncategorizedError
exitCodeDirNotFound
exitCodeFileNotFound
exitCodeRetryError
exitCodeNoRetryError
exitCodeFatalError
)
// Root is the main rclone command
@ -86,11 +104,11 @@ and configuration walkthroughs.
func runRoot(cmd *cobra.Command, args []string) {
if version {
ShowVersion()
os.Exit(0)
resolveExitCode(nil)
} else {
_ = Root.Usage()
fmt.Fprintf(os.Stderr, "Command not found.\n")
os.Exit(1)
resolveExitCode(errorCommandNotFound)
}
}
@ -113,7 +131,7 @@ func ShowVersion() {
func newFsFile(remote string) (fs.Fs, string) {
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
f, err := fsInfo.NewFs(configName, fsPath)
@ -123,7 +141,7 @@ func newFsFile(remote string) (fs.Fs, string) {
case nil:
return f, ""
default:
fs.Stats.Error()
fs.Stats.Error(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return nil, ""
@ -138,13 +156,14 @@ func newFsSrc(remote string) (fs.Fs, string) {
f, fileName := newFsFile(remote)
if fileName != "" {
if !fs.Config.Filter.InActive() {
fs.Stats.Error()
log.Fatalf("Can't limit to single files when using filters: %v", remote)
err := errors.Errorf("Can't limit to single files when using filters: %v", remote)
fs.Stats.Error(err)
log.Fatal(err)
}
// Limit transfers to this file
err := fs.Config.Filter.AddFile(fileName)
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
log.Fatalf("Failed to limit to single file %q: %v", remote, err)
}
// Set --no-traverse as only one file
@ -159,7 +178,7 @@ func newFsSrc(remote string) (fs.Fs, string) {
func newFsDst(remote string) fs.Fs {
f, err := fs.NewFs(remote)
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
log.Fatalf("Failed to create file system for %q: %v", remote, err)
}
return f
@ -271,14 +290,15 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
close(stopStats)
}
if err != nil {
log.Fatalf("Failed to %s: %v", cmd.Name(), err)
log.Printf("Failed to %s: %v", cmd.Name(), err)
resolveExitCode(err)
}
if showStats && (fs.Stats.Errored() || *statsInterval > 0) {
fs.Stats.Log()
}
fs.Debugf(nil, "Go routines at exit %d\n", runtime.NumGoroutine())
if fs.Stats.Errored() {
os.Exit(1)
resolveExitCode(fs.Stats.GetLastError())
}
}
@ -287,11 +307,13 @@ func CheckArgs(MinArgs, MaxArgs int, cmd *cobra.Command, args []string) {
if len(args) < MinArgs {
_ = cmd.Usage()
fmt.Fprintf(os.Stderr, "Command %s needs %d arguments mininum\n", cmd.Name(), MinArgs)
os.Exit(1)
// os.Exit(1)
resolveExitCode(errorNotEnoughArguments)
} else if len(args) > MaxArgs {
_ = cmd.Usage()
fmt.Fprintf(os.Stderr, "Command %s needs %d arguments maximum\n", cmd.Name(), MaxArgs)
os.Exit(1)
// os.Exit(1)
resolveExitCode(errorTooManyArguments)
}
}
@ -333,12 +355,12 @@ func initConfig() {
fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
f, err := os.Create(*cpuProfile)
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
log.Fatal(err)
}
err = pprof.StartCPUProfile(f)
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
log.Fatal(err)
}
AtExit(func() {
@ -352,17 +374,17 @@ func initConfig() {
fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
f, err := os.Create(*memProfile)
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
log.Fatal(err)
}
err = pprof.WriteHeapProfile(f)
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
log.Fatal(err)
}
err = f.Close()
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
log.Fatal(err)
}
})
@ -375,3 +397,28 @@ func initConfig() {
fs.Config.DataRateUnit = *dataRateUnit
}
}
func resolveExitCode(err error) {
if err == nil {
os.Exit(exitCodeSuccess)
}
err = errors.Cause(err)
switch {
case err == fs.ErrorDirNotFound:
os.Exit(exitCodeDirNotFound)
case err == fs.ErrorObjectNotFound:
os.Exit(exitCodeFileNotFound)
case err == errorUncategorized:
os.Exit(exitCodeUncategorizedError)
case fs.ShouldRetry(err):
os.Exit(exitCodeRetryError)
case fs.IsNoRetryError(err):
os.Exit(exitCodeNoRetryError)
case fs.IsFatalError(err):
os.Exit(exitCodeFatalError)
default:
os.Exit(exitCodeUsageError)
}
}
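
For orientation (not part of the commit), the mapping above hinges on `errors.Cause` unwrapping wrapped errors before the switch, so a `fs.ErrorDirNotFound` that was wrapped further up the stack still selects `exitCodeDirNotFound`. The sketch below isolates that step; `exitCodeFor` and the stand-in error are hypothetical names used only for illustration.

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Stand-in for fs.ErrorDirNotFound and a subset of the exit code constants.
var errDirNotFound = errors.New("directory not found")

const (
	exitCodeSuccess = iota
	exitCodeUsageError
	_ // uncategorized
	exitCodeDirNotFound
)

// exitCodeFor mirrors the switch in resolveExitCode but returns the code
// instead of calling os.Exit, which makes the unwrapping easy to demonstrate.
func exitCodeFor(err error) int {
	if err == nil {
		return exitCodeSuccess
	}
	switch errors.Cause(err) {
	case errDirNotFound:
		return exitCodeDirNotFound
	default:
		return exitCodeUsageError
	}
}

func main() {
	wrapped := errors.Wrap(errDirNotFound, "sync failed")
	fmt.Println(exitCodeFor(wrapped)) // 3 - Cause() strips the wrapping first
}
```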

View File

@ -72,7 +72,7 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
underlyingDst := cryptDst.UnWrap()
underlyingHash, err := underlyingDst.Hash(hashType)
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
fs.Errorf(dst, "Error reading hash from underlying %v: %v", underlyingDst, err)
return true, false
}
@ -81,7 +81,7 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
}
cryptHash, err := fcrypt.ComputeHash(cryptDst, src, hashType)
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
fs.Errorf(dst, "Error computing hash: %v", err)
return true, false
}
@ -89,8 +89,9 @@ func cryptCheck(fdst, fsrc fs.Fs) error {
return false, true
}
if cryptHash != underlyingHash {
fs.Stats.Error()
fs.Errorf(src, "hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
err = errors.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
fs.Stats.Error(err)
fs.Errorf(src, "%v", err)
return true, false
}
fs.Debugf(src, "OK")

View File

@ -27,7 +27,7 @@ var commandDefintion = &cobra.Command{
cmd.Run(false, false, command, func() error {
return fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(path string, entries fs.DirEntries, err error) error {
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
fs.Errorf(path, "error listing: %v", err)
return nil
}

View File

@ -85,7 +85,7 @@ can be processed line by line as each item is written one to a line.
first := true
err := fs.Walk(fsrc, "", false, fs.ConfigMaxDepth(recurse), func(dirPath string, entries fs.DirEntries, err error) error {
if err != nil {
fs.Stats.Error()
fs.Stats.Error(err)
fs.Errorf(dirPath, "error listing: %v", err)
return nil
}

View File

@ -142,7 +142,7 @@ type indexData struct {
// internalError returns an http.StatusInternalServerError and logs the error
func internalError(what interface{}, w http.ResponseWriter, text string, err error) {
fs.Stats.Error()
fs.Stats.Error(err)
fs.Errorf(what, "%s: %v", text, err)
http.Error(w, text+".", http.StatusInternalServerError)
}

View File

@ -1023,6 +1023,16 @@ when starting a retry so the user can see that any previous error
messages may not be valid after the retry. If rclone has done a retry
it will log a high priority message if the retry was successful.
### List of exit codes ###
* `0` - Success
* `1` - Syntax or usage error
* `2` - Error not otherwise categorised
* `3` - Directory not found
* `4` - File not found
* `5` - Temporary error (one that more retries might fix) (Retry errors)
* `6` - Less serious errors (like 461 errors from dropbox) (NoRetry errors)
* `7` - Fatal error (one that more retries won't fix, like account suspended) (Fatal errors)
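
As an illustration only (not part of the documented behaviour), a caller can branch on these codes after running rclone. The Go sketch below assumes a Go 1.12+ toolchain for `(*exec.ExitError).ExitCode()`; the remote path is a placeholder.

```go
package main

import (
	"log"
	"os/exec"
)

func main() {
	// Run an rclone command and react to its documented exit code.
	err := exec.Command("rclone", "lsd", "remote:path").Run()
	if err == nil {
		log.Println("success (exit code 0)")
		return
	}
	if ee, ok := err.(*exec.ExitError); ok {
		switch ee.ExitCode() {
		case 3:
			log.Println("directory not found")
		case 5:
			log.Println("temporary error - retrying may help")
		default:
			log.Printf("rclone failed with exit code %d", ee.ExitCode())
		}
		return
	}
	log.Fatalf("could not start rclone: %v", err)
}
```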
Environment Variables
---------------------

View File

@ -169,6 +169,7 @@ type StatsInfo struct {
lock sync.RWMutex
bytes int64
errors int64
lastError error
checks int64
checking stringSet
transfers int64
@ -251,6 +252,13 @@ func (s *StatsInfo) GetErrors() int64 {
return s.errors
}
// GetLastError returns the lastError
func (s *StatsInfo) GetLastError() error {
s.lock.RLock()
defer s.lock.RUnlock()
return s.lastError
}
// ResetCounters sets the counters (bytes, checks, errors, transfers) to 0
func (s *StatsInfo) ResetCounters() {
s.lock.RLock()
@ -275,11 +283,12 @@ func (s *StatsInfo) Errored() bool {
return s.errors != 0
}
// Error adds a single error into the stats
func (s *StatsInfo) Error() {
// Error adds a single error into the stats and assigns lastError
func (s *StatsInfo) Error(err error) {
s.lock.Lock()
defer s.lock.Unlock()
s.errors++
s.lastError = err
}
// Checking adds a check into the stats
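
A minimal sketch (using a trimmed stand-in type, not the real `StatsInfo`) of how the new methods fit together: `Error(err)` counts the error and remembers it, and `GetLastError()` returns it so `cmd.Run` can hand it to `resolveExitCode` when the stats show an error occurred.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// stats is a cut-down stand-in for fs.StatsInfo covering only the
// error-tracking behaviour added here.
type stats struct {
	lock      sync.RWMutex
	errors    int64
	lastError error
}

// Error counts an error and remembers the most recent one.
func (s *stats) Error(err error) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.errors++
	s.lastError = err
}

// GetLastError returns the most recently recorded error.
func (s *stats) GetLastError() error {
	s.lock.RLock()
	defer s.lock.RUnlock()
	return s.lastError
}

func main() {
	s := &stats{}
	s.Error(errors.New("directory not found"))
	// In cmd.Run this value feeds resolveExitCode after the transfer finishes.
	fmt.Println("last error:", s.GetLastError())
}
```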

View File

@ -342,14 +342,14 @@ func (m *march) processJob(job listDirJob) (jobs []listDirJob) {
wg.Wait()
if srcListErr != nil {
Errorf(job.srcRemote, "error reading source directory: %v", srcListErr)
Stats.Error()
Stats.Error(srcListErr)
return nil
}
if dstListErr == ErrorDirNotFound {
// Copy the stuff anyway
} else if dstListErr != nil {
Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr)
Stats.Error()
Stats.Error(dstListErr)
return nil
}

View File

@ -73,7 +73,7 @@ func CheckHashes(src ObjectInfo, dst Object) (equal bool, hash HashType, err err
hash = common.GetOne()
srcHash, err := src.Hash(hash)
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(src, "Failed to calculate src hash: %v", err)
return false, hash, err
}
@ -82,7 +82,7 @@ func CheckHashes(src ObjectInfo, dst Object) (equal bool, hash HashType, err err
}
dstHash, err := dst.Hash(hash)
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(dst, "Failed to calculate dst hash: %v", err)
return false, hash, err
}
@ -199,7 +199,7 @@ func equal(src ObjectInfo, dst Object, sizeOnly, checkSum bool) bool {
}
return false
} else if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(dst, "Failed to set modification time: %v", err)
} else {
Infof(src, "Updated modification time in destination")
@ -345,16 +345,16 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) {
break
}
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(src, "Failed to copy: %v", err)
return err
}
// Verify sizes are the same after transfer
if !Config.IgnoreSize && src.Size() != dst.Size() {
Stats.Error()
err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
Errorf(dst, "%v", err)
Stats.Error(err)
removeFailedCopy(dst)
return err
}
@ -366,18 +366,18 @@ func Copy(f Fs, dst Object, remote string, src Object) (err error) {
var srcSum string
srcSum, err = src.Hash(hashType)
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(src, "Failed to read src hash: %v", err)
} else if srcSum != "" {
var dstSum string
dstSum, err = dst.Hash(hashType)
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(dst, "Failed to read hash: %v", err)
} else if !Config.IgnoreChecksum && !HashEquals(srcSum, dstSum) {
Stats.Error()
err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
Errorf(dst, "%v", err)
Stats.Error(err)
removeFailedCopy(dst)
return err
}
@ -413,7 +413,7 @@ func Move(fdst Fs, dst Object, remote string, src Object) (err error) {
case ErrorCantMove:
Debugf(src, "Can't move, switching to copy")
default:
Stats.Error()
Stats.Error(err)
Errorf(src, "Couldn't move: %v", err)
return err
}
@ -464,7 +464,7 @@ func deleteFileWithBackupDir(dst Object, backupDir Fs) (err error) {
err = dst.Remove()
}
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(dst, "Couldn't %s: %v", action, err)
} else if !Config.DryRun {
Infof(dst, actioned)
@ -730,8 +730,9 @@ func checkIdentical(dst, src Object) (differ bool, noHash bool) {
return false, true
}
if !same {
Stats.Error()
Errorf(src, "%v differ", hash)
err = errors.Errorf("%v differ", hash)
Errorf(src, "%v", err)
Stats.Error(err)
return true, false
}
return false, false
@ -755,8 +756,9 @@ type checkMarch struct {
func (c *checkMarch) DstOnly(dst DirEntry) (recurse bool) {
switch dst.(type) {
case Object:
Stats.Error()
Errorf(dst, "File not in %v", c.fsrc)
err := errors.Errorf("File not in %v", c.fsrc)
Errorf(dst, "%v", err)
Stats.Error(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.srcFilesMissing, 1)
case Directory:
@ -772,8 +774,9 @@ func (c *checkMarch) DstOnly(dst DirEntry) (recurse bool) {
func (c *checkMarch) SrcOnly(src DirEntry) (recurse bool) {
switch src.(type) {
case Object:
Stats.Error()
Errorf(src, "File not in %v", c.fdst)
err := errors.Errorf("File not in %v", c.fdst)
Errorf(src, "%v", err)
Stats.Error(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.dstFilesMissing, 1)
case Directory:
@ -790,8 +793,9 @@ func (c *checkMarch) checkIdentical(dst, src Object) (differ bool, noHash bool)
Stats.Checking(src.Remote())
defer Stats.DoneChecking(src.Remote())
if !Config.IgnoreSize && src.Size() != dst.Size() {
Stats.Error()
Errorf(src, "Sizes differ")
err := errors.Errorf("Sizes differ")
Errorf(src, "%v", err)
Stats.Error(err)
return true, false
}
if Config.SizeOnly {
@ -816,8 +820,9 @@ func (c *checkMarch) Match(dst, src DirEntry) (recurse bool) {
atomic.AddInt32(&c.noHashes, 1)
}
} else {
Stats.Error()
Errorf(src, "is file on %v but directory on %v", c.fsrc, c.fdst)
err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst)
Errorf(src, "%v", err)
Stats.Error(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.dstFilesMissing, 1)
}
@ -827,8 +832,9 @@ func (c *checkMarch) Match(dst, src DirEntry) (recurse bool) {
if ok {
return true
}
Stats.Error()
Errorf(dst, "is file on %v but directory on %v", c.fdst, c.fsrc)
err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc)
Errorf(dst, "%v", err)
Stats.Error(err)
atomic.AddInt32(&c.differences, 1)
atomic.AddInt32(&c.srcFilesMissing, 1)
@ -952,7 +958,7 @@ func CheckDownload(fdst, fsrc Fs) error {
check := func(a, b Object) (differ bool, noHash bool) {
differ, err := CheckIdentical(a, b)
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(a, "Failed to download: %v", err)
return true, true
}
@ -1108,7 +1114,7 @@ func Mkdir(f Fs, dir string) error {
Debugf(logDirName(f, dir), "Making directory")
err := f.Mkdir(dir)
if err != nil {
Stats.Error()
Stats.Error(err)
return err
}
return nil
@ -1129,7 +1135,7 @@ func TryRmdir(f Fs, dir string) error {
func Rmdir(f Fs, dir string) error {
err := TryRmdir(f, dir)
if err != nil {
Stats.Error()
Stats.Error(err)
return err
}
return err
@ -1159,7 +1165,7 @@ func Purge(f Fs) error {
err = Rmdirs(f, "")
}
if err != nil {
Stats.Error()
Stats.Error(err)
return err
}
return nil
@ -1198,7 +1204,7 @@ func dedupeRename(remote string, objs []Object) {
if !Config.DryRun {
newObj, err := doMove(o, newName)
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(o, "Failed to rename: %v", err)
continue
}
@ -1469,8 +1475,9 @@ func listToChan(f Fs) ObjectsChan {
if err == ErrorDirNotFound {
return nil
}
Stats.Error()
Errorf(nil, "Failed to list: %v", err)
err = errors.Errorf("Failed to list: %v", err)
Stats.Error(err)
Errorf(nil, "%v", err)
return nil
}
entries.ForObject(func(obj Object) {
@ -1535,7 +1542,7 @@ func Cat(f Fs, w io.Writer, offset, count int64) error {
}
in, err := o.Open(options...)
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(o, "Failed to open: %v", err)
return
}
@ -1550,7 +1557,7 @@ func Cat(f Fs, w io.Writer, offset, count int64) error {
defer func() {
err = in.Close()
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(o, "Failed to close: %v", err)
}
}()
@ -1559,7 +1566,7 @@ func Cat(f Fs, w io.Writer, offset, count int64) error {
defer mu.Unlock()
_, err = io.Copy(w, in)
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(o, "Failed to send to output: %v", err)
}
})
@ -1586,8 +1593,8 @@ func Rcat(fdst Fs, dstFileName string, in0 io.ReadCloser, modTime time.Time) (ds
compare := func(dst Object) error {
src := NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst)
if !Equal(src, dst) {
Stats.Error()
err = errors.Errorf("corrupted on transfer")
Stats.Error(err)
Errorf(dst, "%v", err)
return err
}
@ -1659,7 +1666,7 @@ func Rmdirs(f Fs, dir string) error {
dirEmpty[""] = true
err := Walk(f, dir, true, Config.MaxDepth, func(dirPath string, entries DirEntries, err error) error {
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(f, "Failed to list %q: %v", dirPath, err)
return nil
}
@ -1706,7 +1713,7 @@ func Rmdirs(f Fs, dir string) error {
dir := toDelete[i]
err := TryRmdir(f, dir)
if err != nil {
Stats.Error()
Stats.Error(err)
Errorf(dir, "Failed to rmdir: %v", err)
return err
}

View File

@ -885,7 +885,7 @@ func MoveDir(fdst, fsrc Fs) error {
Infof(fdst, "Server side directory move succeeded")
return nil
default:
Stats.Error()
Stats.Error(err)
Errorf(fdst, "Server side directory move failed: %v", err)
return err
}

View File

@ -580,7 +580,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) {
)
fs.Stats.ResetCounters()
fs.Stats.Error()
fs.Stats.Error(nil)
err := fs.Sync(r.Fremote, r.Flocal)
assert.Equal(t, fs.ErrorNotDeleting, err)

View File

@ -134,7 +134,7 @@ func walk(f Fs, path string, includeAll bool, maxLevel int, fn WalkFunc, listDir
// NB once we have passed entries to fn we mustn't touch it again
if err != nil && err != ErrorSkipDir {
traversing.Done()
Stats.Error()
Stats.Error(err)
Errorf(job.remote, "error listing: %v", err)
closeQuit()
// Send error to error channel if space