2018-01-12 17:30:54 +01:00
|
|
|
// Package operations does generic operations on filesystems and objects
|
|
|
|
package operations
|
2014-03-28 18:56:04 +01:00
|
|
|
|
|
|
|
import (
|
2017-02-13 11:48:26 +01:00
|
|
|
"bytes"
|
2018-04-06 20:13:27 +02:00
|
|
|
"context"
|
2018-05-13 13:15:05 +02:00
|
|
|
"encoding/csv"
|
2014-03-28 18:56:04 +01:00
|
|
|
"fmt"
|
2014-08-01 18:58:39 +02:00
|
|
|
"io"
|
2017-08-03 21:42:35 +02:00
|
|
|
"io/ioutil"
|
2015-03-01 13:38:31 +01:00
|
|
|
"path"
|
2019-02-28 12:39:32 +01:00
|
|
|
"path/filepath"
|
2016-03-05 17:10:51 +01:00
|
|
|
"sort"
|
2018-01-06 15:39:31 +01:00
|
|
|
"strconv"
|
2016-01-23 21:16:47 +01:00
|
|
|
"strings"
|
2014-03-28 18:56:04 +01:00
|
|
|
"sync"
|
2015-10-02 20:48:48 +02:00
|
|
|
"sync/atomic"
|
2017-08-03 21:42:35 +02:00
|
|
|
"time"
|
2016-01-23 21:16:47 +01:00
|
|
|
|
2016-06-12 16:06:02 +02:00
|
|
|
"github.com/pkg/errors"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/accounting"
|
|
|
|
"github.com/rclone/rclone/fs/cache"
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
|
|
|
"github.com/rclone/rclone/fs/fshttp"
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
|
|
|
"github.com/rclone/rclone/fs/march"
|
|
|
|
"github.com/rclone/rclone/fs/object"
|
|
|
|
"github.com/rclone/rclone/fs/walk"
|
2019-08-06 13:44:08 +02:00
|
|
|
"github.com/rclone/rclone/lib/random"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/lib/readers"
|
2019-01-15 17:43:55 +01:00
|
|
|
"golang.org/x/sync/errgroup"
|
2014-03-28 18:56:04 +01:00
|
|
|
)
|
|
|
|
|
2016-01-11 13:39:33 +01:00
|
|
|
// CheckHashes checks the two files to see if they have common
|
|
|
|
// known hash types and compares them
|
2014-03-28 18:56:04 +01:00
|
|
|
//
|
2016-01-24 19:06:57 +01:00
|
|
|
// Returns
|
2015-08-20 21:48:58 +02:00
|
|
|
//
|
2016-01-24 19:06:57 +01:00
|
|
|
// equal - which is equality of the hashes
|
|
|
|
//
|
|
|
|
// hash - the HashType. This is HashNone if either of the hashes were
|
|
|
|
// unset or a compatible hash couldn't be found.
|
|
|
|
//
|
|
|
|
// err - may return an error which will already have been logged
|
2014-03-28 18:56:04 +01:00
|
|
|
//
|
2015-08-20 21:48:58 +02:00
|
|
|
// If an error is returned it will return equal as false
|
2019-06-17 10:34:30 +02:00
|
|
|
func CheckHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) {
|
2016-01-11 13:39:33 +01:00
|
|
|
common := src.Fs().Hashes().Overlap(dst.Fs().Hashes())
|
2018-01-12 17:30:54 +01:00
|
|
|
// fs.Debugf(nil, "Shared hashes: %v", common)
|
2016-01-11 13:39:33 +01:00
|
|
|
if common.Count() == 0 {
|
2018-01-18 21:27:52 +01:00
|
|
|
return true, hash.None, nil
|
2016-01-11 13:39:33 +01:00
|
|
|
}
|
2019-08-10 11:28:26 +02:00
|
|
|
equal, ht, _, _, err = checkHashes(ctx, src, dst, common.GetOne())
|
|
|
|
return equal, ht, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// checkHashes does the work of CheckHashes but takes a hash.Type and
// returns the effective hash type used.
//
// It additionally returns the raw srcHash and dstHash strings so
// callers can report them (e.g. in corruption error messages).
func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.Type) (equal bool, htOut hash.Type, srcHash, dstHash string, err error) {
	// Calculate hashes in parallel
	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() (err error) {
		srcHash, err = src.Hash(ctx, ht)
		if err != nil {
			fs.CountError(err)
			fs.Errorf(src, "Failed to calculate src hash: %v", err)
		}
		return err
	})
	g.Go(func() (err error) {
		dstHash, err = dst.Hash(ctx, ht)
		if err != nil {
			fs.CountError(err)
			fs.Errorf(dst, "Failed to calculate dst hash: %v", err)
		}
		return err
	})
	err = g.Wait()
	if err != nil {
		// Errors were already counted and logged in the goroutines above
		return false, ht, srcHash, dstHash, err
	}
	// An empty hash means the backend couldn't supply one for this
	// object - treat the objects as equal with hash.None rather than
	// reporting a mismatch
	if srcHash == "" {
		return true, hash.None, srcHash, dstHash, nil
	}
	if dstHash == "" {
		return true, hash.None, srcHash, dstHash, nil
	}
	if srcHash != dstHash {
		fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs())
		fs.Debugf(dst, "%v = %s (%v)", ht, dstHash, dst.Fs())
	} else {
		fs.Debugf(src, "%v = %s OK", ht, srcHash)
	}
	return srcHash == dstHash, ht, srcHash, dstHash, nil
}
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
// Equal checks to see if the src and dst objects are equal by looking at
|
2016-01-11 13:39:33 +01:00
|
|
|
// size, mtime and hash
|
2014-03-28 18:56:04 +01:00
|
|
|
//
|
|
|
|
// If the src and dst size are different then it is considered to be
|
2015-06-06 09:38:45 +02:00
|
|
|
// not equal. If --size-only is in effect then this is the only check
|
2016-06-17 18:20:08 +02:00
|
|
|
// that is done. If --ignore-size is in effect then this check is
|
|
|
|
// skipped and the files are considered the same size.
|
2014-03-28 18:56:04 +01:00
|
|
|
//
|
|
|
|
// If the size is the same and the mtime is the same then it is
|
2015-06-06 09:38:45 +02:00
|
|
|
// considered to be equal. This check is skipped if using --checksum.
|
2014-03-28 18:56:04 +01:00
|
|
|
//
|
2015-06-06 09:38:45 +02:00
|
|
|
// If the size is the same and mtime is different, unreadable or
|
2016-01-11 13:39:33 +01:00
|
|
|
// --checksum is set and the hash is the same then the file is
|
2015-06-06 09:38:45 +02:00
|
|
|
// considered to be equal. In this case the mtime on the dst is
|
|
|
|
// updated if --checksum is not set.
|
2014-03-28 18:56:04 +01:00
|
|
|
//
|
|
|
|
// Otherwise the file is considered to be not equal including if there
|
|
|
|
// were errors reading info.
|
2019-06-17 10:34:30 +02:00
|
|
|
func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
|
2019-07-08 03:02:53 +02:00
|
|
|
return equal(ctx, src, dst, fs.Config.SizeOnly, fs.Config.CheckSum, !fs.Config.NoUpdateModTime)
|
2016-12-18 11:03:56 +01:00
|
|
|
}
|
|
|
|
|
2018-01-31 17:15:30 +01:00
|
|
|
// sizeDiffers compare the size of src and dst taking into account the
|
|
|
|
// various ways of ignoring sizes
|
|
|
|
func sizeDiffers(src, dst fs.ObjectInfo) bool {
|
|
|
|
if fs.Config.IgnoreSize || src.Size() < 0 || dst.Size() < 0 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return src.Size() != dst.Size()
|
|
|
|
}
|
|
|
|
|
2019-01-10 12:07:10 +01:00
|
|
|
// checksumWarning ensures the "--checksum falling back to --size-only"
// warning in equal is only logged once per run.
var checksumWarning sync.Once
|
|
|
|
|
2019-07-08 03:02:53 +02:00
|
|
|
func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, sizeOnly, checkSum, UpdateModTime bool) bool {
|
2018-01-31 17:15:30 +01:00
|
|
|
if sizeDiffers(src, dst) {
|
|
|
|
fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size())
|
|
|
|
return false
|
2014-03-28 18:56:04 +01:00
|
|
|
}
|
2016-12-18 11:03:56 +01:00
|
|
|
if sizeOnly {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(src, "Sizes identical")
|
2015-06-06 09:38:45 +02:00
|
|
|
return true
|
|
|
|
}
|
2014-03-28 18:56:04 +01:00
|
|
|
|
2016-11-28 18:08:15 +01:00
|
|
|
// Assert: Size is equal or being ignored
|
|
|
|
|
|
|
|
// If checking checksum and not modtime
|
2016-12-18 11:03:56 +01:00
|
|
|
if checkSum {
|
2016-11-28 18:08:15 +01:00
|
|
|
// Check the hash
|
2019-06-17 10:34:30 +02:00
|
|
|
same, ht, _ := CheckHashes(ctx, src, dst)
|
2016-11-28 18:08:15 +01:00
|
|
|
if !same {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(src, "%v differ", ht)
|
2016-11-28 18:08:15 +01:00
|
|
|
return false
|
2015-08-20 21:48:58 +02:00
|
|
|
}
|
2018-01-18 21:27:52 +01:00
|
|
|
if ht == hash.None {
|
2019-01-10 12:07:10 +01:00
|
|
|
checksumWarning.Do(func() {
|
|
|
|
fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only")
|
|
|
|
})
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(src, "Size of src and dst objects identical")
|
2015-06-03 16:08:27 +02:00
|
|
|
} else {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(src, "Size and %v of src and dst objects identical", ht)
|
2015-06-03 16:08:27 +02:00
|
|
|
}
|
2016-11-28 18:08:15 +01:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sizes the same so check the mtime
|
2018-06-03 20:45:34 +02:00
|
|
|
modifyWindow := fs.GetModifyWindow(src.Fs(), dst.Fs())
|
|
|
|
if modifyWindow == fs.ModTimeNotSupported {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(src, "Sizes identical")
|
2016-11-28 18:08:15 +01:00
|
|
|
return true
|
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
srcModTime := src.ModTime(ctx)
|
|
|
|
dstModTime := dst.ModTime(ctx)
|
2016-11-28 18:08:15 +01:00
|
|
|
dt := dstModTime.Sub(srcModTime)
|
2018-06-03 20:45:34 +02:00
|
|
|
if dt < modifyWindow && dt > -modifyWindow {
|
|
|
|
fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow)
|
2016-11-28 18:08:15 +01:00
|
|
|
return true
|
2014-03-28 18:56:04 +01:00
|
|
|
}
|
|
|
|
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime)
|
2016-11-28 18:08:15 +01:00
|
|
|
|
|
|
|
// Check if the hashes are the same
|
2019-06-17 10:34:30 +02:00
|
|
|
same, ht, _ := CheckHashes(ctx, src, dst)
|
2014-03-28 18:56:04 +01:00
|
|
|
if !same {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(src, "%v differ", ht)
|
2016-11-28 18:08:15 +01:00
|
|
|
return false
|
|
|
|
}
|
2018-01-18 21:27:52 +01:00
|
|
|
if ht == hash.None {
|
2016-11-28 18:08:15 +01:00
|
|
|
// if couldn't check hash, return that they differ
|
2014-03-28 18:56:04 +01:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2016-11-28 18:08:15 +01:00
|
|
|
// mod time differs but hash is the same to reset mod time if required
|
2019-07-08 03:02:53 +02:00
|
|
|
if UpdateModTime {
|
2018-01-12 17:30:54 +01:00
|
|
|
if fs.Config.DryRun {
|
|
|
|
fs.Logf(src, "Not updating modification time as --dry-run")
|
2016-11-28 18:08:15 +01:00
|
|
|
} else {
|
2017-09-02 10:29:01 +02:00
|
|
|
// Size and hash the same but mtime different
|
|
|
|
// Error if objects are treated as immutable
|
2018-01-12 17:30:54 +01:00
|
|
|
if fs.Config.Immutable {
|
2019-07-22 21:11:46 +02:00
|
|
|
fs.Errorf(dst, "StartedAt mismatch between immutable objects")
|
2017-09-02 10:29:01 +02:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
// Update the mtime of the dst object here
|
2019-06-17 10:34:30 +02:00
|
|
|
err := dst.SetModTime(ctx, srcModTime)
|
2018-01-12 17:30:54 +01:00
|
|
|
if err == fs.ErrorCantSetModTime {
|
|
|
|
fs.Debugf(dst, "src and dst identical but can't set mod time without re-uploading")
|
2017-06-13 14:58:39 +02:00
|
|
|
return false
|
2018-01-12 17:30:54 +01:00
|
|
|
} else if err == fs.ErrorCantSetModTimeWithoutDelete {
|
|
|
|
fs.Debugf(dst, "src and dst identical but can't set mod time without deleting and re-uploading")
|
2018-03-13 17:05:06 +01:00
|
|
|
// Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file
|
|
|
|
// put in the BackupDir than deleted which is what will happen if we don't delete it.
|
|
|
|
if fs.Config.BackupDir == "" {
|
2019-06-17 10:34:30 +02:00
|
|
|
err = dst.Remove(ctx)
|
2018-03-13 17:05:06 +01:00
|
|
|
if err != nil {
|
|
|
|
fs.Errorf(dst, "failed to delete before re-upload: %v", err)
|
|
|
|
}
|
2017-06-13 14:58:39 +02:00
|
|
|
}
|
2017-02-16 00:09:44 +01:00
|
|
|
return false
|
|
|
|
} else if err != nil {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.CountError(err)
|
|
|
|
fs.Errorf(dst, "Failed to set modification time: %v", err)
|
2017-02-16 00:09:44 +01:00
|
|
|
} else {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Infof(src, "Updated modification time in destination")
|
2017-02-16 00:09:44 +01:00
|
|
|
}
|
2016-03-22 16:07:10 +01:00
|
|
|
}
|
2015-06-03 16:08:27 +02:00
|
|
|
}
|
2014-03-28 18:56:04 +01:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
2014-07-15 20:27:05 +02:00
|
|
|
// Used to remove a failed copy
|
2015-03-14 18:54:41 +01:00
|
|
|
//
|
2019-04-30 14:06:24 +02:00
|
|
|
// Returns whether the file was successfully removed or not
|
2019-06-17 10:34:30 +02:00
|
|
|
func removeFailedCopy(ctx context.Context, dst fs.Object) bool {
|
2015-03-14 18:54:41 +01:00
|
|
|
if dst == nil {
|
|
|
|
return false
|
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Infof(dst, "Removing failed copy")
|
2019-06-17 10:34:30 +02:00
|
|
|
removeErr := dst.Remove(ctx)
|
2015-03-14 18:54:41 +01:00
|
|
|
if removeErr != nil {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Infof(dst, "Failed to remove failed copy: %s", removeErr)
|
2015-03-14 18:54:41 +01:00
|
|
|
return false
|
2014-07-15 20:27:05 +02:00
|
|
|
}
|
2015-03-14 18:54:41 +01:00
|
|
|
return true
|
2014-07-15 20:27:05 +02:00
|
|
|
}
|
|
|
|
|
2016-10-23 18:34:17 +02:00
|
|
|
// Wrapper to override the remote for an object
type overrideRemoteObject struct {
	// Embedded so all other fs.Object methods delegate unchanged
	fs.Object
	// remote is the name reported by Remote() instead of Object.Remote()
	remote string
}

// Remote returns the overridden remote name
func (o *overrideRemoteObject) Remote() string {
	return o.remote
}

// MimeType returns the mime type of the underlying object or "" if it
// can't be worked out
func (o *overrideRemoteObject) MimeType(ctx context.Context) string {
	// Only the optional fs.MimeTyper interface can supply a mime type
	if do, ok := o.Object.(fs.MimeTyper); ok {
		return do.MimeType(ctx)
	}
	return ""
}

// Check interface is satisfied
var _ fs.MimeTyper = (*overrideRemoteObject)(nil)
|
2017-03-04 11:10:55 +01:00
|
|
|
|
2016-10-22 18:53:10 +02:00
|
|
|
// Copy src object to dst or f if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// It returns the destination object if possible. Note that this may
// be nil.
//
// The copy is retried up to --low-level-retries times on retryable
// errors. Server side copy is preferred when available; otherwise it
// falls back to multi-thread copy, Rcat (unknown size), or a plain
// streamed Put/Update.
func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
	tr := accounting.Stats(ctx).NewTransfer(src)
	defer func() {
		tr.Done(err)
	}()
	newDst = dst
	if fs.Config.DryRun {
		fs.Logf(src, "Not copying as --dry-run")
		return newDst, nil
	}
	maxTries := fs.Config.LowLevelRetries
	tries := 0
	doUpdate := dst != nil
	// work out which hash to use - limit to 1 hash in common
	var common hash.Set
	hashType := hash.None
	if !fs.Config.IgnoreChecksum {
		common = src.Fs().Hashes().Overlap(f.Hashes())
		if common.Count() > 0 {
			hashType = common.GetOne()
			common = hash.Set(hashType)
		}
	}
	hashOption := &fs.HashesOption{Hashes: common}
	var actionTaken string
	for {
		// Try server side copy first - if has optional interface and
		// is same underlying remote
		actionTaken = "Copied (server side copy)"
		if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
			// Check transfer limit for server side copies
			if fs.Config.MaxTransfer >= 0 && accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) {
				return nil, accounting.ErrorMaxTransferLimitReached
			}
			in := tr.Account(nil) // account the transfer
			in.ServerSideCopyStart()
			newDst, err = doCopy(ctx, src, remote)
			if err == nil {
				dst = newDst
				in.ServerSideCopyEnd(dst.Size()) // account the bytes for the server side transfer
				err = in.Close()
			} else {
				_ = in.Close()
			}
			if err == fs.ErrorCantCopy {
				tr.Reset() // skip incomplete accounting - will be overwritten by the manual copy below
			}
		} else {
			err = fs.ErrorCantCopy
		}
		// If can't server side copy, do it manually
		if err == fs.ErrorCantCopy {
			if doMultiThreadCopy(f, src) {
				// Number of streams proportional to size
				streams := src.Size() / int64(fs.Config.MultiThreadCutoff)
				// With maximum
				if streams > int64(fs.Config.MultiThreadStreams) {
					streams = int64(fs.Config.MultiThreadStreams)
				}
				if streams < 2 {
					streams = 2
				}
				dst, err = multiThreadCopy(ctx, f, remote, src, int(streams), tr)
				if doUpdate {
					actionTaken = "Multi-thread Copied (replaced existing)"
				} else {
					actionTaken = "Multi-thread Copied (new)"
				}
			} else {
				var in0 io.ReadCloser
				in0, err = newReOpen(ctx, src, hashOption, nil, fs.Config.LowLevelRetries)
				if err != nil {
					err = errors.Wrap(err, "failed to open source object")
				} else {
					if src.Size() == -1 {
						// -1 indicates unknown size. Use Rcat to handle both remotes supporting and not supporting PutStream.
						if doUpdate {
							actionTaken = "Copied (Rcat, replaced existing)"
						} else {
							actionTaken = "Copied (Rcat, new)"
						}
						// NB Rcat closes in0
						dst, err = Rcat(ctx, f, remote, in0, src.ModTime(ctx))
						newDst = dst
					} else {
						in := tr.Account(in0).WithBuffer() // account and buffer the transfer
						var wrappedSrc fs.ObjectInfo = src
						// We try to pass the original object if possible
						if src.Remote() != remote {
							wrappedSrc = &overrideRemoteObject{Object: src, remote: remote}
						}
						if doUpdate {
							actionTaken = "Copied (replaced existing)"
							err = dst.Update(ctx, in, wrappedSrc, hashOption)
						} else {
							actionTaken = "Copied (new)"
							dst, err = f.Put(ctx, in, wrappedSrc, hashOption)
						}
						closeErr := in.Close()
						if err == nil {
							newDst = dst
							err = closeErr
						}
					}
				}
			}
		}
		tries++
		if tries >= maxTries {
			break
		}
		// Retry if err returned a retry error
		if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) {
			fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries)
			continue
		}
		// otherwise finish
		break
	}
	if err != nil {
		fs.CountError(err)
		fs.Errorf(src, "Failed to copy: %v", err)
		return newDst, err
	}

	// Verify sizes are the same after transfer
	if sizeDiffers(src, dst) {
		err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size())
		fs.Errorf(dst, "%v", err)
		fs.CountError(err)
		removeFailedCopy(ctx, dst)
		return newDst, err
	}

	// Verify hashes are the same after transfer - ignoring blank hashes
	if hashType != hash.None {
		// checkHashes has logged and counted errors
		equal, _, srcSum, dstSum, _ := checkHashes(ctx, src, dst, hashType)
		if !equal {
			err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum)
			fs.Errorf(dst, "%v", err)
			fs.CountError(err)
			removeFailedCopy(ctx, dst)
			return newDst, err
		}
	}

	fs.Infof(src, actionTaken)
	return newDst, err
}
|
|
|
|
|
2019-06-15 14:54:17 +02:00
|
|
|
// SameObject returns true if src and dst could be pointing to the
|
|
|
|
// same object.
|
|
|
|
func SameObject(src, dst fs.Object) bool {
|
|
|
|
if !SameConfig(src.Fs(), dst.Fs()) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
srcPath := path.Join(src.Fs().Root(), src.Remote())
|
|
|
|
dstPath := path.Join(dst.Fs().Root(), dst.Remote())
|
|
|
|
if dst.Fs().Features().CaseInsensitive {
|
|
|
|
srcPath = strings.ToLower(srcPath)
|
|
|
|
dstPath = strings.ToLower(dstPath)
|
|
|
|
}
|
|
|
|
return srcPath == dstPath
|
|
|
|
}
|
|
|
|
|
2016-10-22 18:53:10 +02:00
|
|
|
// Move src object to dst or fdst if nil. If dst is nil then it uses
// remote as the name of the new object.
//
// Note that you must check the destination does not exist before
// calling this and pass it as dst. If you pass dst=nil and the
// destination does exist then this may create duplicates or return
// errors.
//
// It returns the destination object if possible. Note that this may
// be nil.
func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
	defer func() {
		tr.Done(err)
	}()
	newDst = dst
	if fs.Config.DryRun {
		fs.Logf(src, "Not moving as --dry-run")
		return newDst, nil
	}
	// See if we have Move available
	if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
		// Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
		if dst != nil && !SameObject(src, dst) {
			err = DeleteFile(ctx, dst)
			if err != nil {
				return newDst, err
			}
		}
		// Move dst <- src
		newDst, err = doMove(ctx, src, remote)
		switch err {
		case nil:
			// Server side move succeeded - nothing more to do
			fs.Infof(src, "Moved (server side)")
			return newDst, nil
		case fs.ErrorCantMove:
			// Fall through to the copy + delete path below
			fs.Debugf(src, "Can't move, switching to copy")
		default:
			fs.CountError(err)
			fs.Errorf(src, "Couldn't move: %v", err)
			return newDst, err
		}
	}
	// Move not found or didn't work so copy dst <- src
	newDst, err = Copy(ctx, fdst, dst, remote, src)
	if err != nil {
		fs.Errorf(src, "Not deleting source as copy failed: %v", err)
		return newDst, err
	}
	// Delete src if no error on copy
	return newDst, DeleteFile(ctx, src)
}
|
|
|
|
|
2017-01-10 21:03:55 +01:00
|
|
|
// CanServerSideMove returns true if fdst support server side moves or
|
|
|
|
// server side copies
|
|
|
|
//
|
|
|
|
// Some remotes simulate rename by server-side copy and delete, so include
|
|
|
|
// remotes that implements either Mover or Copier.
|
2018-01-12 17:30:54 +01:00
|
|
|
func CanServerSideMove(fdst fs.Fs) bool {
|
2017-01-13 18:21:47 +01:00
|
|
|
canMove := fdst.Features().Move != nil
|
|
|
|
canCopy := fdst.Features().Copy != nil
|
2017-01-10 21:03:55 +01:00
|
|
|
return canMove || canCopy
|
|
|
|
}
|
|
|
|
|
2019-03-10 17:50:28 +01:00
|
|
|
// SuffixName adds the current --suffix to the remote, obeying
|
|
|
|
// --suffix-keep-extension if set
|
|
|
|
func SuffixName(remote string) string {
|
|
|
|
if fs.Config.Suffix == "" {
|
|
|
|
return remote
|
|
|
|
}
|
|
|
|
if fs.Config.SuffixKeepExtension {
|
|
|
|
ext := path.Ext(remote)
|
|
|
|
base := remote[:len(remote)-len(ext)]
|
|
|
|
return base + fs.Config.Suffix + ext
|
|
|
|
}
|
|
|
|
return remote + fs.Config.Suffix
|
|
|
|
}
|
|
|
|
|
2018-01-12 17:30:54 +01:00
|
|
|
// DeleteFileWithBackupDir deletes a single file respecting --dry-run
// and accumulating stats and errors.
//
// If backupDir is set then it moves the file to there instead of
// deleting
func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
	tr := accounting.Stats(ctx).NewCheckingTransfer(dst)
	defer func() {
		tr.Done(err)
	}()
	// Count the delete up front so --max-delete can stop us before we act
	numDeletes := accounting.Stats(ctx).Deletes(1)
	if fs.Config.MaxDelete != -1 && numDeletes > fs.Config.MaxDelete {
		return fserrors.FatalError(errors.New("--max-delete threshold reached"))
	}
	// Pick log wording for the delete vs backup-dir cases
	action, actioned, actioning := "delete", "Deleted", "deleting"
	if backupDir != nil {
		action, actioned, actioning = "move into backup dir", "Moved into backup dir", "moving into backup dir"
	}
	if fs.Config.DryRun {
		fs.Logf(dst, "Not %s as --dry-run", actioning)
	} else if backupDir != nil {
		err = MoveBackupDir(ctx, backupDir, dst)
	} else {
		err = dst.Remove(ctx)
	}
	if err != nil {
		fs.CountError(err)
		fs.Errorf(dst, "Couldn't %s: %v", action, err)
	} else if !fs.Config.DryRun {
		fs.Infof(dst, actioned)
	}
	return err
}
|
|
|
|
|
2017-01-10 22:47:03 +01:00
|
|
|
// DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors.
//
// It always deletes (no backup dir); use DeleteFileWithBackupDir to
// move the file aside instead.
func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
	return DeleteFileWithBackupDir(ctx, dst, nil)
}
|
|
|
|
|
2018-01-12 17:30:54 +01:00
|
|
|
// DeleteFilesWithBackupDir removes all the files passed in the
// channel
//
// If backupDir is set the files will be placed into that directory
// instead of being deleted.
//
// Deletions run on --transfers concurrent workers; a fatal error
// (e.g. --max-delete reached) stops that worker's loop early.
func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
	var wg sync.WaitGroup
	wg.Add(fs.Config.Transfers)
	var errorCount int32
	var fatalErrorCount int32

	for i := 0; i < fs.Config.Transfers; i++ {
		go func() {
			defer wg.Done()
			for dst := range toBeDeleted {
				err := DeleteFileWithBackupDir(ctx, dst, backupDir)
				if err != nil {
					// Counters are shared between workers - use atomics
					atomic.AddInt32(&errorCount, 1)
					if fserrors.IsFatalError(err) {
						fs.Errorf(nil, "Got fatal error on delete: %s", err)
						atomic.AddInt32(&fatalErrorCount, 1)
						return
					}
				}
			}
		}()
	}
	fs.Infof(nil, "Waiting for deletions to finish")
	wg.Wait()
	if errorCount > 0 {
		err := errors.Errorf("failed to delete %d files", errorCount)
		if fatalErrorCount > 0 {
			// Propagate fatality so callers can abort the whole run
			return fserrors.FatalError(err)
		}
		return err
	}
	return nil
}
|
|
|
|
|
2017-01-10 22:47:03 +01:00
|
|
|
// DeleteFiles removes all the files passed in the channel
// (no backup dir - see DeleteFilesWithBackupDir).
func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error {
	return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil)
}
|
|
|
|
|
2019-02-11 02:36:47 +01:00
|
|
|
// SameRemoteType returns true if fdst and fsrc are the same type
|
|
|
|
func SameRemoteType(fdst, fsrc fs.Info) bool {
|
|
|
|
return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
|
|
|
|
}
|
|
|
|
|
2017-01-11 15:59:53 +01:00
|
|
|
// SameConfig returns true if fdst and fsrc are using the same config
|
|
|
|
// file entry
|
2018-01-12 17:30:54 +01:00
|
|
|
func SameConfig(fdst, fsrc fs.Info) bool {
|
2017-01-11 15:59:53 +01:00
|
|
|
return fdst.Name() == fsrc.Name()
|
|
|
|
}
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
// Same returns true if fdst and fsrc point to the same underlying Fs
func Same(fdst, fsrc fs.Info) bool {
	// Same config entry and same root path (ignoring leading/trailing /)
	return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/")
}
|
|
|
|
|
2019-06-23 05:52:09 +02:00
|
|
|
// fixRoot returns the Root with a trailing / if not empty. It is
|
|
|
|
// aware of case insensitive filesystems.
|
|
|
|
func fixRoot(f fs.Info) string {
|
|
|
|
s := strings.Trim(filepath.ToSlash(f.Root()), "/")
|
|
|
|
if s != "" {
|
|
|
|
s += "/"
|
|
|
|
}
|
|
|
|
if f.Features().CaseInsensitive {
|
|
|
|
s = strings.ToLower(s)
|
|
|
|
}
|
|
|
|
return s
|
|
|
|
}
|
|
|
|
|
2016-07-11 12:36:46 +02:00
|
|
|
// Overlapping returns true if fdst and fsrc point to the same
|
2017-01-11 15:59:53 +01:00
|
|
|
// underlying Fs and they overlap.
|
2018-01-12 17:30:54 +01:00
|
|
|
func Overlapping(fdst, fsrc fs.Info) bool {
|
2017-01-11 15:59:53 +01:00
|
|
|
if !SameConfig(fdst, fsrc) {
|
|
|
|
return false
|
|
|
|
}
|
2019-06-23 05:52:09 +02:00
|
|
|
fdstRoot := fixRoot(fdst)
|
|
|
|
fsrcRoot := fixRoot(fsrc)
|
2017-01-11 15:59:53 +01:00
|
|
|
return strings.HasPrefix(fdstRoot, fsrcRoot) || strings.HasPrefix(fsrcRoot, fdstRoot)
|
2016-07-11 12:36:46 +02:00
|
|
|
}
|
|
|
|
|
2019-06-23 05:52:09 +02:00
|
|
|
// SameDir returns true if fdst and fsrc point to the same
|
|
|
|
// underlying Fs and they are the same directory.
|
|
|
|
func SameDir(fdst, fsrc fs.Info) bool {
|
|
|
|
if !SameConfig(fdst, fsrc) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
fdstRoot := fixRoot(fdst)
|
|
|
|
fsrcRoot := fixRoot(fsrc)
|
|
|
|
return fdstRoot == fsrcRoot
|
|
|
|
}
|
|
|
|
|
2016-04-07 15:56:27 +02:00
|
|
|
// checkIdentical checks to see if dst and src are identical
|
|
|
|
//
|
|
|
|
// it returns true if differences were found
|
2016-10-12 11:59:55 +02:00
|
|
|
// it also returns whether it couldn't be hashed
|
2019-06-17 10:34:30 +02:00
|
|
|
func checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) {
|
|
|
|
same, ht, err := CheckHashes(ctx, src, dst)
|
2017-02-13 11:48:26 +01:00
|
|
|
if err != nil {
|
|
|
|
// CheckHashes will log and count errors
|
2016-10-12 11:59:55 +02:00
|
|
|
return true, false
|
2016-04-07 15:56:27 +02:00
|
|
|
}
|
2018-01-18 21:27:52 +01:00
|
|
|
if ht == hash.None {
|
2017-02-13 11:48:26 +01:00
|
|
|
return false, true
|
|
|
|
}
|
|
|
|
if !same {
|
2018-01-12 17:30:54 +01:00
|
|
|
err = errors.Errorf("%v differ", ht)
|
|
|
|
fs.Errorf(src, "%v", err)
|
|
|
|
fs.CountError(err)
|
2017-02-13 11:48:26 +01:00
|
|
|
return true, false
|
2016-04-07 15:56:27 +02:00
|
|
|
}
|
2016-10-12 11:59:55 +02:00
|
|
|
return false, false
|
2016-04-07 15:56:27 +02:00
|
|
|
}
|
|
|
|
|
2017-09-01 17:33:09 +02:00
|
|
|
// checkFn is the type of the checking function used in CheckFn()
//
// It compares objects a and b, returning whether they differ and
// whether the comparison could not be made for lack of a hash.
type checkFn func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool)
|
2015-11-24 17:54:12 +01:00
|
|
|
|
2017-09-01 17:33:09 +02:00
|
|
|
// checkMarch is used to march over two Fses in the same way as
// sync/copy
type checkMarch struct {
	fdst, fsrc      fs.Fs   // destination and source being checked
	check           checkFn // function used to compare a pair of objects
	oneway          bool    // if set, don't report files in fdst that are missing from fsrc
	differences     int32   // number of differences found (updated atomically)
	noHashes        int32   // number of files which couldn't be hashed (updated atomically)
	srcFilesMissing int32   // files present in fdst but missing from fsrc (updated atomically)
	dstFilesMissing int32   // files present in fsrc but missing from fdst (updated atomically)
	matches         int32   // number of matching files (updated atomically)
}
|
2015-03-14 18:11:24 +01:00
|
|
|
|
2017-09-01 17:33:09 +02:00
|
|
|
// DstOnly have an object which is in the destination only
|
2018-01-12 17:30:54 +01:00
|
|
|
func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
|
2017-09-01 17:33:09 +02:00
|
|
|
switch dst.(type) {
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Object:
|
2018-05-29 19:07:04 +02:00
|
|
|
if c.oneway {
|
|
|
|
return false
|
|
|
|
}
|
2017-11-15 06:32:00 +01:00
|
|
|
err := errors.Errorf("File not in %v", c.fsrc)
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(dst, "%v", err)
|
|
|
|
fs.CountError(err)
|
2017-09-01 17:33:09 +02:00
|
|
|
atomic.AddInt32(&c.differences, 1)
|
|
|
|
atomic.AddInt32(&c.srcFilesMissing, 1)
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Directory:
|
2017-09-01 17:33:09 +02:00
|
|
|
// Do the same thing to the entire contents of the directory
|
|
|
|
return true
|
|
|
|
default:
|
|
|
|
panic("Bad object in DirEntries")
|
2014-03-28 18:56:04 +01:00
|
|
|
}
|
2017-09-01 17:33:09 +02:00
|
|
|
return false
|
|
|
|
}
|
2014-03-28 18:56:04 +01:00
|
|
|
|
2017-09-01 17:33:09 +02:00
|
|
|
// SrcOnly have an object which is in the source only
|
2018-01-12 17:30:54 +01:00
|
|
|
func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
|
2017-09-01 17:33:09 +02:00
|
|
|
switch src.(type) {
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Object:
|
2017-11-15 06:32:00 +01:00
|
|
|
err := errors.Errorf("File not in %v", c.fdst)
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(src, "%v", err)
|
|
|
|
fs.CountError(err)
|
2017-09-01 17:33:09 +02:00
|
|
|
atomic.AddInt32(&c.differences, 1)
|
|
|
|
atomic.AddInt32(&c.dstFilesMissing, 1)
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Directory:
|
2017-09-01 17:33:09 +02:00
|
|
|
// Do the same thing to the entire contents of the directory
|
|
|
|
return true
|
|
|
|
default:
|
|
|
|
panic("Bad object in DirEntries")
|
2014-03-28 18:56:04 +01:00
|
|
|
}
|
2017-09-01 17:33:09 +02:00
|
|
|
return false
|
|
|
|
}
|
2014-03-28 18:56:04 +01:00
|
|
|
|
2017-09-01 17:33:09 +02:00
|
|
|
// check to see if two objects are identical using the check function
//
// Accounts the comparison as a checking transfer, short circuits on a
// size mismatch and honours --size-only before calling c.check.
func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool) {
	var err error
	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
	defer func() {
		// err is captured by reference so Done sees the final value.
		tr.Done(err)
	}()
	if sizeDiffers(src, dst) {
		err = errors.Errorf("Sizes differ")
		fs.Errorf(src, "%v", err)
		fs.CountError(err)
		return true, false
	}
	if fs.Config.SizeOnly {
		// --size-only: equal sizes are sufficient, skip hashing.
		return false, false
	}
	return c.check(ctx, dst, src)
}
|
2014-03-28 18:56:04 +01:00
|
|
|
|
2017-09-01 17:33:09 +02:00
|
|
|
// Match is called when src and dst are present, so sync src to dst
//
// For a pair of files it runs the comparison and updates the match /
// difference / noHash counters. A file on one side paired with a
// directory on the other is counted as a difference. Directory pairs
// recurse.
func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
	switch srcX := src.(type) {
	case fs.Object:
		dstX, ok := dst.(fs.Object)
		if ok {
			// Both sides are files - compare with the check function.
			differ, noHash := c.checkIdentical(ctx, dstX, srcX)
			if differ {
				atomic.AddInt32(&c.differences, 1)
			} else {
				atomic.AddInt32(&c.matches, 1)
				fs.Debugf(dstX, "OK")
			}
			if noHash {
				atomic.AddInt32(&c.noHashes, 1)
			}
		} else {
			// File on the source but directory on the destination.
			err := errors.Errorf("is file on %v but directory on %v", c.fsrc, c.fdst)
			fs.Errorf(src, "%v", err)
			fs.CountError(err)
			atomic.AddInt32(&c.differences, 1)
			atomic.AddInt32(&c.dstFilesMissing, 1)
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		_, ok := dst.(fs.Directory)
		if ok {
			return true
		}
		// Directory on the source but file on the destination.
		err := errors.Errorf("is file on %v but directory on %v", c.fdst, c.fsrc)
		fs.Errorf(dst, "%v", err)
		fs.CountError(err)
		atomic.AddInt32(&c.differences, 1)
		atomic.AddInt32(&c.srcFilesMissing, 1)

	default:
		panic("Bad object in DirEntries")
	}
	return false
}
|
2017-02-13 11:48:26 +01:00
|
|
|
|
2017-09-01 17:33:09 +02:00
|
|
|
// CheckFn checks the files in fsrc and fdst according to Size and
// hash using checkFunction on each file to check the hashes.
//
// checkFunction sees if dst and src are identical
//
// it returns true if differences were found
// it also returns whether it couldn't be hashed
func CheckFn(ctx context.Context, fdst, fsrc fs.Fs, check checkFn, oneway bool) error {
	c := &checkMarch{
		fdst:   fdst,
		fsrc:   fsrc,
		check:  check,
		oneway: oneway,
	}

	// set up a march over fdst and fsrc
	m := &march.March{
		Ctx:      ctx,
		Fdst:     fdst,
		Fsrc:     fsrc,
		Dir:      "",
		Callback: c,
	}
	fs.Infof(fdst, "Waiting for checks to finish")
	err := m.Run()

	// Summarise what the march found.
	if c.dstFilesMissing > 0 {
		fs.Logf(fdst, "%d files missing", c.dstFilesMissing)
	}
	if c.srcFilesMissing > 0 {
		fs.Logf(fsrc, "%d files missing", c.srcFilesMissing)
	}

	// NOTE(review): this line reports the global error count rather
	// than c.differences - presumably so unrelated errors surface in
	// the summary too; confirm before changing.
	fs.Logf(fdst, "%d differences found", accounting.Stats(ctx).GetErrors())
	if c.noHashes > 0 {
		fs.Logf(fdst, "%d hashes could not be checked", c.noHashes)
	}
	if c.matches > 0 {
		fs.Logf(fdst, "%d matching files", c.matches)
	}
	if c.differences > 0 {
		return errors.Errorf("%d differences found", c.differences)
	}
	return err
}
|
|
|
|
|
2017-02-12 17:30:18 +01:00
|
|
|
// Check the files in fsrc and fdst according to Size and hash
//
// Uses the default hash based comparison (checkIdentical).
func Check(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error {
	return CheckFn(ctx, fdst, fsrc, checkIdentical, oneway)
}
|
|
|
|
|
2017-02-13 11:48:26 +01:00
|
|
|
// CheckEqualReaders checks to see if in1 and in2 have the same
|
|
|
|
// content when read.
|
|
|
|
//
|
|
|
|
// it returns true if differences were found
|
|
|
|
func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) {
|
|
|
|
const bufSize = 64 * 1024
|
|
|
|
buf1 := make([]byte, bufSize)
|
|
|
|
buf2 := make([]byte, bufSize)
|
|
|
|
for {
|
2018-01-12 17:30:54 +01:00
|
|
|
n1, err1 := readers.ReadFill(in1, buf1)
|
|
|
|
n2, err2 := readers.ReadFill(in2, buf2)
|
2017-02-13 11:48:26 +01:00
|
|
|
// check errors
|
|
|
|
if err1 != nil && err1 != io.EOF {
|
|
|
|
return true, err1
|
|
|
|
} else if err2 != nil && err2 != io.EOF {
|
|
|
|
return true, err2
|
|
|
|
}
|
|
|
|
// err1 && err2 are nil or io.EOF here
|
|
|
|
// process the data
|
|
|
|
if n1 != n2 || !bytes.Equal(buf1[:n1], buf2[:n2]) {
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
// if both streams finished the we have finished
|
|
|
|
if err1 == io.EOF && err2 == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CheckIdentical checks to see if dst and src are identical by
|
|
|
|
// reading all their bytes if necessary.
|
|
|
|
//
|
|
|
|
// it returns true if differences were found
|
2019-06-17 10:34:30 +02:00
|
|
|
func CheckIdentical(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
|
|
|
|
in1, err := dst.Open(ctx)
|
2017-02-13 11:48:26 +01:00
|
|
|
if err != nil {
|
|
|
|
return true, errors.Wrapf(err, "failed to open %q", dst)
|
|
|
|
}
|
2019-07-18 12:13:54 +02:00
|
|
|
tr1 := accounting.Stats(ctx).NewTransfer(dst)
|
2019-07-16 13:56:20 +02:00
|
|
|
defer func() {
|
|
|
|
tr1.Done(err)
|
|
|
|
}()
|
|
|
|
in1 = tr1.Account(in1).WithBuffer() // account and buffer the transfer
|
2017-02-13 11:48:26 +01:00
|
|
|
|
2019-06-17 10:34:30 +02:00
|
|
|
in2, err := src.Open(ctx)
|
2017-02-13 11:48:26 +01:00
|
|
|
if err != nil {
|
|
|
|
return true, errors.Wrapf(err, "failed to open %q", src)
|
|
|
|
}
|
2019-07-18 12:13:54 +02:00
|
|
|
tr2 := accounting.Stats(ctx).NewTransfer(dst)
|
2019-07-16 13:56:20 +02:00
|
|
|
defer func() {
|
|
|
|
tr2.Done(err)
|
|
|
|
}()
|
|
|
|
in2 = tr2.Account(in2).WithBuffer() // account and buffer the transfer
|
2017-02-13 11:48:26 +01:00
|
|
|
|
2019-07-16 13:56:20 +02:00
|
|
|
// To assign err variable before defer.
|
|
|
|
differ, err = CheckEqualReaders(in1, in2)
|
|
|
|
return
|
2017-02-13 11:48:26 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// CheckDownload checks the files in fsrc and fdst according to Size
// and the actual contents of the files.
func CheckDownload(ctx context.Context, fdst, fsrc fs.Fs, oneway bool) error {
	// Adapt CheckIdentical to the checkFn signature - any download
	// failure is counted as both a difference and an unhashable file.
	check := func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool) {
		differ, err := CheckIdentical(ctx, a, b)
		if err != nil {
			fs.CountError(err)
			fs.Errorf(a, "Failed to download: %v", err)
			return true, true
		}
		return differ, false
	}
	return CheckFn(ctx, fdst, fsrc, check, oneway)
}
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
// ListFn lists the Fs to the supplied function
|
2014-03-28 18:56:04 +01:00
|
|
|
//
|
|
|
|
// Lists in parallel which may get them out of order
|
2019-06-17 10:34:30 +02:00
|
|
|
func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error {
|
|
|
|
return walk.ListR(ctx, f, "", false, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
|
2017-02-24 23:51:01 +01:00
|
|
|
entries.ForObject(fn)
|
|
|
|
return nil
|
|
|
|
})
|
2014-03-28 18:56:04 +01:00
|
|
|
}
|
|
|
|
|
2015-02-28 16:30:40 +01:00
|
|
|
// outMutex serialises output from syncFprintf so concurrent listing
// goroutines don't interleave their lines.
var outMutex sync.Mutex

// syncFprintf is a synchronized fmt.Fprintf
//
// Ignores errors from Fprintf
func syncFprintf(w io.Writer, format string, a ...interface{}) {
	outMutex.Lock()
	defer outMutex.Unlock()
	_, _ = fmt.Fprintf(w, format, a...)
}
|
|
|
|
|
2015-09-15 16:46:06 +02:00
|
|
|
// List the Fs to the supplied writer
|
2014-07-12 13:09:20 +02:00
|
|
|
//
|
2015-11-24 17:54:12 +01:00
|
|
|
// Shows size and path - obeys includes and excludes
|
2014-07-12 13:09:20 +02:00
|
|
|
//
|
|
|
|
// Lists in parallel which may get them out of order
|
2019-06-17 10:34:30 +02:00
|
|
|
func List(ctx context.Context, f fs.Fs, w io.Writer) error {
|
|
|
|
return ListFn(ctx, f, func(o fs.Object) {
|
2015-02-28 16:30:40 +01:00
|
|
|
syncFprintf(w, "%9d %s\n", o.Size(), o.Remote())
|
2014-07-12 13:09:20 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
// ListLong lists the Fs to the supplied writer
//
// Shows size, mod time and path - obeys includes and excludes
//
// Lists in parallel which may get them out of order
func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error {
	return ListFn(ctx, f, func(o fs.Object) {
		// Reading the modtime may be remote work, so account it as a
		// checking transfer.
		tr := accounting.Stats(ctx).NewCheckingTransfer(o)
		defer func() {
			tr.Done(nil)
		}()
		modTime := o.ModTime(ctx)
		syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote())
	})
}
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
// Md5sum list the Fs to the supplied writer
//
// Produces the same output as the md5sum command - obeys includes and
// excludes
//
// Lists in parallel which may get them out of order
func Md5sum(ctx context.Context, f fs.Fs, w io.Writer) error {
	return HashLister(ctx, hash.MD5, f, w)
}
|
|
|
|
|
|
|
|
// Sha1sum list the Fs to the supplied writer
//
// Obeys includes and excludes
//
// Lists in parallel which may get them out of order
func Sha1sum(ctx context.Context, f fs.Fs, w io.Writer) error {
	return HashLister(ctx, hash.SHA1, f, w)
}
|
|
|
|
|
2018-01-06 18:53:37 +01:00
|
|
|
// hashSum returns the human readable hash for ht passed in. This may
// be UNSUPPORTED or ERROR.
func hashSum(ctx context.Context, ht hash.Type, o fs.Object) string {
	var err error
	tr := accounting.Stats(ctx).NewCheckingTransfer(o)
	defer func() {
		// err is captured by reference so Done sees the hash error.
		tr.Done(err)
	}()
	sum, err := o.Hash(ctx, ht)
	if err == hash.ErrUnsupported {
		// Backend has no hash of this type.
		sum = "UNSUPPORTED"
	} else if err != nil {
		fs.Debugf(o, "Failed to read %v: %v", ht, err)
		sum = "ERROR"
	}
	return sum
}
|
|
|
|
|
2018-04-20 12:33:50 +02:00
|
|
|
// HashLister does a md5sum equivalent for the hash type passed in
|
2019-06-17 10:34:30 +02:00
|
|
|
func HashLister(ctx context.Context, ht hash.Type, f fs.Fs, w io.Writer) error {
|
|
|
|
return ListFn(ctx, f, func(o fs.Object) {
|
|
|
|
sum := hashSum(ctx, ht, o)
|
2019-09-23 15:32:36 +02:00
|
|
|
syncFprintf(w, "%*s %s\n", hash.Width(ht), sum, o.Remote())
|
2014-07-12 13:09:20 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-10-02 20:48:48 +02:00
|
|
|
// Count counts the objects and their sizes in the Fs
|
2015-11-24 17:54:12 +01:00
|
|
|
//
|
|
|
|
// Obeys includes and excludes
|
2019-06-17 10:34:30 +02:00
|
|
|
func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, err error) {
|
|
|
|
err = ListFn(ctx, f, func(o fs.Object) {
|
2015-10-02 20:48:48 +02:00
|
|
|
atomic.AddInt64(&objects, 1)
|
2019-05-28 20:51:25 +02:00
|
|
|
objectSize := o.Size()
|
|
|
|
if objectSize > 0 {
|
|
|
|
atomic.AddInt64(&size, objectSize)
|
|
|
|
}
|
2015-10-02 20:48:48 +02:00
|
|
|
})
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-02-24 23:51:01 +01:00
|
|
|
// ConfigMaxDepth returns the depth to use for a recursive or non recursive listing.
|
|
|
|
func ConfigMaxDepth(recursive bool) int {
|
2018-01-12 17:30:54 +01:00
|
|
|
depth := fs.Config.MaxDepth
|
2017-02-24 23:51:01 +01:00
|
|
|
if !recursive && depth < 0 {
|
|
|
|
depth = 1
|
|
|
|
}
|
|
|
|
return depth
|
|
|
|
}
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error {
	return walk.ListR(ctx, f, "", false, ConfigMaxDepth(false), walk.ListDirs, func(entries fs.DirEntries) error {
		entries.ForDir(func(dir fs.Directory) {
			if dir != nil {
				// Print size, modtime, item count and name per directory.
				syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote())
			}
		})
		return nil
	})
}
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
// Mkdir makes a destination directory or container
|
2019-06-17 10:34:30 +02:00
|
|
|
func Mkdir(ctx context.Context, f fs.Fs, dir string) error {
|
2018-01-12 17:30:54 +01:00
|
|
|
if fs.Config.DryRun {
|
|
|
|
fs.Logf(fs.LogDirName(f, dir), "Not making directory as dry run is set")
|
2016-02-28 20:47:22 +01:00
|
|
|
return nil
|
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(fs.LogDirName(f, dir), "Making directory")
|
2019-06-17 10:34:30 +02:00
|
|
|
err := f.Mkdir(ctx, dir)
|
2014-03-28 18:56:04 +01:00
|
|
|
if err != nil {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.CountError(err)
|
2014-03-28 18:56:04 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-02-25 21:05:34 +01:00
|
|
|
// TryRmdir removes a container but not if not empty. It doesn't
// count errors but may return one.
func TryRmdir(ctx context.Context, f fs.Fs, dir string) error {
	if fs.Config.DryRun {
		fs.Logf(fs.LogDirName(f, dir), "Not deleting as dry run is set")
		return nil
	}
	fs.Debugf(fs.LogDirName(f, dir), "Removing directory")
	return f.Rmdir(ctx, dir)
}
|
|
|
|
|
|
|
|
// Rmdir removes a container but not if not empty
|
2019-06-17 10:34:30 +02:00
|
|
|
func Rmdir(ctx context.Context, f fs.Fs, dir string) error {
|
|
|
|
err := TryRmdir(ctx, f, dir)
|
2016-02-25 21:05:34 +01:00
|
|
|
if err != nil {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.CountError(err)
|
2016-02-25 21:05:34 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
return err
|
2014-03-28 18:56:04 +01:00
|
|
|
}
|
|
|
|
|
2017-12-07 13:25:56 +01:00
|
|
|
// Purge removes a directory and all of its contents
//
// Uses the backend's Purge feature when available for the root,
// otherwise falls back to deleting every file then removing the
// empty directories.
func Purge(ctx context.Context, f fs.Fs, dir string) error {
	doFallbackPurge := true
	var err error
	if dir == "" {
		// FIXME change the Purge interface so it takes a dir - see #1891
		if doPurge := f.Features().Purge; doPurge != nil {
			doFallbackPurge = false
			if fs.Config.DryRun {
				fs.Logf(f, "Not purging as --dry-run set")
			} else {
				err = doPurge(ctx)
				if err == fs.ErrorCantPurge {
					// Backend declined - fall back to delete + rmdirs.
					doFallbackPurge = true
				}
			}
		}
	}
	if doFallbackPurge {
		// DeleteFiles and Rmdir observe --dry-run
		err = DeleteFiles(ctx, listToChan(ctx, f, dir))
		if err != nil {
			return err
		}
		err = Rmdirs(ctx, f, dir, false)
	}
	if err != nil {
		fs.CountError(err)
		return err
	}
	return nil
}
|
2015-12-02 23:25:32 +01:00
|
|
|
|
|
|
|
// Delete removes all the contents of a container. Unlike Purge, it
// obeys includes and excludes.
func Delete(ctx context.Context, f fs.Fs) error {
	delChan := make(fs.ObjectsChan, fs.Config.Transfers)
	delErr := make(chan error, 1)
	// Delete in the background while the listing feeds delChan.
	go func() {
		delErr <- DeleteFiles(ctx, delChan)
	}()
	err := ListFn(ctx, f, func(o fs.Object) {
		delChan <- o
	})
	// Close the channel so DeleteFiles finishes, then wait for it.
	close(delChan)
	delError := <-delErr
	// The listing error takes priority over the delete error.
	if err == nil {
		err = delError
	}
	return err
}
|
2016-01-31 13:58:41 +01:00
|
|
|
|
2017-02-24 23:51:01 +01:00
|
|
|
// listToChan will transfer all objects in the listing to the output
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan {
	o := make(fs.ObjectsChan, fs.Config.Checkers)
	// The listing runs in a goroutine which always closes the channel
	// when it finishes, so receivers terminate cleanly.
	go func() {
		defer close(o)
		err := walk.ListR(ctx, f, dir, true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
			entries.ForObject(func(obj fs.Object) {
				o <- obj
			})
			return nil
		})
		// A missing directory just means there is nothing to send.
		if err != nil && err != fs.ErrorDirNotFound {
			err = errors.Wrap(err, "failed to list")
			fs.CountError(err)
			fs.Errorf(nil, "%v", err)
		}
	}()
	return o
}
|
2016-07-01 17:35:36 +02:00
|
|
|
|
|
|
|
// CleanUp removes the trash for the Fs
//
// Returns an error if the backend doesn't support cleanup; the
// unsupported check deliberately happens before the --dry-run check.
func CleanUp(ctx context.Context, f fs.Fs) error {
	doCleanUp := f.Features().CleanUp
	if doCleanUp == nil {
		return errors.Errorf("%v doesn't support cleanup", f)
	}
	if fs.Config.DryRun {
		fs.Logf(f, "Not running cleanup as --dry-run set")
		return nil
	}
	return doCleanUp(ctx)
}
|
2016-08-18 23:43:02 +02:00
|
|
|
|
2017-02-09 12:25:36 +01:00
|
|
|
// readCloser wraps a Reader and a Closer together into a ReadCloser -
// Read goes to the embedded Reader, Close to the embedded Closer.
type readCloser struct {
	io.Reader
	io.Closer
}
|
|
|
|
|
2016-08-18 23:43:02 +02:00
|
|
|
// Cat any files to the io.Writer
//
// if offset == 0 it will be ignored
// if offset > 0 then the file will be seeked to that offset
// if offset < 0 then the file will be seeked that far from the end
//
// if count < 0 then it will be ignored
// if count >= 0 then only that many characters will be output
func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error {
	// mu serialises writes to w as objects are processed in parallel.
	var mu sync.Mutex
	return ListFn(ctx, f, func(o fs.Object) {
		var err error
		tr := accounting.Stats(ctx).NewTransfer(o)
		defer func() {
			tr.Done(err)
		}()
		opt := fs.RangeOption{Start: offset, End: -1}
		size := o.Size()
		if opt.Start < 0 {
			// Negative offset counts back from the end of the file.
			opt.Start += size
		}
		if count >= 0 {
			opt.End = opt.Start + count - 1
		}
		var options []fs.OpenOption
		if opt.Start > 0 || opt.End >= 0 {
			options = append(options, &opt)
		}
		in, err := o.Open(ctx, options...)
		if err != nil {
			fs.CountError(err)
			fs.Errorf(o, "Failed to open: %v", err)
			return
		}
		if count >= 0 {
			// Belt and braces: limit the data even if the backend
			// ignored the range option.
			in = &readCloser{Reader: &io.LimitedReader{R: in, N: count}, Closer: in}
		}
		in = tr.Account(in).WithBuffer() // account and buffer the transfer
		// take the lock just before we output stuff, so at the last possible moment
		mu.Lock()
		defer mu.Unlock()
		_, err = io.Copy(w, in)
		if err != nil {
			fs.CountError(err)
			fs.Errorf(o, "Failed to send to output: %v", err)
		}
	})
}
|
2016-11-27 12:49:31 +01:00
|
|
|
|
2017-08-03 21:42:35 +02:00
|
|
|
// Rcat reads data from the Reader until EOF and uploads it to a file on remote
|
2019-06-17 10:34:30 +02:00
|
|
|
func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) {
|
2019-07-18 12:13:54 +02:00
|
|
|
tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1)
|
2017-08-03 21:42:35 +02:00
|
|
|
defer func() {
|
2019-07-16 13:56:20 +02:00
|
|
|
tr.Done(err)
|
2017-08-03 21:42:35 +02:00
|
|
|
}()
|
2019-07-16 13:56:20 +02:00
|
|
|
in = tr.Account(in).WithBuffer()
|
2017-08-03 21:42:35 +02:00
|
|
|
|
2019-08-07 17:40:26 +02:00
|
|
|
hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash
|
|
|
|
hashOption := &fs.HashesOption{Hashes: hashes}
|
|
|
|
hash, err := hash.NewMultiHasherTypes(hashes)
|
2017-09-11 08:26:53 +02:00
|
|
|
if err != nil {
|
2017-09-16 22:49:08 +02:00
|
|
|
return nil, err
|
2017-09-11 08:26:53 +02:00
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
readCounter := readers.NewCountingReader(in)
|
2017-09-11 08:26:53 +02:00
|
|
|
trackingIn := io.TeeReader(readCounter, hash)
|
2017-09-11 08:25:34 +02:00
|
|
|
|
2018-01-12 17:30:54 +01:00
|
|
|
compare := func(dst fs.Object) error {
|
|
|
|
src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, hash.Sums(), fdst)
|
2019-06-17 10:34:30 +02:00
|
|
|
if !Equal(ctx, src, dst) {
|
2017-09-11 08:26:53 +02:00
|
|
|
err = errors.Errorf("corrupted on transfer")
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.CountError(err)
|
|
|
|
fs.Errorf(dst, "%v", err)
|
2017-09-11 08:26:53 +02:00
|
|
|
return err
|
2017-09-11 08:25:34 +02:00
|
|
|
}
|
2017-09-11 08:26:53 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// check if file small enough for direct upload
|
2018-01-12 17:30:54 +01:00
|
|
|
buf := make([]byte, fs.Config.StreamingUploadCutoff)
|
2017-09-11 08:26:53 +02:00
|
|
|
if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n)
|
|
|
|
src := object.NewMemoryObject(dstFileName, modTime, buf[:n])
|
2019-06-17 10:34:30 +02:00
|
|
|
return Copy(ctx, fdst, nil, dstFileName, src)
|
2017-09-11 08:25:34 +02:00
|
|
|
}
|
2017-11-11 19:43:00 +01:00
|
|
|
|
|
|
|
// Make a new ReadCloser with the bits we've already read
|
|
|
|
in = &readCloser{
|
|
|
|
Reader: io.MultiReader(bytes.NewReader(buf), trackingIn),
|
|
|
|
Closer: in,
|
|
|
|
}
|
2017-09-11 08:25:34 +02:00
|
|
|
|
2017-08-03 21:42:35 +02:00
|
|
|
fStreamTo := fdst
|
|
|
|
canStream := fdst.Features().PutStream != nil
|
|
|
|
if !canStream {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file")
|
|
|
|
tmpLocalFs, err := fs.TemporaryLocalFs()
|
2017-08-03 21:42:35 +02:00
|
|
|
if err != nil {
|
2017-09-16 22:49:08 +02:00
|
|
|
return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file")
|
2017-08-03 21:42:35 +02:00
|
|
|
}
|
|
|
|
defer func() {
|
2019-06-17 10:34:30 +02:00
|
|
|
err := Purge(ctx, tmpLocalFs, "")
|
2017-08-03 21:42:35 +02:00
|
|
|
if err != nil {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err)
|
2017-08-03 21:42:35 +02:00
|
|
|
}
|
|
|
|
}()
|
|
|
|
fStreamTo = tmpLocalFs
|
|
|
|
}
|
|
|
|
|
2018-01-12 17:30:54 +01:00
|
|
|
if fs.Config.DryRun {
|
|
|
|
fs.Logf("stdin", "Not uploading as --dry-run")
|
2017-08-03 21:42:35 +02:00
|
|
|
// prevents "broken pipe" errors
|
|
|
|
_, err = io.Copy(ioutil.Discard, in)
|
2017-09-16 22:49:08 +02:00
|
|
|
return nil, err
|
2017-08-03 21:42:35 +02:00
|
|
|
}
|
|
|
|
|
2018-01-12 17:30:54 +01:00
|
|
|
objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil)
|
2019-06-17 10:34:30 +02:00
|
|
|
if dst, err = fStreamTo.Features().PutStream(ctx, in, objInfo, hashOption); err != nil {
|
2017-09-16 22:49:08 +02:00
|
|
|
return dst, err
|
2017-08-03 21:42:35 +02:00
|
|
|
}
|
2017-09-16 22:49:08 +02:00
|
|
|
if err = compare(dst); err != nil {
|
|
|
|
return dst, err
|
2017-09-11 08:26:53 +02:00
|
|
|
}
|
|
|
|
if !canStream {
|
2017-12-01 16:16:11 +01:00
|
|
|
// copy dst (which is the local object we have just streamed to) to the remote
|
2019-06-17 10:34:30 +02:00
|
|
|
return Copy(ctx, fdst, nil, dstFileName, dst)
|
2017-09-11 08:26:53 +02:00
|
|
|
}
|
2017-09-16 22:49:08 +02:00
|
|
|
return dst, nil
|
2017-08-03 21:42:35 +02:00
|
|
|
}
|
|
|
|
|
2018-03-29 09:10:19 +02:00
|
|
|
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
2019-06-17 10:34:30 +02:00
|
|
|
func PublicLink(ctx context.Context, f fs.Fs, remote string) (string, error) {
|
2018-03-29 09:10:19 +02:00
|
|
|
doPublicLink := f.Features().PublicLink
|
|
|
|
if doPublicLink == nil {
|
|
|
|
return "", errors.Errorf("%v doesn't support public links", f)
|
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
return doPublicLink(ctx, remote)
|
2018-03-29 09:10:19 +02:00
|
|
|
}
|
|
|
|
|
2016-11-27 12:49:31 +01:00
|
|
|
// Rmdirs removes any empty directories (or directories only
// containing empty directories) under f, including f.
//
// If leaveRoot is true, the root directory (dir) itself is kept even
// when it ends up empty.
func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error {
	// dirEmpty maps a directory path to whether it is (so far) believed empty.
	dirEmpty := make(map[string]bool)
	dirEmpty[dir] = !leaveRoot
	err := walk.Walk(ctx, f, dir, true, fs.Config.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error {
		if err != nil {
			// Count and log listing failures but keep walking the rest of the tree.
			fs.CountError(err)
			fs.Errorf(f, "Failed to list %q: %v", dirPath, err)
			return nil
		}
		for _, entry := range entries {
			switch x := entry.(type) {
			case fs.Directory:
				// add a new directory as empty
				dir := x.Remote()
				_, found := dirEmpty[dir]
				if !found {
					dirEmpty[dir] = true
				}
			case fs.Object:
				// mark the parents of the file as being non-empty
				dir := x.Remote()
				for dir != "" {
					dir = path.Dir(dir)
					if dir == "." || dir == "/" {
						dir = ""
					}
					empty, found := dirEmpty[dir]
					// End if we reach a directory which is non-empty
					if found && !empty {
						break
					}
					dirEmpty[dir] = false
				}
			}
		}
		return nil
	})
	if err != nil {
		return errors.Wrap(err, "failed to rmdirs")
	}
	// Now delete the empty directories, starting from the longest path
	var toDelete []string
	for dir, empty := range dirEmpty {
		if empty {
			toDelete = append(toDelete, dir)
		}
	}
	sort.Strings(toDelete)
	// Iterate the sorted list backwards so children are removed before parents.
	for i := len(toDelete) - 1; i >= 0; i-- {
		dir := toDelete[i]
		err := TryRmdir(ctx, f, dir)
		if err != nil {
			fs.CountError(err)
			fs.Errorf(dir, "Failed to rmdir: %v", err)
			return err
		}
	}
	return nil
}
|
2016-10-23 18:34:17 +02:00
|
|
|
|
2019-07-08 03:02:53 +02:00
|
|
|
// GetCompareDest sets up --compare-dest
|
|
|
|
func GetCompareDest() (CompareDest fs.Fs, err error) {
|
|
|
|
CompareDest, err = cache.Get(fs.Config.CompareDest)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", fs.Config.CompareDest, err))
|
|
|
|
}
|
|
|
|
return CompareDest, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// compareDest checks --compare-dest to see if src needs to
|
|
|
|
// be copied
|
|
|
|
//
|
|
|
|
// Returns True if src is in --compare-dest
|
|
|
|
func compareDest(ctx context.Context, dst, src fs.Object, CompareDest fs.Fs) (NoNeedTransfer bool, err error) {
|
|
|
|
var remote string
|
|
|
|
if dst == nil {
|
|
|
|
remote = src.Remote()
|
|
|
|
} else {
|
|
|
|
remote = dst.Remote()
|
|
|
|
}
|
|
|
|
CompareDestFile, err := CompareDest.NewObject(ctx, remote)
|
|
|
|
switch err {
|
|
|
|
case fs.ErrorObjectNotFound:
|
|
|
|
return false, nil
|
|
|
|
case nil:
|
|
|
|
break
|
|
|
|
default:
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
if Equal(ctx, src, CompareDestFile) {
|
|
|
|
fs.Debugf(src, "Destination found in --compare-dest, skipping")
|
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetCopyDest sets up --copy-dest
|
|
|
|
func GetCopyDest(fdst fs.Fs) (CopyDest fs.Fs, err error) {
|
|
|
|
CopyDest, err = cache.Get(fs.Config.CopyDest)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", fs.Config.CopyDest, err))
|
|
|
|
}
|
|
|
|
if !SameConfig(fdst, CopyDest) {
|
|
|
|
return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination"))
|
|
|
|
}
|
|
|
|
if CopyDest.Features().Copy == nil {
|
|
|
|
return nil, fserrors.FatalError(errors.New("can't use --copy-dest on a remote which doesn't support server side copy"))
|
|
|
|
}
|
|
|
|
return CopyDest, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// copyDest checks --copy-dest to see if src needs to
// be copied
//
// Returns True if src was copied from --copy-dest
func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
	// Look the file up in the copy destination under the dst name if it
	// exists, otherwise under the src name.
	var remote string
	if dst == nil {
		remote = src.Remote()
	} else {
		remote = dst.Remote()
	}
	CopyDestFile, err := CopyDest.NewObject(ctx, remote)
	switch err {
	case fs.ErrorObjectNotFound:
		return false, nil
	case nil:
		break
	default:
		return false, err
	}
	// Note: uses the lower-level equal() so mod time comparison follows
	// the --size-only / --checksum flags for the copy-dest candidate.
	if equal(ctx, src, CopyDestFile, fs.Config.SizeOnly, fs.Config.CheckSum, false) {
		if dst == nil || !Equal(ctx, src, dst) {
			// An existing dst must be moved aside (if --backup-dir is set)
			// before it is overwritten by the server side copy.
			if dst != nil && backupDir != nil {
				err = MoveBackupDir(ctx, backupDir, dst)
				if err != nil {
					return false, errors.Wrap(err, "moving to --backup-dir failed")
				}
				// If successful zero out the dstObj as it is no longer there
				dst = nil
			}
			_, err := Copy(ctx, fdst, dst, remote, CopyDestFile)
			if err != nil {
				// A failed server side copy is not fatal - fall back to
				// a normal transfer by reporting "need transfer".
				fs.Errorf(src, "Destination found in --copy-dest, error copying")
				return false, nil
			}
			fs.Debugf(src, "Destination found in --copy-dest, using server side copy")
			return true, nil
		}
		fs.Debugf(src, "Unchanged skipping")
		return true, nil
	}
	fs.Debugf(src, "Destination not found in --copy-dest")
	return false, nil
}
|
|
|
|
|
|
|
|
// CompareOrCopyDest checks --compare-dest and --copy-dest to see if src
|
|
|
|
// does not need to be copied
|
|
|
|
//
|
|
|
|
// Returns True if src does not need to be copied
|
|
|
|
func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CompareOrCopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
|
|
|
|
if fs.Config.CompareDest != "" {
|
|
|
|
return compareDest(ctx, dst, src, CompareOrCopyDest)
|
|
|
|
} else if fs.Config.CopyDest != "" {
|
|
|
|
return copyDest(ctx, fdst, dst, src, CompareOrCopyDest, backupDir)
|
|
|
|
}
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
2018-01-12 17:30:54 +01:00
|
|
|
// NeedTransfer checks to see if src needs to be copied to dst using
// the current config.
//
// Returns a flag which indicates whether the file needs to be
// transferred or not.
//
// The checks are applied in strict precedence order: missing dst,
// --ignore-existing, --ignore-times, --update, then full equality.
func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
	if dst == nil {
		fs.Debugf(src, "Need to transfer - File not found at Destination")
		return true
	}
	// If we should ignore existing files, don't transfer
	if fs.Config.IgnoreExisting {
		fs.Debugf(src, "Destination exists, skipping")
		return false
	}
	// If we should upload unconditionally
	if fs.Config.IgnoreTimes {
		fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
		return true
	}
	// If UpdateOlder is in effect, skip if dst is newer than src
	if fs.Config.UpdateOlder {
		srcModTime := src.ModTime(ctx)
		dstModTime := dst.ModTime(ctx)
		// dt > 0 means dst is newer than src
		dt := dstModTime.Sub(srcModTime)
		// If have a mutually agreed precision then use that
		modifyWindow := fs.GetModifyWindow(dst.Fs(), src.Fs())
		if modifyWindow == fs.ModTimeNotSupported {
			// Otherwise use 1 second as a safe default as
			// the resolution of the time a file was
			// uploaded.
			modifyWindow = time.Second
		}
		switch {
		case dt >= modifyWindow:
			fs.Debugf(src, "Destination is newer than source, skipping")
			return false
		case dt <= -modifyWindow:
			fs.Debugf(src, "Destination is older than source, transferring")
		default:
			// Mod times are equal within the window - fall back to size
			if !sizeDiffers(src, dst) {
				fs.Debugf(src, "Destination mod time is within %v of source and sizes identical, skipping", modifyWindow)
				return false
			}
			fs.Debugf(src, "Destination mod time is within %v of source but sizes differ, transferring", modifyWindow)
		}
	} else {
		// Check to see if changed or not
		if Equal(ctx, src, dst) {
			fs.Debugf(src, "Unchanged skipping")
			return false
		}
	}
	return true
}
|
|
|
|
|
2018-08-30 17:45:41 +02:00
|
|
|
// RcatSize reads data from the Reader until EOF and uploads it to a file on remote.
// Pass in size >=0 if known, <0 if not known
func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (dst fs.Object, err error) {
	var obj fs.Object

	if size >= 0 {
		// Deliberately shadows the named return: the deferred tr.Done
		// below closes over this err so it reports only this branch's
		// transfer outcome.
		var err error
		// Size known use Put
		tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, size)
		defer func() {
			tr.Done(err)
		}()
		body := ioutil.NopCloser(in) // we let the server close the body
		in := tr.Account(body)       // account the transfer (no buffering)

		if fs.Config.DryRun {
			fs.Logf("stdin", "Not uploading as --dry-run")
			// prevents "broken pipe" errors
			_, err = io.Copy(ioutil.Discard, in)
			return nil, err
		}

		info := object.NewStaticObjectInfo(dstFileName, modTime, size, true, nil, fdst)
		obj, err = fdst.Put(ctx, in, info)
		if err != nil {
			fs.Errorf(dstFileName, "Post request put error: %v", err)

			return nil, err
		}
	} else {
		// Size unknown use Rcat
		obj, err = Rcat(ctx, fdst, dstFileName, in, modTime)
		if err != nil {
			fs.Errorf(dstFileName, "Post request rcat error: %v", err)

			return nil, err
		}
	}

	return obj, nil
}
|
|
|
|
|
2018-11-02 18:29:57 +01:00
|
|
|
// CopyURL copies the data from the url to (fdst, dstFileName)
|
2019-09-03 18:25:19 +02:00
|
|
|
func CopyURL(ctx context.Context, fdst fs.Fs, dstFileName string, url string, dstFileNameFromURL bool) (dst fs.Object, err error) {
|
2019-03-08 21:33:22 +01:00
|
|
|
client := fshttp.NewClient(fs.Config)
|
|
|
|
resp, err := client.Get(url)
|
2018-11-02 18:29:57 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer fs.CheckClose(resp.Body, &err)
|
2019-08-05 20:20:50 +02:00
|
|
|
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
|
|
|
return nil, errors.Errorf("CopyURL failed: %s", resp.Status)
|
|
|
|
}
|
2019-09-03 18:25:19 +02:00
|
|
|
|
|
|
|
if dstFileNameFromURL {
|
|
|
|
dstFileName = path.Base(resp.Request.URL.Path)
|
|
|
|
if dstFileName == "." || dstFileName == "/" {
|
|
|
|
return nil, errors.Errorf("CopyURL failed: file name wasn't found in url")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-17 10:34:30 +02:00
|
|
|
return RcatSize(ctx, fdst, dstFileName, resp.Body, resp.ContentLength, time.Now())
|
2018-11-02 18:29:57 +01:00
|
|
|
}
|
|
|
|
|
2019-06-23 05:50:09 +02:00
|
|
|
// BackupDir returns the correctly configured --backup-dir
//
// srcFileName is non-empty when a single file is being transferred;
// the overlap/same-dir checks differ between the single-file and
// whole-directory cases.
func BackupDir(fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) {
	if fs.Config.BackupDir != "" {
		backupDir, err = cache.Get(fs.Config.BackupDir)
		if err != nil {
			return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", fs.Config.BackupDir, err))
		}
		// Server side move requires the backup dir on the same remote.
		if !SameConfig(fdst, backupDir) {
			return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
		}
		if srcFileName == "" {
			// Directory transfer: the backup dir must not overlap either side.
			if Overlapping(fdst, backupDir) {
				return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
			}
			if Overlapping(fsrc, backupDir) {
				return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
			}
		} else {
			// Single file: only forbid identical dirs, and only when no
			// --suffix is set (a suffix makes the backup name distinct).
			if fs.Config.Suffix == "" {
				if SameDir(fdst, backupDir) {
					return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same"))
				}
				if SameDir(fsrc, backupDir) {
					return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't be the same"))
				}
			}
		}
	} else {
		if srcFileName == "" {
			return nil, fserrors.FatalError(errors.New("--suffix must be used with a file or with --backup-dir"))
		}
		// --backup-dir is not set but --suffix is - use the destination as the backupDir
		backupDir = fdst
	}
	if !CanServerSideMove(backupDir) {
		return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server side move or copy"))
	}
	return backupDir, nil
}
|
|
|
|
|
|
|
|
// MoveBackupDir moves a file to the backup dir
|
|
|
|
func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err error) {
|
|
|
|
remoteWithSuffix := SuffixName(dst.Remote())
|
|
|
|
overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix)
|
|
|
|
_, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-10-23 18:34:17 +02:00
|
|
|
// moveOrCopyFile moves or copies a single file possibly to a new name
//
// cp selects Copy (true) or Move (false). The source and destination
// may be on different remotes and may use different file names.
func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
	dstFilePath := path.Join(fdst.Root(), dstFileName)
	srcFilePath := path.Join(fsrc.Root(), srcFileName)
	// Nothing to do if source and destination are the same path on the same remote.
	if fdst.Name() == fsrc.Name() && dstFilePath == srcFilePath {
		fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName)
		return nil
	}

	// Choose operations
	Op := Move
	if cp {
		Op = Copy
	}

	// Find src object
	srcObj, err := fsrc.NewObject(ctx, srcFileName)
	if err != nil {
		return err
	}

	// Find dst object if it exists
	dstObj, err := fdst.NewObject(ctx, dstFileName)
	if err == fs.ErrorObjectNotFound {
		dstObj = nil
	} else if err != nil {
		return err
	}

	// Special case for changing case of a file on a case insensitive remote
	// This will move the file to a temporary name then
	// move it back to the intended destination. This is required
	// to avoid issues with certain remotes and avoid file deletion.
	if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) {
		// Create random name to temporarily move file to
		tmpObjName := dstFileName + "-rclone-move-" + random.String(8)
		// Make sure the temporary name is actually free before using it.
		_, err := fdst.NewObject(ctx, tmpObjName)
		if err != fs.ErrorObjectNotFound {
			if err == nil {
				return errors.New("found an already existing file with a randomly generated name. Try the operation again")
			}
			return errors.Wrap(err, "error while attempting to move file to a temporary location")
		}
		tr := accounting.Stats(ctx).NewTransfer(srcObj)
		defer func() {
			tr.Done(err)
		}()
		// Two-step rename: src -> tmp, then tmp -> dst.
		tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj)
		if err != nil {
			return errors.Wrap(err, "error while moving file to temporary location")
		}
		_, err = Op(ctx, fdst, nil, dstFileName, tmpObj)
		return err
	}

	// Set up --backup-dir / --compare-dest / --copy-dest support if configured.
	var backupDir, copyDestDir fs.Fs
	if fs.Config.BackupDir != "" || fs.Config.Suffix != "" {
		backupDir, err = BackupDir(fdst, fsrc, srcFileName)
		if err != nil {
			return errors.Wrap(err, "creating Fs for --backup-dir failed")
		}
	}
	if fs.Config.CompareDest != "" {
		copyDestDir, err = GetCompareDest()
		if err != nil {
			return err
		}
	} else if fs.Config.CopyDest != "" {
		copyDestDir, err = GetCopyDest(fdst)
		if err != nil {
			return err
		}
	}
	NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir)
	if err != nil {
		return err
	}
	if !NoNeedTransfer && NeedTransfer(ctx, dstObj, srcObj) {
		// If destination already exists, then we must move it into --backup-dir if required
		if dstObj != nil && backupDir != nil {
			err = MoveBackupDir(ctx, backupDir, dstObj)
			if err != nil {
				return errors.Wrap(err, "moving to --backup-dir failed")
			}
			// If successful zero out the dstObj as it is no longer there
			dstObj = nil
		}

		_, err = Op(ctx, fdst, dstObj, dstFileName, srcObj)
	} else {
		// Nothing transferred - account as a check; for a move the source
		// still needs deleting.
		tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
		if !cp {
			err = DeleteFile(ctx, srcObj)
		}
		tr.Done(err)
	}
	return err
}
|
|
|
|
|
|
|
|
// MoveFile moves a single file possibly to a new name.
// It is a thin wrapper over moveOrCopyFile with cp=false.
func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false)
}
|
|
|
|
|
|
|
|
// CopyFile copies a single file possibly to a new name.
// It is a thin wrapper over moveOrCopyFile with cp=true.
func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
	return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true)
}
|
2018-01-06 15:39:31 +01:00
|
|
|
|
2018-09-11 03:59:48 +02:00
|
|
|
// SetTier changes tier of object in remote
|
2019-06-17 10:34:30 +02:00
|
|
|
func SetTier(ctx context.Context, fsrc fs.Fs, tier string) error {
|
|
|
|
return ListFn(ctx, fsrc, func(o fs.Object) {
|
2018-09-11 03:59:48 +02:00
|
|
|
objImpl, ok := o.(fs.SetTierer)
|
|
|
|
if !ok {
|
|
|
|
fs.Errorf(fsrc, "Remote object does not implement SetTier")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
err := objImpl.SetTier(tier)
|
|
|
|
if err != nil {
|
|
|
|
fs.Errorf(fsrc, "Failed to do SetTier, %v", err)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-01-06 15:39:31 +01:00
|
|
|
// ListFormat defines files information print format
type ListFormat struct {
	separator string                             // string placed between fields in plain (non-CSV) mode
	dirSlash  bool                               // append "/" to directory names when set
	absolute  bool                               // prefix paths with a leading "/" when set
	output    []func(entry *ListJSONItem) string // per-field formatters, applied in order
	csv       *csv.Writer                        // non-nil when CSV output is enabled
	buf       bytes.Buffer                       // scratch buffer the csv writer renders into
}
|
|
|
|
|
|
|
|
// SetSeparator changes separator in struct.
// Call this before SetCSV if a custom CSV comma is wanted.
func (l *ListFormat) SetSeparator(separator string) {
	l.separator = separator
}
|
|
|
|
|
|
|
|
// SetDirSlash defines if slash should be printed
// after directory names.
func (l *ListFormat) SetDirSlash(dirSlash bool) {
	l.dirSlash = dirSlash
}
|
|
|
|
|
2018-06-03 11:42:34 +02:00
|
|
|
// SetAbsolute prints a leading slash in front of path names
// when absolute is true.
func (l *ListFormat) SetAbsolute(absolute bool) {
	l.absolute = absolute
}
|
|
|
|
|
2018-05-13 13:15:05 +02:00
|
|
|
// SetCSV defines if the output should be csv
|
|
|
|
//
|
|
|
|
// Note that you should call SetSeparator before this if you want a
|
|
|
|
// custom separator
|
|
|
|
func (l *ListFormat) SetCSV(useCSV bool) {
|
|
|
|
if useCSV {
|
|
|
|
l.csv = csv.NewWriter(&l.buf)
|
|
|
|
if l.separator != "" {
|
|
|
|
l.csv.Comma = []rune(l.separator)[0]
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
l.csv = nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-06 15:39:31 +01:00
|
|
|
// SetOutput sets functions used to create files information.
// Each function formats one field of the entry.
func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) {
	l.output = output
}
|
|
|
|
|
|
|
|
// AddModTime adds file's Mod Time to output,
// formatted in local time as "2006-01-02 15:04:05".
func (l *ListFormat) AddModTime() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.ModTime.When.Local().Format("2006-01-02 15:04:05")
	})
}
|
|
|
|
|
|
|
|
// AddSize adds file's size to output as a decimal integer.
func (l *ListFormat) AddSize() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return strconv.FormatInt(entry.Size, 10)
	})
}
|
|
|
|
|
2019-02-14 09:45:03 +01:00
|
|
|
// normalisePath makes sure the path has the correct slashes for the current mode
|
|
|
|
func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string {
|
|
|
|
if l.absolute && !strings.HasPrefix(remote, "/") {
|
|
|
|
remote = "/" + remote
|
|
|
|
}
|
|
|
|
if entry.IsDir && l.dirSlash {
|
|
|
|
remote += "/"
|
|
|
|
}
|
|
|
|
return remote
|
|
|
|
}
|
|
|
|
|
2018-01-06 15:39:31 +01:00
|
|
|
// AddPath adds path to file to output,
// normalised according to the absolute/dirSlash settings.
func (l *ListFormat) AddPath() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return l.normalisePath(entry, entry.Path)
	})
}
|
|
|
|
|
|
|
|
// AddEncrypted adds the encrypted path to file to output,
// normalised according to the absolute/dirSlash settings.
func (l *ListFormat) AddEncrypted() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return l.normalisePath(entry, entry.Encrypted)
	})
}
|
|
|
|
|
2018-01-06 18:53:37 +01:00
|
|
|
// AddHash adds the hash of the type given to the output
|
2018-01-12 17:30:54 +01:00
|
|
|
func (l *ListFormat) AddHash(ht hash.Type) {
|
2019-02-14 09:45:03 +01:00
|
|
|
hashName := ht.String()
|
|
|
|
l.AppendOutput(func(entry *ListJSONItem) string {
|
|
|
|
if entry.IsDir {
|
2018-01-06 18:53:37 +01:00
|
|
|
return ""
|
|
|
|
}
|
2019-02-14 09:45:03 +01:00
|
|
|
return entry.Hashes[hashName]
|
2018-01-06 18:53:37 +01:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-05-13 10:18:08 +02:00
|
|
|
// AddID adds file's ID to the output if known
// (empty string when the backend has no ID).
func (l *ListFormat) AddID() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.ID
	})
}
|
|
|
|
|
|
|
|
// AddOrigID adds file's Original ID to the output if known
// (empty string when the backend has no original ID).
func (l *ListFormat) AddOrigID() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.OrigID
	})
}
|
|
|
|
|
2019-03-20 13:45:06 +01:00
|
|
|
// AddTier adds file's Tier to the output if known
// (empty string when the backend has no tiers).
func (l *ListFormat) AddTier() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.Tier
	})
}
|
|
|
|
|
2018-05-13 11:37:25 +02:00
|
|
|
// AddMimeType adds file's MimeType to the output if known
// (empty string when unknown).
func (l *ListFormat) AddMimeType() {
	l.AppendOutput(func(entry *ListJSONItem) string {
		return entry.MimeType
	})
}
|
|
|
|
|
2018-01-06 15:39:31 +01:00
|
|
|
// AppendOutput adds string generated by specific function to printed output.
// Fields are printed in the order they were appended.
func (l *ListFormat) AppendOutput(functionToAppend func(item *ListJSONItem) string) {
	l.output = append(l.output, functionToAppend)
}
|
|
|
|
|
2018-05-13 11:55:18 +02:00
|
|
|
// Format prints information about the DirEntry in the format defined
func (l *ListFormat) Format(entry *ListJSONItem) (result string) {
	// Render each configured field for this entry.
	var out []string
	for _, fun := range l.output {
		out = append(out, fun(entry))
	}
	if l.csv != nil {
		// Reuse the shared buffer: render one CSV record then strip the
		// trailing newline the csv writer appends.
		l.buf.Reset()
		_ = l.csv.Write(out) // can't fail writing to bytes.Buffer
		l.csv.Flush()
		result = strings.TrimRight(l.buf.String(), "\n")
	} else {
		result = strings.Join(out, l.separator)
	}
	return result
}
|
2019-01-15 17:43:55 +01:00
|
|
|
|
|
|
|
// DirMove renames srcRemote to dstRemote
//
// It does this by loading the directory tree into memory (using ListR
// if available) and doing renames in parallel.
func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) {
	// Use DirMove if possible
	if doDirMove := f.Features().DirMove; doDirMove != nil {
		return doDirMove(ctx, f, srcRemote, dstRemote)
	}

	// Load the directory tree into memory
	tree, err := walk.NewDirTree(ctx, f, srcRemote, true, -1)
	if err != nil {
		return errors.Wrap(err, "RenameDir tree walk")
	}

	// Get the directories in sorted order
	dirs := tree.Dirs()

	// Make the destination directories - must be done in order not in parallel
	for _, dir := range dirs {
		dstPath := dstRemote + dir[len(srcRemote):]
		err := f.Mkdir(ctx, dstPath)
		if err != nil {
			return errors.Wrap(err, "RenameDir mkdir")
		}
	}

	// Rename the files in parallel using a fixed pool of Transfers workers
	// fed through a buffered channel.
	type rename struct {
		o       fs.Object
		newPath string
	}
	renames := make(chan rename, fs.Config.Transfers)
	g, gCtx := errgroup.WithContext(context.Background())
	for i := 0; i < fs.Config.Transfers; i++ {
		g.Go(func() error {
			for job := range renames {
				// ignore lookup error - nil just means nothing to overwrite
				dstOverwritten, _ := f.NewObject(gCtx, job.newPath)
				_, err := Move(gCtx, f, dstOverwritten, job.newPath, job.o)
				if err != nil {
					return err
				}
				// Stop early if another worker failed (group context cancelled).
				select {
				case <-gCtx.Done():
					return gCtx.Err()
				default:
				}

			}
			return nil
		})
	}
	// Producer: queue every object under its new path.
	for dir, entries := range tree {
		dstPath := dstRemote + dir[len(srcRemote):]
		for _, entry := range entries {
			if o, ok := entry.(fs.Object); ok {
				renames <- rename{o, path.Join(dstPath, path.Base(o.Remote()))}
			}
		}
	}
	close(renames)
	err = g.Wait()
	if err != nil {
		return errors.Wrap(err, "RenameDir renames")
	}

	// Remove the source directories in reverse order
	for i := len(dirs) - 1; i >= 0; i-- {
		err := f.Rmdir(ctx, dirs[i])
		if err != nil {
			return errors.Wrap(err, "RenameDir rmdir")
		}
	}

	return nil
}
|
2019-06-08 10:19:07 +02:00
|
|
|
|
|
|
|
// FsInfo provides information about a remote
type FsInfo struct {
	// Name of the remote (as passed into NewFs)
	Name string

	// Root of the remote (as passed into NewFs)
	Root string

	// String returns a description of the FS
	String string

	// Precision of the ModTimes in this Fs in Nanoseconds
	Precision time.Duration

	// Returns the supported hash types of the filesystem
	Hashes []string

	// Features returns the optional features of this Fs
	Features map[string]bool
}
|
|
|
|
|
|
|
|
// GetFsInfo gets the information (FsInfo) about a given Fs
|
|
|
|
func GetFsInfo(f fs.Fs) *FsInfo {
|
|
|
|
info := &FsInfo{
|
|
|
|
Name: f.Name(),
|
|
|
|
Root: f.Root(),
|
|
|
|
String: f.String(),
|
|
|
|
Precision: f.Precision(),
|
|
|
|
Hashes: make([]string, 0, 4),
|
|
|
|
Features: f.Features().Enabled(),
|
|
|
|
}
|
|
|
|
for _, hashType := range f.Hashes().Array() {
|
|
|
|
info.Hashes = append(info.Hashes, hashType.String())
|
|
|
|
}
|
|
|
|
return info
|
|
|
|
}
|