2018-01-12 17:30:54 +01:00
|
|
|
// Package sync is the implementation of sync/copy/move
|
|
|
|
package sync
|
2016-07-04 14:12:33 +02:00
|
|
|
|
|
|
|
import (
|
2018-04-06 20:13:27 +02:00
|
|
|
"context"
|
2017-01-03 18:35:12 +01:00
|
|
|
"fmt"
|
2018-06-03 10:21:25 +02:00
|
|
|
"path"
|
2017-08-09 22:06:39 +02:00
|
|
|
"sort"
|
2020-03-20 14:04:56 +01:00
|
|
|
"strings"
|
2016-07-04 14:12:33 +02:00
|
|
|
"sync"
|
2019-07-25 12:28:27 +02:00
|
|
|
"time"
|
2016-11-05 19:03:55 +01:00
|
|
|
|
|
|
|
"github.com/pkg/errors"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/accounting"
|
|
|
|
"github.com/rclone/rclone/fs/filter"
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
|
|
|
"github.com/rclone/rclone/fs/march"
|
|
|
|
"github.com/rclone/rclone/fs/operations"
|
2016-07-04 14:12:33 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// syncCopyMove holds all the state for a single sync, copy or move
// operation between fsrc and fdst. It is created by newSyncCopyMove
// and is not safe for reuse after the operation completes.
type syncCopyMove struct {
	// parameters
	fdst               fs.Fs         // destination to write to
	fsrc               fs.Fs         // source to read from
	deleteMode         fs.DeleteMode // how we are doing deletions
	DoMove             bool          // if set move files rather than copy them
	copyEmptySrcDirs   bool          // if set create empty source directories on the destination
	deleteEmptySrcDirs bool          // if set remove empty source directories after a move
	dir                string        // root of the sync within fsrc/fdst (currently always "")
	// internal state
	ctx                    context.Context        // internal context for controlling go-routines
	cancel                 func()                 // cancel the context
	noTraverse             bool                   // if set don't traverse the dst
	noCheckDest            bool                   // if set transfer all objects regardless without checking dst
	noUnicodeNormalization bool                   // don't normalize unicode characters in filenames
	deletersWg             sync.WaitGroup         // for delete before go routine
	deleteFilesCh          chan fs.Object         // channel to receive deletes if delete before
	trackRenames           bool                   // set if we should do server side renames
	trackRenamesStrategy   trackRenamesStrategy   // strategies used for tracking renames
	dstFilesMu             sync.Mutex             // protect dstFiles
	dstFiles               map[string]fs.Object   // dst files, always filled
	srcFiles               map[string]fs.Object   // src files, only used if deleteBefore
	srcFilesChan           chan fs.Object         // passes src objects
	srcFilesResult         chan error             // error result of src listing
	dstFilesResult         chan error             // error result of dst listing
	dstEmptyDirsMu         sync.Mutex             // protect dstEmptyDirs
	dstEmptyDirs           map[string]fs.DirEntry // potentially empty directories
	srcEmptyDirsMu         sync.Mutex             // protect srcEmptyDirs
	srcEmptyDirs           map[string]fs.DirEntry // potentially empty directories
	checkerWg              sync.WaitGroup         // wait for checkers
	toBeChecked            *pipe                  // checkers channel
	transfersWg            sync.WaitGroup         // wait for transfers
	toBeUploaded           *pipe                  // copiers channel
	errorMu                sync.Mutex             // Mutex covering the errors variables
	err                    error                  // normal error from copy process
	noRetryErr             error                  // error with NoRetry set
	fatalErr               error                  // fatal error
	commonHash             hash.Type              // common hash type between src and dst
	modifyWindow           time.Duration          // modify window between fsrc, fdst
	renameMapMu            sync.Mutex             // mutex to protect the below
	renameMap              map[string][]fs.Object // dst files by hash - only used by trackRenames
	renamerWg              sync.WaitGroup         // wait for renamers
	toBeRenamed            *pipe                  // renamers channel
	trackRenamesWg         sync.WaitGroup         // wg for background track renames
	trackRenamesCh         chan fs.Object         // objects are pumped in here
	renameCheck            []fs.Object            // accumulate files to check for rename here
	compareCopyDest        fs.Fs                  // place to check for files to server side copy
	backupDir              fs.Fs                  // place to store overwrites/deletes
	checkFirst             bool                   // if set run all the checkers before starting transfers
}
|
|
|
|
|
2020-03-21 18:35:34 +01:00
|
|
|
// trackRenamesStrategy is a bit set describing which file attributes
// --track-renames-strategy uses to pair up renamed files.
type trackRenamesStrategy byte

const (
	// trackRenamesStrategyHash pairs files by a common hash.
	trackRenamesStrategyHash trackRenamesStrategy = 1 << iota
	// trackRenamesStrategyModtime pairs files by modification time.
	trackRenamesStrategyModtime
)

// hash reports whether the strategy includes hash matching.
func (strategy trackRenamesStrategy) hash() bool {
	return strategy&trackRenamesStrategyHash == trackRenamesStrategyHash
}

// modTime reports whether the strategy includes modtime matching.
func (strategy trackRenamesStrategy) modTime() bool {
	return strategy&trackRenamesStrategyModtime == trackRenamesStrategyModtime
}
|
|
|
|
|
2019-06-17 10:34:30 +02:00
|
|
|
func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
|
2019-02-14 13:06:26 +01:00
|
|
|
if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
|
|
|
|
return nil, fserrors.FatalError(fs.ErrorOverlapping)
|
|
|
|
}
|
2016-07-04 14:12:33 +02:00
|
|
|
s := &syncCopyMove{
|
2020-05-15 01:27:59 +02:00
|
|
|
fdst: fdst,
|
|
|
|
fsrc: fsrc,
|
|
|
|
deleteMode: deleteMode,
|
|
|
|
DoMove: DoMove,
|
|
|
|
copyEmptySrcDirs: copyEmptySrcDirs,
|
|
|
|
deleteEmptySrcDirs: deleteEmptySrcDirs,
|
|
|
|
dir: "",
|
|
|
|
srcFilesChan: make(chan fs.Object, fs.Config.Checkers+fs.Config.Transfers),
|
|
|
|
srcFilesResult: make(chan error, 1),
|
|
|
|
dstFilesResult: make(chan error, 1),
|
|
|
|
dstEmptyDirs: make(map[string]fs.DirEntry),
|
|
|
|
srcEmptyDirs: make(map[string]fs.DirEntry),
|
|
|
|
noTraverse: fs.Config.NoTraverse,
|
|
|
|
noCheckDest: fs.Config.NoCheckDest,
|
|
|
|
noUnicodeNormalization: fs.Config.NoUnicodeNormalization,
|
|
|
|
deleteFilesCh: make(chan fs.Object, fs.Config.Checkers),
|
|
|
|
trackRenames: fs.Config.TrackRenames,
|
|
|
|
commonHash: fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
|
2020-06-10 12:02:14 +02:00
|
|
|
modifyWindow: fs.GetModifyWindow(fsrc, fdst),
|
2020-05-15 01:27:59 +02:00
|
|
|
trackRenamesCh: make(chan fs.Object, fs.Config.Checkers),
|
2020-05-15 12:39:07 +02:00
|
|
|
checkFirst: fs.Config.CheckFirst,
|
|
|
|
}
|
|
|
|
backlog := fs.Config.MaxBacklog
|
|
|
|
if s.checkFirst {
|
|
|
|
fs.Infof(s.fdst, "Running all checks before starting transfers")
|
|
|
|
backlog = -1
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
2019-11-28 18:01:21 +01:00
|
|
|
var err error
|
2020-05-15 12:39:07 +02:00
|
|
|
s.toBeChecked, err = newPipe(fs.Config.OrderBy, accounting.Stats(ctx).SetCheckQueue, backlog)
|
2019-11-28 18:01:21 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-05-15 12:39:07 +02:00
|
|
|
s.toBeUploaded, err = newPipe(fs.Config.OrderBy, accounting.Stats(ctx).SetTransferQueue, backlog)
|
2019-11-28 18:01:21 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-05-15 12:39:07 +02:00
|
|
|
s.toBeRenamed, err = newPipe(fs.Config.OrderBy, accounting.Stats(ctx).SetRenameQueue, backlog)
|
2019-11-28 18:01:21 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-07-25 12:28:27 +02:00
|
|
|
// If a max session duration has been defined add a deadline to the context
|
|
|
|
if fs.Config.MaxDuration > 0 {
|
|
|
|
endTime := time.Now().Add(fs.Config.MaxDuration)
|
|
|
|
fs.Infof(s.fdst, "Transfer session deadline: %s", endTime.Format("2006/01/02 15:04:05"))
|
|
|
|
s.ctx, s.cancel = context.WithDeadline(ctx, endTime)
|
|
|
|
} else {
|
|
|
|
s.ctx, s.cancel = context.WithCancel(ctx)
|
|
|
|
}
|
2018-11-25 17:49:38 +01:00
|
|
|
if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
|
|
|
|
fs.Errorf(nil, "Ignoring --no-traverse with sync")
|
|
|
|
s.noTraverse = false
|
|
|
|
}
|
2020-03-21 18:35:34 +01:00
|
|
|
s.trackRenamesStrategy, err = parseTrackRenamesStrategy(fs.Config.TrackRenamesStrategy)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-10-17 18:41:11 +02:00
|
|
|
if s.noCheckDest {
|
|
|
|
if s.deleteMode != fs.DeleteModeOff {
|
|
|
|
return nil, errors.New("can't use --no-check-dest with sync: use copy instead")
|
|
|
|
}
|
|
|
|
if fs.Config.Immutable {
|
|
|
|
return nil, errors.New("can't use --no-check-dest with --immutable")
|
|
|
|
}
|
|
|
|
if s.backupDir != nil {
|
|
|
|
return nil, errors.New("can't use --no-check-dest with --backup-dir")
|
|
|
|
}
|
|
|
|
}
|
2017-01-02 17:37:59 +01:00
|
|
|
if s.trackRenames {
|
2017-01-10 21:03:55 +01:00
|
|
|
// Don't track renames for remotes without server-side move support.
|
2018-01-12 17:30:54 +01:00
|
|
|
if !operations.CanServerSideMove(fdst) {
|
|
|
|
fs.Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
|
2017-01-02 17:37:59 +01:00
|
|
|
s.trackRenames = false
|
|
|
|
}
|
2020-03-21 18:35:34 +01:00
|
|
|
if s.trackRenamesStrategy.hash() && s.commonHash == hash.None {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
|
2017-01-02 17:37:59 +01:00
|
|
|
s.trackRenames = false
|
|
|
|
}
|
2020-03-20 14:04:56 +01:00
|
|
|
|
2020-06-10 12:02:14 +02:00
|
|
|
if s.trackRenamesStrategy.modTime() && s.modifyWindow == fs.ModTimeNotSupported {
|
2020-03-20 14:04:56 +01:00
|
|
|
fs.Errorf(fdst, "Ignoring --track-renames as either the source or destination do not support modtime")
|
|
|
|
s.trackRenames = false
|
|
|
|
}
|
|
|
|
|
2018-03-17 11:31:23 +01:00
|
|
|
if s.deleteMode == fs.DeleteModeOff {
|
|
|
|
fs.Errorf(fdst, "Ignoring --track-renames as it doesn't work with copy or move, only sync")
|
|
|
|
s.trackRenames = false
|
|
|
|
}
|
2017-01-02 17:37:59 +01:00
|
|
|
}
|
2017-01-25 20:35:14 +01:00
|
|
|
if s.trackRenames {
|
|
|
|
// track renames needs delete after
|
2018-01-12 17:30:54 +01:00
|
|
|
if s.deleteMode != fs.DeleteModeOff {
|
|
|
|
s.deleteMode = fs.DeleteModeAfter
|
2017-01-25 20:35:14 +01:00
|
|
|
}
|
2018-11-25 17:49:38 +01:00
|
|
|
if s.noTraverse {
|
|
|
|
fs.Errorf(nil, "Ignoring --no-traverse with --track-renames")
|
|
|
|
s.noTraverse = false
|
|
|
|
}
|
2017-01-03 18:35:12 +01:00
|
|
|
}
|
2017-01-10 22:47:03 +01:00
|
|
|
// Make Fs for --backup-dir if required
|
2019-06-23 05:52:09 +02:00
|
|
|
if fs.Config.BackupDir != "" || fs.Config.Suffix != "" {
|
2017-01-10 22:47:03 +01:00
|
|
|
var err error
|
2019-06-23 05:50:09 +02:00
|
|
|
s.backupDir, err = operations.BackupDir(fdst, fsrc, "")
|
2017-01-10 22:47:03 +01:00
|
|
|
if err != nil {
|
2019-06-23 05:50:09 +02:00
|
|
|
return nil, err
|
2017-01-10 22:47:03 +01:00
|
|
|
}
|
|
|
|
}
|
2019-07-08 03:02:53 +02:00
|
|
|
if fs.Config.CompareDest != "" {
|
|
|
|
var err error
|
|
|
|
s.compareCopyDest, err = operations.GetCompareDest()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else if fs.Config.CopyDest != "" {
|
|
|
|
var err error
|
|
|
|
s.compareCopyDest, err = operations.GetCopyDest(fdst)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2017-01-10 22:47:03 +01:00
|
|
|
return s, nil
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
|
2017-09-01 16:47:32 +02:00
|
|
|
// Check to see if the context has been cancelled
|
2016-07-04 14:12:33 +02:00
|
|
|
func (s *syncCopyMove) aborting() bool {
|
2018-07-19 23:41:34 +02:00
|
|
|
return s.ctx.Err() != nil
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
|
2017-01-03 18:35:12 +01:00
|
|
|
// This reads the map and pumps it into the channel passed in, closing
|
|
|
|
// the channel at the end
|
2018-01-12 17:30:54 +01:00
|
|
|
func (s *syncCopyMove) pumpMapToChan(files map[string]fs.Object, out chan<- fs.Object) {
|
2016-07-04 14:12:33 +02:00
|
|
|
outer:
|
2017-01-03 18:35:12 +01:00
|
|
|
for _, o := range files {
|
2016-07-04 14:12:33 +02:00
|
|
|
if s.aborting() {
|
|
|
|
break outer
|
|
|
|
}
|
|
|
|
select {
|
2017-01-03 18:35:12 +01:00
|
|
|
case out <- o:
|
2017-09-01 16:47:32 +02:00
|
|
|
case <-s.ctx.Done():
|
2016-07-04 14:12:33 +02:00
|
|
|
break outer
|
|
|
|
}
|
|
|
|
}
|
2017-01-03 18:35:12 +01:00
|
|
|
close(out)
|
|
|
|
s.srcFilesResult <- nil
|
|
|
|
}
|
|
|
|
|
2016-07-04 14:12:33 +02:00
|
|
|
// processError classifies an error returned while copying files and
// stashes it in the appropriate error slot under errorMu.
//
// Fatal errors cancel the whole sync (once); NoRetry errors and normal
// errors are just recorded so currentError can report them later.
// A nil err is a no-op.
func (s *syncCopyMove) processError(err error) {
	if err == nil {
		return
	}
	// Hitting the --max-duration deadline shouldn't make the whole
	// sync retryable, so convert it to a NoRetry error.
	if err == context.DeadlineExceeded {
		err = fserrors.NoRetryError(err)
	}
	s.errorMu.Lock()
	defer s.errorMu.Unlock()
	switch {
	case fserrors.IsFatalError(err):
		// Only cancel once - subsequent fatal errors just overwrite
		// the recorded error.
		if !s.aborting() {
			fs.Errorf(nil, "Cancelling sync due to fatal error: %v", err)
			s.cancel()
		}
		s.fatalErr = err
	case fserrors.IsNoRetryError(err):
		s.noRetryErr = err
	default:
		s.err = err
	}
}
|
|
|
|
|
2019-04-30 14:06:24 +02:00
|
|
|
// Returns the current error (if any) in the order of precedence
|
2017-01-03 18:35:12 +01:00
|
|
|
// fatalErr
|
|
|
|
// normal error
|
|
|
|
// noRetryErr
|
|
|
|
func (s *syncCopyMove) currentError() error {
|
|
|
|
s.errorMu.Lock()
|
|
|
|
defer s.errorMu.Unlock()
|
|
|
|
if s.fatalErr != nil {
|
|
|
|
return s.fatalErr
|
|
|
|
}
|
|
|
|
if s.err != nil {
|
|
|
|
return s.err
|
|
|
|
}
|
|
|
|
return s.noRetryErr
|
|
|
|
}
|
|
|
|
|
2016-07-04 14:12:33 +02:00
|
|
|
// pairChecker reads Objects~s on in send to out if they need transferring.
|
|
|
|
//
|
|
|
|
// FIXME potentially doing lots of hashes at once
|
2020-03-13 22:12:22 +01:00
|
|
|
func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
|
2016-07-04 14:12:33 +02:00
|
|
|
defer wg.Done()
|
|
|
|
for {
|
2020-03-13 22:12:22 +01:00
|
|
|
pair, ok := in.GetMax(s.ctx, fraction)
|
2018-07-19 23:41:34 +02:00
|
|
|
if !ok {
|
2016-07-04 14:12:33 +02:00
|
|
|
return
|
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
src := pair.Src
|
2019-07-22 21:11:46 +02:00
|
|
|
var err error
|
|
|
|
tr := accounting.Stats(s.ctx).NewCheckingTransfer(src)
|
2018-07-19 23:41:34 +02:00
|
|
|
// Check to see if can store this
|
|
|
|
if src.Storable() {
|
2019-07-08 03:02:53 +02:00
|
|
|
NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, pair.Dst, pair.Src, s.compareCopyDest, s.backupDir)
|
|
|
|
if err != nil {
|
|
|
|
s.processError(err)
|
|
|
|
}
|
|
|
|
if !NoNeedTransfer && operations.NeedTransfer(s.ctx, pair.Dst, pair.Src) {
|
2018-07-19 23:41:34 +02:00
|
|
|
// If files are treated as immutable, fail if destination exists and does not match
|
|
|
|
if fs.Config.Immutable && pair.Dst != nil {
|
|
|
|
fs.Errorf(pair.Dst, "Source and destination exist but do not match: immutable file modified")
|
|
|
|
s.processError(fs.ErrorImmutableModified)
|
|
|
|
} else {
|
|
|
|
// If destination already exists, then we must move it into --backup-dir if required
|
|
|
|
if pair.Dst != nil && s.backupDir != nil {
|
2019-06-23 05:50:09 +02:00
|
|
|
err := operations.MoveBackupDir(s.ctx, s.backupDir, pair.Dst)
|
2018-07-19 23:41:34 +02:00
|
|
|
if err != nil {
|
|
|
|
s.processError(err)
|
2017-01-10 22:47:03 +01:00
|
|
|
} else {
|
2018-07-19 23:41:34 +02:00
|
|
|
// If successful zero out the dst as it is no longer there and copy the file
|
|
|
|
pair.Dst = nil
|
|
|
|
ok = out.Put(s.ctx, pair)
|
|
|
|
if !ok {
|
2018-04-21 23:01:27 +02:00
|
|
|
return
|
|
|
|
}
|
2017-01-10 22:47:03 +01:00
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
} else {
|
|
|
|
ok = out.Put(s.ctx, pair)
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
2017-01-10 22:47:03 +01:00
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// If moving need to delete the files we don't need to copy
|
|
|
|
if s.DoMove {
|
|
|
|
// Delete src if no error on copy
|
2019-06-17 10:34:30 +02:00
|
|
|
s.processError(operations.DeleteFile(s.ctx, src))
|
2016-10-03 20:58:44 +02:00
|
|
|
}
|
|
|
|
}
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
2019-07-22 21:11:46 +02:00
|
|
|
tr.Done(err)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-03 18:35:12 +01:00
|
|
|
// pairRenamer reads Objects~s on in and attempts to rename them,
|
|
|
|
// otherwise it sends them out if they need transferring.
|
2020-03-13 22:12:22 +01:00
|
|
|
func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
|
2017-01-03 18:35:12 +01:00
|
|
|
defer wg.Done()
|
|
|
|
for {
|
2020-03-13 22:12:22 +01:00
|
|
|
pair, ok := in.GetMax(s.ctx, fraction)
|
2018-07-19 23:41:34 +02:00
|
|
|
if !ok {
|
2017-01-03 18:35:12 +01:00
|
|
|
return
|
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
src := pair.Src
|
|
|
|
if !s.tryRename(src) {
|
|
|
|
// pass on if not renamed
|
|
|
|
ok = out.Put(s.ctx, pair)
|
2017-01-03 18:35:12 +01:00
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-22 18:53:10 +02:00
|
|
|
// pairCopyOrMove reads Objects on in and moves or copies them.
|
2020-03-13 22:12:22 +01:00
|
|
|
func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs, fraction int, wg *sync.WaitGroup) {
|
2016-07-04 14:12:33 +02:00
|
|
|
defer wg.Done()
|
2016-10-22 18:53:10 +02:00
|
|
|
var err error
|
2016-07-04 14:12:33 +02:00
|
|
|
for {
|
2020-03-13 22:12:22 +01:00
|
|
|
pair, ok := in.GetMax(s.ctx, fraction)
|
2018-07-19 23:41:34 +02:00
|
|
|
if !ok {
|
2016-07-04 14:12:33 +02:00
|
|
|
return
|
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
src := pair.Src
|
|
|
|
if s.DoMove {
|
2019-06-17 10:34:30 +02:00
|
|
|
_, err = operations.Move(ctx, fdst, pair.Dst, src.Remote(), src)
|
2018-07-19 23:41:34 +02:00
|
|
|
} else {
|
2019-06-17 10:34:30 +02:00
|
|
|
_, err = operations.Copy(ctx, fdst, pair.Dst, src.Remote(), src)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
s.processError(err)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This starts the background checkers.
|
|
|
|
func (s *syncCopyMove) startCheckers() {
|
2018-01-12 17:30:54 +01:00
|
|
|
s.checkerWg.Add(fs.Config.Checkers)
|
|
|
|
for i := 0; i < fs.Config.Checkers; i++ {
|
2020-03-13 22:12:22 +01:00
|
|
|
fraction := (100 * i) / fs.Config.Checkers
|
|
|
|
go s.pairChecker(s.toBeChecked, s.toBeUploaded, fraction, &s.checkerWg)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// stopCheckers closes the checkers' input pipe and waits for all the
// checker goroutines started by startCheckers to finish.
func (s *syncCopyMove) stopCheckers() {
	s.toBeChecked.Close()
	fs.Debugf(s.fdst, "Waiting for checks to finish")
	s.checkerWg.Wait()
}
|
|
|
|
|
|
|
|
// This starts the background transfers
|
|
|
|
func (s *syncCopyMove) startTransfers() {
|
2018-01-12 17:30:54 +01:00
|
|
|
s.transfersWg.Add(fs.Config.Transfers)
|
|
|
|
for i := 0; i < fs.Config.Transfers; i++ {
|
2020-03-13 22:12:22 +01:00
|
|
|
fraction := (100 * i) / fs.Config.Transfers
|
|
|
|
go s.pairCopyOrMove(s.ctx, s.toBeUploaded, s.fdst, fraction, &s.transfersWg)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// stopTransfers closes the transfer input pipe and waits for all the
// transfer goroutines started by startTransfers to finish.
func (s *syncCopyMove) stopTransfers() {
	s.toBeUploaded.Close()
	fs.Debugf(s.fdst, "Waiting for transfers to finish")
	s.transfersWg.Wait()
}
|
|
|
|
|
2017-01-03 18:35:12 +01:00
|
|
|
// This starts the background renamers.
|
|
|
|
func (s *syncCopyMove) startRenamers() {
|
|
|
|
if !s.trackRenames {
|
|
|
|
return
|
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
s.renamerWg.Add(fs.Config.Checkers)
|
|
|
|
for i := 0; i < fs.Config.Checkers; i++ {
|
2020-03-13 22:12:22 +01:00
|
|
|
fraction := (100 * i) / fs.Config.Checkers
|
|
|
|
go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, fraction, &s.renamerWg)
|
2017-01-03 18:35:12 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// stopRenamers closes the renamers' input pipe and waits for the
// renamer goroutines to finish. No-op unless --track-renames is on.
func (s *syncCopyMove) stopRenamers() {
	if !s.trackRenames {
		return
	}
	s.toBeRenamed.Close()
	fs.Debugf(s.fdst, "Waiting for renames to finish")
	s.renamerWg.Wait()
}
|
|
|
|
|
2017-01-24 12:04:09 +01:00
|
|
|
// This starts the collection of possible renames
|
|
|
|
func (s *syncCopyMove) startTrackRenames() {
|
|
|
|
if !s.trackRenames {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
s.trackRenamesWg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer s.trackRenamesWg.Done()
|
|
|
|
for o := range s.trackRenamesCh {
|
|
|
|
s.renameCheck = append(s.renameCheck, o)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
// stopTrackRenames closes trackRenamesCh (ending the collector started
// by startTrackRenames) and waits for it to drain. No-op unless
// --track-renames is in effect.
func (s *syncCopyMove) stopTrackRenames() {
	if !s.trackRenames {
		return
	}
	close(s.trackRenamesCh)
	s.trackRenamesWg.Wait()
}
|
|
|
|
|
2017-01-25 20:59:53 +01:00
|
|
|
// This starts the background deletion of files for --delete-during
|
|
|
|
func (s *syncCopyMove) startDeleters() {
|
2018-01-12 17:30:54 +01:00
|
|
|
if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
|
2017-01-25 20:59:53 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
s.deletersWg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer s.deletersWg.Done()
|
2019-06-17 10:34:30 +02:00
|
|
|
err := operations.DeleteFilesWithBackupDir(s.ctx, s.deleteFilesCh, s.backupDir)
|
2017-01-25 20:59:53 +01:00
|
|
|
s.processError(err)
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
// stopDeleters closes deleteFilesCh (ending the deleter started by
// startDeleters) and waits for it to finish. Only applicable for
// --delete-during and --delete-only modes.
func (s *syncCopyMove) stopDeleters() {
	if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
		return
	}
	close(s.deleteFilesCh)
	s.deletersWg.Wait()
}
|
|
|
|
|
2016-07-04 14:12:33 +02:00
|
|
|
// deleteFiles deletes the files in the dstFiles map. If checkSrcMap is
// set then it checks to see if they exist first in srcFiles the source
// file map, otherwise it unconditionally deletes them. If checkSrcMap
// is clear then it assumes that the any source files that have been
// found have been removed from dstFiles already.
//
// It refuses to delete anything if errors have already occurred
// (unless --ignore-errors is set), returning fs.ErrorNotDeleting.
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
	if accounting.Stats(s.ctx).Errored() && !fs.Config.IgnoreErrors {
		fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
		return fs.ErrorNotDeleting
	}

	// Delete the spare files
	toDelete := make(fs.ObjectsChan, fs.Config.Transfers)
	// Feed the candidates in the background; the channel is always
	// closed so DeleteFilesWithBackupDir below terminates.
	go func() {
	outer:
		for remote, o := range s.dstFiles {
			if checkSrcMap {
				// skip anything which still exists in the source
				_, exists := s.srcFiles[remote]
				if exists {
					continue
				}
			}
			if s.aborting() {
				break
			}
			select {
			case <-s.ctx.Done():
				break outer
			case toDelete <- o:
			}
		}
		close(toDelete)
	}()
	return operations.DeleteFilesWithBackupDir(s.ctx, toDelete, s.backupDir)
}
|
|
|
|
|
2017-08-09 22:06:39 +02:00
|
|
|
// This deletes the empty directories in the slice passed in. It
|
|
|
|
// ignores any errors deleting directories
|
2019-06-17 10:34:30 +02:00
|
|
|
func deleteEmptyDirectories(ctx context.Context, f fs.Fs, entriesMap map[string]fs.DirEntry) error {
|
2018-05-14 19:16:56 +02:00
|
|
|
if len(entriesMap) == 0 {
|
2017-08-09 22:06:39 +02:00
|
|
|
return nil
|
|
|
|
}
|
2019-07-18 12:13:54 +02:00
|
|
|
if accounting.Stats(ctx).Errored() && !fs.Config.IgnoreErrors {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs)
|
|
|
|
return fs.ErrorNotDeletingDirs
|
2017-08-09 22:06:39 +02:00
|
|
|
}
|
|
|
|
|
2018-05-14 19:16:56 +02:00
|
|
|
var entries fs.DirEntries
|
|
|
|
for _, entry := range entriesMap {
|
|
|
|
entries = append(entries, entry)
|
|
|
|
}
|
2017-08-09 22:06:39 +02:00
|
|
|
// Now delete the empty directories starting from the longest path
|
|
|
|
sort.Sort(entries)
|
|
|
|
var errorCount int
|
|
|
|
var okCount int
|
|
|
|
for i := len(entries) - 1; i >= 0; i-- {
|
|
|
|
entry := entries[i]
|
2018-01-12 17:30:54 +01:00
|
|
|
dir, ok := entry.(fs.Directory)
|
2017-08-09 22:06:39 +02:00
|
|
|
if ok {
|
|
|
|
// TryRmdir only deletes empty directories
|
2019-06-17 10:34:30 +02:00
|
|
|
err := operations.TryRmdir(ctx, f, dir.Remote())
|
2017-08-09 22:06:39 +02:00
|
|
|
if err != nil {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(fs.LogDirName(f, dir.Remote()), "Failed to Rmdir: %v", err)
|
2017-08-09 22:06:39 +02:00
|
|
|
errorCount++
|
|
|
|
} else {
|
|
|
|
okCount++
|
|
|
|
}
|
|
|
|
} else {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(f, "Not a directory: %v", entry)
|
2017-08-09 22:06:39 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if errorCount > 0 {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(f, "failed to delete %d directories", errorCount)
|
2017-08-09 22:06:39 +02:00
|
|
|
}
|
|
|
|
if okCount > 0 {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(f, "deleted %d directories", okCount)
|
2017-08-09 22:06:39 +02:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-06-03 10:21:25 +02:00
|
|
|
// This copies the empty directories in the slice passed in and logs
|
|
|
|
// any errors copying the directories
|
2019-06-17 10:34:30 +02:00
|
|
|
func copyEmptyDirectories(ctx context.Context, f fs.Fs, entries map[string]fs.DirEntry) error {
|
2018-06-03 10:21:25 +02:00
|
|
|
if len(entries) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var okCount int
|
|
|
|
for _, entry := range entries {
|
|
|
|
dir, ok := entry.(fs.Directory)
|
|
|
|
if ok {
|
2019-06-17 10:34:30 +02:00
|
|
|
err := operations.Mkdir(ctx, f, dir.Remote())
|
2018-06-03 10:21:25 +02:00
|
|
|
if err != nil {
|
|
|
|
fs.Errorf(fs.LogDirName(f, dir.Remote()), "Failed to Mkdir: %v", err)
|
|
|
|
} else {
|
|
|
|
okCount++
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
fs.Errorf(f, "Not a directory: %v", entry)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-18 12:13:54 +02:00
|
|
|
if accounting.Stats(ctx).Errored() {
|
|
|
|
fs.Debugf(f, "failed to copy %d directories", accounting.Stats(ctx).GetErrors())
|
2018-06-03 10:21:25 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if okCount > 0 {
|
|
|
|
fs.Debugf(f, "copied %d directories", okCount)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-07-17 07:43:58 +02:00
|
|
|
func (s *syncCopyMove) srcParentDirCheck(entry fs.DirEntry) {
|
|
|
|
// If we are moving files then we don't want to remove directories with files in them
|
|
|
|
// from the srcEmptyDirs as we are about to move them making the directory empty.
|
|
|
|
if s.DoMove {
|
|
|
|
return
|
|
|
|
}
|
2018-06-03 10:21:25 +02:00
|
|
|
parentDir := path.Dir(entry.Remote())
|
|
|
|
if parentDir == "." {
|
|
|
|
parentDir = ""
|
|
|
|
}
|
2018-07-17 07:43:58 +02:00
|
|
|
if _, ok := s.srcEmptyDirs[parentDir]; ok {
|
|
|
|
delete(s.srcEmptyDirs, parentDir)
|
2018-05-14 19:16:56 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-21 18:35:34 +01:00
|
|
|
// parseTrackRenamesStrategy turns a config string into a trackRenamesStrategy
|
|
|
|
func parseTrackRenamesStrategy(strategies string) (strategy trackRenamesStrategy, err error) {
|
|
|
|
if len(strategies) == 0 {
|
|
|
|
return strategy, nil
|
|
|
|
}
|
|
|
|
for _, s := range strings.Split(strategies, ",") {
|
|
|
|
switch s {
|
|
|
|
case "hash":
|
|
|
|
strategy |= trackRenamesStrategyHash
|
|
|
|
case "modtime":
|
|
|
|
strategy |= trackRenamesStrategyModtime
|
|
|
|
case "size":
|
|
|
|
// ignore
|
|
|
|
default:
|
|
|
|
return strategy, errors.Errorf("unknown track renames strategy %q", s)
|
2020-03-20 14:04:56 +01:00
|
|
|
}
|
|
|
|
}
|
2020-03-21 18:35:34 +01:00
|
|
|
return strategy, nil
|
2020-03-20 14:04:56 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// renameID makes a string with the size and the other identifiers of the requested rename strategies
|
2017-01-03 18:35:12 +01:00
|
|
|
//
|
|
|
|
// it may return an empty string in which case no hash could be made
|
2020-03-21 18:35:34 +01:00
|
|
|
func (s *syncCopyMove) renameID(obj fs.Object, renamesStrategy trackRenamesStrategy, precision time.Duration) string {
|
2020-03-20 14:04:56 +01:00
|
|
|
var builder strings.Builder
|
|
|
|
|
|
|
|
fmt.Fprintf(&builder, "%d", obj.Size())
|
|
|
|
|
2020-03-21 18:35:34 +01:00
|
|
|
if renamesStrategy.hash() {
|
2020-03-20 14:04:56 +01:00
|
|
|
var err error
|
|
|
|
hash, err := obj.Hash(s.ctx, s.commonHash)
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
fs.Debugf(obj, "Hash failed: %v", err)
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
if hash == "" {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
|
|
|
fmt.Fprintf(&builder, ",%s", hash)
|
2016-12-18 11:03:56 +01:00
|
|
|
}
|
2020-03-20 14:04:56 +01:00
|
|
|
|
2020-06-10 12:02:14 +02:00
|
|
|
// for renamesStrategy.modTime() we don't add to the hash but we check the times in
|
|
|
|
// popRenameMap
|
2020-03-20 14:04:56 +01:00
|
|
|
|
|
|
|
return builder.String()
|
2017-01-04 00:03:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// pushRenameMap adds the object with hash to the rename map
|
2018-01-12 17:30:54 +01:00
|
|
|
func (s *syncCopyMove) pushRenameMap(hash string, obj fs.Object) {
|
2017-01-04 00:03:20 +01:00
|
|
|
s.renameMapMu.Lock()
|
|
|
|
s.renameMap[hash] = append(s.renameMap[hash], obj)
|
|
|
|
s.renameMapMu.Unlock()
|
2017-01-03 18:35:12 +01:00
|
|
|
}
|
2016-12-18 11:03:56 +01:00
|
|
|
|
2017-01-04 00:03:20 +01:00
|
|
|
// popRenameMap finds the object with hash and pop the first match from
|
|
|
|
// renameMap or returns nil if not found.
|
2020-06-10 12:02:14 +02:00
|
|
|
func (s *syncCopyMove) popRenameMap(hash string, src fs.Object) (dst fs.Object) {
|
2017-01-04 00:03:20 +01:00
|
|
|
s.renameMapMu.Lock()
|
|
|
|
dsts, ok := s.renameMap[hash]
|
|
|
|
if ok && len(dsts) > 0 {
|
2020-06-10 12:02:14 +02:00
|
|
|
// Element to remove
|
|
|
|
i := 0
|
|
|
|
|
|
|
|
// If using track renames strategy modtime then we need to check the modtimes here
|
|
|
|
if s.trackRenamesStrategy.modTime() {
|
|
|
|
i = -1
|
|
|
|
srcModTime := src.ModTime(s.ctx)
|
|
|
|
for j, dst := range dsts {
|
|
|
|
dstModTime := dst.ModTime(s.ctx)
|
|
|
|
dt := dstModTime.Sub(srcModTime)
|
|
|
|
if dt < s.modifyWindow && dt > -s.modifyWindow {
|
|
|
|
i = j
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// If nothing matched then return nil
|
|
|
|
if i < 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove the entry and return it
|
|
|
|
dst = dsts[i]
|
|
|
|
dsts = append(dsts[:i], dsts[i+1:]...)
|
2017-01-04 00:03:20 +01:00
|
|
|
if len(dsts) > 0 {
|
|
|
|
s.renameMap[hash] = dsts
|
|
|
|
} else {
|
|
|
|
delete(s.renameMap, hash)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.renameMapMu.Unlock()
|
|
|
|
return dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// makeRenameMap builds a map of the destination files by hash that
// match sizes in the slice of objects in s.renameCheck
//
// Only destination files whose size matches some candidate are hashed,
// and the hashing is done by fs.Config.Transfers parallel workers.
func (s *syncCopyMove) makeRenameMap() {
	fs.Infof(s.fdst, "Making map for --track-renames")

	// first make a map of possible sizes we need to check
	possibleSizes := map[int64]struct{}{}
	for _, obj := range s.renameCheck {
		possibleSizes[obj.Size()] = struct{}{}
	}

	// pump all the dstFiles into in
	in := make(chan fs.Object, fs.Config.Checkers)
	go s.pumpMapToChan(s.dstFiles, in)

	// now make a map of size,hash for all dstFiles
	s.renameMap = make(map[string][]fs.Object)
	var wg sync.WaitGroup
	wg.Add(fs.Config.Transfers)
	for i := 0; i < fs.Config.Transfers; i++ {
		go func() {
			defer wg.Done()
			for obj := range in {
				// only create hash for dst fs.Object if its size could match
				if _, found := possibleSizes[obj.Size()]; found {
					tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj)
					hash := s.renameID(obj, s.trackRenamesStrategy, s.modifyWindow)

					// an empty ID means the object can't be matched
					if hash != "" {
						s.pushRenameMap(hash, obj)
					}

					tr.Done(nil)
				}
			}
		}()
	}
	wg.Wait()
	fs.Infof(s.fdst, "Finished making map for --track-renames")
}
|
|
|
|
|
2020-05-25 08:05:53 +02:00
|
|
|
// tryRename renames an src object when doing track renames if
|
2017-01-04 00:03:20 +01:00
|
|
|
// possible, it returns true if the object was renamed.
|
2018-01-12 17:30:54 +01:00
|
|
|
func (s *syncCopyMove) tryRename(src fs.Object) bool {
|
2017-02-22 20:28:22 +01:00
|
|
|
// Calculate the hash of the src object
|
2020-03-20 14:04:56 +01:00
|
|
|
hash := s.renameID(src, s.trackRenamesStrategy, fs.GetModifyWindow(s.fsrc, s.fdst))
|
|
|
|
|
2017-01-03 18:35:12 +01:00
|
|
|
if hash == "" {
|
2017-01-04 00:03:20 +01:00
|
|
|
return false
|
2017-01-03 18:35:12 +01:00
|
|
|
}
|
2017-01-04 00:03:20 +01:00
|
|
|
|
2017-02-22 20:28:22 +01:00
|
|
|
// Get a match on fdst
|
2020-06-10 12:02:14 +02:00
|
|
|
dst := s.popRenameMap(hash, src)
|
2017-01-04 00:03:20 +01:00
|
|
|
if dst == nil {
|
|
|
|
return false
|
2017-01-03 18:35:12 +01:00
|
|
|
}
|
2017-01-04 00:03:20 +01:00
|
|
|
|
2017-02-22 20:28:22 +01:00
|
|
|
// Find dst object we are about to overwrite if it exists
|
2019-06-17 10:34:30 +02:00
|
|
|
dstOverwritten, _ := s.fdst.NewObject(s.ctx, src.Remote())
|
2017-02-22 20:28:22 +01:00
|
|
|
|
|
|
|
// Rename dst to have name src.Remote()
|
2019-06-17 10:34:30 +02:00
|
|
|
_, err := operations.Move(s.ctx, s.fdst, dstOverwritten, src.Remote(), dst)
|
2017-01-04 00:03:20 +01:00
|
|
|
if err != nil {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
|
2017-01-04 00:03:20 +01:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// remove file from dstFiles if present
|
|
|
|
s.dstFilesMu.Lock()
|
|
|
|
delete(s.dstFiles, dst.Remote())
|
|
|
|
s.dstFilesMu.Unlock()
|
|
|
|
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Infof(src, "Renamed from %q", dst.Remote())
|
2017-01-04 00:03:20 +01:00
|
|
|
return true
|
2016-12-18 11:03:56 +01:00
|
|
|
}
|
|
|
|
|
2017-01-25 20:59:53 +01:00
|
|
|
// run executes the sync/copy/move that s was configured with,
// returning the first error accumulated (nil on success).
//
// It starts the background checker/renamer/transfer/deleter
// goroutines, marches over fsrc and fdst in lockstep feeding them,
// then performs the post-pass work: delete-after, empty directory
// copying/pruning, and empty source directory removal for moves.
func (s *syncCopyMove) run() error {
	if operations.Same(s.fdst, s.fsrc) {
		fs.Errorf(s.fdst, "Nothing to do as source and destination are the same")
		return nil
	}

	// Start background checking and transferring pipeline
	s.startCheckers()
	s.startRenamers()
	// With --check-first the transfers are started only after all the
	// checks have completed (see below after stopCheckers).
	if !s.checkFirst {
		s.startTransfers()
	}
	s.startDeleters()
	s.dstFiles = make(map[string]fs.Object)

	s.startTrackRenames()

	// set up a march over fdst and fsrc
	m := &march.March{
		Ctx:                    s.ctx,
		Fdst:                   s.fdst,
		Fsrc:                   s.fsrc,
		Dir:                    s.dir,
		NoTraverse:             s.noTraverse,
		Callback:               s, // DstOnly/SrcOnly/Match methods below
		DstIncludeAll:          filter.Active.Opt.DeleteExcluded,
		NoCheckDest:            s.noCheckDest,
		NoUnicodeNormalization: s.noUnicodeNormalization,
	}
	s.processError(m.Run())

	s.stopTrackRenames()
	if s.trackRenames {
		// Build the map of the remaining dstFiles by hash
		s.makeRenameMap()
		// Attempt renames for all the files which don't have a matching dst
		for _, src := range s.renameCheck {
			ok := s.toBeRenamed.Put(s.ctx, fs.ObjectPair{Src: src, Dst: nil})
			if !ok {
				break
			}
		}
	}

	// Stop background checking and transferring pipeline
	s.stopCheckers()
	if s.checkFirst {
		fs.Infof(s.fdst, "Checks finished, now starting transfers")
		s.startTransfers()
	}
	s.stopRenamers()
	s.stopTransfers()
	s.stopDeleters()

	if s.copyEmptySrcDirs {
		s.processError(copyEmptyDirectories(s.ctx, s.fdst, s.srcEmptyDirs))
	}

	// Delete files after, unless errors occurred (and --ignore-errors
	// is not set) in which case deleting would be unsafe.
	if s.deleteMode == fs.DeleteModeAfter {
		if s.currentError() != nil && !fs.Config.IgnoreErrors {
			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
		} else {
			s.processError(s.deleteFiles(false))
		}
	}

	// Prune empty directories (same error guard as above)
	if s.deleteMode != fs.DeleteModeOff {
		if s.currentError() != nil && !fs.Config.IgnoreErrors {
			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs)
		} else {
			s.processError(deleteEmptyDirectories(s.ctx, s.fdst, s.dstEmptyDirs))
		}
	}

	// Delete empty fsrc subdirectories
	// if DoMove and --delete-empty-src-dirs flag is set
	if s.DoMove && s.deleteEmptySrcDirs {
		//delete empty subdirectories that were part of the move
		s.processError(deleteEmptyDirectories(s.ctx, s.fsrc, s.srcEmptyDirs))
	}

	// Read the error out of the context if there is one
	s.processError(s.ctx.Err())

	if s.deleteMode != fs.DeleteModeOnly && accounting.Stats(s.ctx).GetTransfers() == 0 {
		fs.Infof(nil, "There was nothing to transfer")
	}

	// cancel the context to free resources
	s.cancel()
	return s.currentError()
}
|
|
|
|
|
2017-09-01 16:21:46 +02:00
|
|
|
// DstOnly have an object which is in the destination only
|
2018-01-12 17:30:54 +01:00
|
|
|
func (s *syncCopyMove) DstOnly(dst fs.DirEntry) (recurse bool) {
|
|
|
|
if s.deleteMode == fs.DeleteModeOff {
|
2017-09-01 16:21:46 +02:00
|
|
|
return false
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
|
|
|
switch x := dst.(type) {
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Object:
|
2017-01-25 20:59:53 +01:00
|
|
|
switch s.deleteMode {
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.DeleteModeAfter:
|
2017-01-25 20:59:53 +01:00
|
|
|
// record object as needs deleting
|
|
|
|
s.dstFilesMu.Lock()
|
|
|
|
s.dstFiles[x.Remote()] = x
|
|
|
|
s.dstFilesMu.Unlock()
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.DeleteModeDuring, fs.DeleteModeOnly:
|
2018-04-21 23:01:27 +02:00
|
|
|
select {
|
|
|
|
case <-s.ctx.Done():
|
|
|
|
return
|
|
|
|
case s.deleteFilesCh <- x:
|
|
|
|
}
|
2017-01-25 20:59:53 +01:00
|
|
|
default:
|
|
|
|
panic(fmt.Sprintf("unexpected delete mode %d", s.deleteMode))
|
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Directory:
|
2017-01-25 20:59:53 +01:00
|
|
|
// Do the same thing to the entire contents of the directory
|
2017-08-09 22:06:39 +02:00
|
|
|
// Record directory as it is potentially empty and needs deleting
|
|
|
|
if s.fdst.Features().CanHaveEmptyDirectories {
|
|
|
|
s.dstEmptyDirsMu.Lock()
|
2018-05-14 19:16:56 +02:00
|
|
|
s.dstEmptyDirs[dst.Remote()] = dst
|
2017-08-09 22:06:39 +02:00
|
|
|
s.dstEmptyDirsMu.Unlock()
|
|
|
|
}
|
2017-09-01 16:21:46 +02:00
|
|
|
return true
|
2017-01-25 20:59:53 +01:00
|
|
|
default:
|
|
|
|
panic("Bad object in DirEntries")
|
|
|
|
|
|
|
|
}
|
2017-09-01 16:21:46 +02:00
|
|
|
return false
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
|
|
|
|
2017-09-01 16:21:46 +02:00
|
|
|
// SrcOnly have an object which is in the source only
|
2018-01-12 17:30:54 +01:00
|
|
|
func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
|
|
|
|
if s.deleteMode == fs.DeleteModeOnly {
|
2017-09-01 16:21:46 +02:00
|
|
|
return false
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
|
|
|
switch x := src.(type) {
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Object:
|
2018-07-17 07:43:58 +02:00
|
|
|
// If it's a copy operation,
|
|
|
|
// remove parent directory from srcEmptyDirs
|
2018-05-14 19:16:56 +02:00
|
|
|
// since it's not really empty
|
|
|
|
s.srcEmptyDirsMu.Lock()
|
2018-07-17 07:43:58 +02:00
|
|
|
s.srcParentDirCheck(src)
|
2018-05-14 19:16:56 +02:00
|
|
|
s.srcEmptyDirsMu.Unlock()
|
|
|
|
|
2017-01-25 20:59:53 +01:00
|
|
|
if s.trackRenames {
|
|
|
|
// Save object to check for a rename later
|
2018-04-21 23:01:27 +02:00
|
|
|
select {
|
|
|
|
case <-s.ctx.Done():
|
|
|
|
return
|
|
|
|
case s.trackRenamesCh <- x:
|
|
|
|
}
|
2017-01-25 20:59:53 +01:00
|
|
|
} else {
|
2019-07-08 03:02:53 +02:00
|
|
|
// Check CompareDest && CopyDest
|
|
|
|
NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, nil, x, s.compareCopyDest, s.backupDir)
|
|
|
|
if err != nil {
|
|
|
|
s.processError(err)
|
|
|
|
}
|
|
|
|
if !NoNeedTransfer {
|
|
|
|
// No need to check since doesn't exist
|
|
|
|
ok := s.toBeUploaded.Put(s.ctx, fs.ObjectPair{Src: x, Dst: nil})
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
2018-04-21 23:01:27 +02:00
|
|
|
}
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Directory:
|
2017-01-25 20:59:53 +01:00
|
|
|
// Do the same thing to the entire contents of the directory
|
2017-11-06 17:18:13 +01:00
|
|
|
// Record the directory for deletion
|
|
|
|
s.srcEmptyDirsMu.Lock()
|
2018-07-17 07:43:58 +02:00
|
|
|
s.srcParentDirCheck(src)
|
2018-05-14 19:16:56 +02:00
|
|
|
s.srcEmptyDirs[src.Remote()] = src
|
2017-11-06 17:18:13 +01:00
|
|
|
s.srcEmptyDirsMu.Unlock()
|
2017-09-01 16:21:46 +02:00
|
|
|
return true
|
2017-01-25 20:59:53 +01:00
|
|
|
default:
|
|
|
|
panic("Bad object in DirEntries")
|
|
|
|
}
|
2017-09-01 16:21:46 +02:00
|
|
|
return false
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
|
|
|
|
2017-09-01 16:21:46 +02:00
|
|
|
// Match is called when src and dst are present, so sync src to dst
|
2019-06-17 10:34:30 +02:00
|
|
|
func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
|
2017-01-25 20:59:53 +01:00
|
|
|
switch srcX := src.(type) {
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Object:
|
2018-05-14 19:16:56 +02:00
|
|
|
s.srcEmptyDirsMu.Lock()
|
2018-07-17 07:43:58 +02:00
|
|
|
s.srcParentDirCheck(src)
|
2018-05-14 19:16:56 +02:00
|
|
|
s.srcEmptyDirsMu.Unlock()
|
|
|
|
|
2018-01-12 17:30:54 +01:00
|
|
|
if s.deleteMode == fs.DeleteModeOnly {
|
2017-09-01 16:21:46 +02:00
|
|
|
return false
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
dstX, ok := dst.(fs.Object)
|
2017-01-25 20:59:53 +01:00
|
|
|
if ok {
|
2018-07-19 23:41:34 +02:00
|
|
|
ok = s.toBeChecked.Put(s.ctx, fs.ObjectPair{Src: srcX, Dst: dstX})
|
|
|
|
if !ok {
|
|
|
|
return false
|
2018-04-21 23:01:27 +02:00
|
|
|
}
|
2017-01-25 20:59:53 +01:00
|
|
|
} else {
|
|
|
|
// FIXME src is file, dst is directory
|
|
|
|
err := errors.New("can't overwrite directory with file")
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(dst, "%v", err)
|
2017-01-25 20:59:53 +01:00
|
|
|
s.processError(err)
|
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.Directory:
|
2017-01-25 20:59:53 +01:00
|
|
|
// Do the same thing to the entire contents of the directory
|
2018-01-12 17:30:54 +01:00
|
|
|
_, ok := dst.(fs.Directory)
|
2017-01-25 20:59:53 +01:00
|
|
|
if ok {
|
2020-03-03 17:24:22 +01:00
|
|
|
// Only record matched (src & dst) empty dirs when performing move
|
|
|
|
if s.DoMove {
|
|
|
|
// Record the src directory for deletion
|
|
|
|
s.srcEmptyDirsMu.Lock()
|
|
|
|
s.srcParentDirCheck(src)
|
|
|
|
s.srcEmptyDirs[src.Remote()] = src
|
|
|
|
s.srcEmptyDirsMu.Unlock()
|
|
|
|
}
|
|
|
|
|
2017-09-01 16:21:46 +02:00
|
|
|
return true
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
2017-09-01 16:21:46 +02:00
|
|
|
// FIXME src is dir, dst is file
|
|
|
|
err := errors.New("can't overwrite file with directory")
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(dst, "%v", err)
|
2017-09-01 16:21:46 +02:00
|
|
|
s.processError(err)
|
2017-01-25 20:59:53 +01:00
|
|
|
default:
|
|
|
|
panic("Bad object in DirEntries")
|
|
|
|
}
|
2017-09-01 16:21:46 +02:00
|
|
|
return false
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
|
|
|
|
2017-01-25 20:35:14 +01:00
|
|
|
// Syncs fsrc into fdst
|
|
|
|
//
|
|
|
|
// If Delete is true then it deletes any files in fdst that aren't in fsrc
|
|
|
|
//
|
|
|
|
// If DoMove is true then files will be moved instead of copied
|
|
|
|
//
|
|
|
|
// dir is the start directory, "" for root
|
2019-06-17 10:34:30 +02:00
|
|
|
func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
2018-01-12 17:30:54 +01:00
|
|
|
if deleteMode != fs.DeleteModeOff && DoMove {
|
|
|
|
return fserrors.FatalError(errors.New("can't delete and move at the same time"))
|
2017-01-25 20:35:14 +01:00
|
|
|
}
|
2017-01-25 20:59:53 +01:00
|
|
|
// Run an extra pass to delete only
|
2018-01-12 17:30:54 +01:00
|
|
|
if deleteMode == fs.DeleteModeBefore {
|
|
|
|
if fs.Config.TrackRenames {
|
|
|
|
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
|
|
|
// only delete stuff during in this pass
|
2019-06-17 10:34:30 +02:00
|
|
|
do, err := newSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
|
2017-01-25 20:59:53 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-06-13 15:35:51 +02:00
|
|
|
err = do.run()
|
2017-01-25 20:59:53 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// Next pass does a copy only
|
2018-01-12 17:30:54 +01:00
|
|
|
deleteMode = fs.DeleteModeOff
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
do, err := newSyncCopyMove(ctx, fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
|
2017-01-10 22:47:03 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-06-13 15:35:51 +02:00
|
|
|
return do.run()
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
|
2017-01-25 20:35:14 +01:00
|
|
|
// Sync fsrc into fdst, deleting extraneous files in fdst according to
// the globally configured delete mode (fs.Config.DeleteMode).
// If copyEmptySrcDirs is set, empty directories in fsrc are created in fdst.
func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
	return runSyncCopyMove(ctx, fdst, fsrc, fs.Config.DeleteMode, false, false, copyEmptySrcDirs)
}
|
|
|
|
|
2016-07-04 14:12:33 +02:00
|
|
|
// CopyDir copies fsrc into fdst without deleting anything in fdst
// (fs.DeleteModeOff).
// If copyEmptySrcDirs is set, empty directories in fsrc are created in fdst.
func CopyDir(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
	return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
}
|
|
|
|
|
|
|
|
// moveDir moves fsrc into fdst file by file (DoMove=true, no deletions
// in fdst). Used by MoveDir as the fallback when a server side
// directory move isn't possible.
func moveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
	return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
}
|
|
|
|
|
|
|
|
// MoveDir moves fsrc into fdst
|
2019-06-17 10:34:30 +02:00
|
|
|
func MoveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
2018-01-12 17:30:54 +01:00
|
|
|
if operations.Same(fdst, fsrc) {
|
|
|
|
fs.Errorf(fdst, "Nothing to do as source and destination are the same")
|
2016-07-04 14:12:33 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// First attempt to use DirMover if exists, same Fs and no filters are active
|
2018-01-12 17:30:54 +01:00
|
|
|
if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && operations.SameConfig(fsrc, fdst) && filter.Active.InActive() {
|
2020-06-05 17:13:10 +02:00
|
|
|
if operations.SkipDestructive(ctx, fdst, "server side directory move") {
|
2016-07-11 12:36:46 +02:00
|
|
|
return nil
|
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(fdst, "Using server side directory move")
|
2019-06-17 10:34:30 +02:00
|
|
|
err := fdstDirMove(ctx, fsrc, "", "")
|
2016-07-04 14:12:33 +02:00
|
|
|
switch err {
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.ErrorCantDirMove, fs.ErrorDirExists:
|
|
|
|
fs.Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err)
|
2016-07-04 14:12:33 +02:00
|
|
|
case nil:
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Infof(fdst, "Server side directory move succeeded")
|
2016-07-04 14:12:33 +02:00
|
|
|
return nil
|
|
|
|
default:
|
2019-11-18 15:13:02 +01:00
|
|
|
err = fs.CountError(err)
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(fdst, "Server side directory move failed: %v", err)
|
2016-07-04 14:12:33 +02:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-11 12:36:46 +02:00
|
|
|
// Otherwise move the files one by one
|
2019-06-17 10:34:30 +02:00
|
|
|
return moveDir(ctx, fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|