2018-01-12 17:30:54 +01:00
|
|
|
// Package sync is the implementation of sync/copy/move
|
|
|
|
package sync
|
2016-07-04 14:12:33 +02:00
|
|
|
|
|
|
|
import (
|
2018-04-06 20:13:27 +02:00
|
|
|
"context"
|
2021-11-04 11:12:57 +01:00
|
|
|
"errors"
|
2017-01-03 18:35:12 +01:00
|
|
|
"fmt"
|
2018-06-03 10:21:25 +02:00
|
|
|
"path"
|
2017-08-09 22:06:39 +02:00
|
|
|
"sort"
|
2020-03-20 14:04:56 +01:00
|
|
|
"strings"
|
2016-07-04 14:12:33 +02:00
|
|
|
"sync"
|
2019-07-25 12:28:27 +02:00
|
|
|
"time"
|
2016-11-05 19:03:55 +01:00
|
|
|
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/accounting"
|
|
|
|
"github.com/rclone/rclone/fs/filter"
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
|
|
|
"github.com/rclone/rclone/fs/march"
|
|
|
|
"github.com/rclone/rclone/fs/operations"
|
2024-02-06 17:00:34 +01:00
|
|
|
"github.com/rclone/rclone/lib/errcount"
|
|
|
|
"golang.org/x/sync/errgroup"
|
2016-07-04 14:12:33 +02:00
|
|
|
)
|
|
|
|
|
2023-07-15 18:41:13 +02:00
|
|
|
// ErrorMaxDurationReached defines error when transfer duration is reached
|
|
|
|
// Used for checking on exit and matching to correct exit code.
|
|
|
|
var ErrorMaxDurationReached = errors.New("max transfer duration reached as set by --max-duration")
|
|
|
|
|
|
|
|
// ErrorMaxDurationReachedFatal is returned from when the max
|
|
|
|
// duration limit is reached.
|
|
|
|
var ErrorMaxDurationReachedFatal = fserrors.FatalError(ErrorMaxDurationReached)
|
|
|
|
|
2016-07-04 14:12:33 +02:00
|
|
|
// syncCopyMove holds all the state for a single sync, copy or move
// operation between fsrc and fdst. It is constructed by
// newSyncCopyMove and driven by the checker/transfer/renamer
// goroutines defined on it.
type syncCopyMove struct {
	// parameters
	fdst               fs.Fs         // destination remote
	fsrc               fs.Fs         // source remote
	deleteMode         fs.DeleteMode // how we are doing deletions
	DoMove             bool          // if set, move files instead of copying them
	copyEmptySrcDirs   bool          // if set, replicate empty source directories on the destination
	deleteEmptySrcDirs bool          // if set, delete source directories emptied by the move
	dir                string        // sub directory to sync (set to "" by newSyncCopyMove)
	// internal state
	ci                     *fs.ConfigInfo         // global config
	fi                     *filter.Filter         // filter config
	ctx                    context.Context        // internal context for controlling go-routines
	cancel                 func()                 // cancel the context
	inCtx                  context.Context        // internal context for controlling march
	inCancel               func()                 // cancel the march context
	noTraverse             bool                   // if set don't traverse the dst
	noCheckDest            bool                   // if set transfer all objects regardless without checking dst
	noUnicodeNormalization bool                   // don't normalize unicode characters in filenames
	deletersWg             sync.WaitGroup         // for delete before go routine
	deleteFilesCh          chan fs.Object         // channel to receive deletes if delete before
	trackRenames           bool                   // set if we should do server-side renames
	trackRenamesStrategy   trackRenamesStrategy   // strategies used for tracking renames
	dstFilesMu             sync.Mutex             // protect dstFiles
	dstFiles               map[string]fs.Object   // dst files, always filled
	srcFiles               map[string]fs.Object   // src files, only used if deleteBefore
	srcFilesChan           chan fs.Object         // passes src objects
	srcFilesResult         chan error             // error result of src listing
	dstFilesResult         chan error             // error result of dst listing
	dstEmptyDirsMu         sync.Mutex             // protect dstEmptyDirs
	dstEmptyDirs           map[string]fs.DirEntry // potentially empty directories
	srcEmptyDirsMu         sync.Mutex             // protect srcEmptyDirs
	srcEmptyDirs           map[string]fs.DirEntry // potentially empty directories
	srcMoveEmptyDirs       map[string]fs.DirEntry // potentially empty directories when moving files out of them
	checkerWg              sync.WaitGroup         // wait for checkers
	toBeChecked            *pipe                  // checkers channel
	transfersWg            sync.WaitGroup         // wait for transfers
	toBeUploaded           *pipe                  // copiers channel
	errorMu                sync.Mutex             // Mutex covering the errors variables
	err                    error                  // normal error from copy process
	noRetryErr             error                  // error with NoRetry set
	fatalErr               error                  // fatal error
	commonHash             hash.Type              // common hash type between src and dst
	modifyWindow           time.Duration          // modify window between fsrc, fdst
	renameMapMu            sync.Mutex             // mutex to protect the below
	renameMap              map[string][]fs.Object // dst files by hash - only used by trackRenames
	renamerWg              sync.WaitGroup         // wait for renamers
	toBeRenamed            *pipe                  // renamers channel
	trackRenamesWg         sync.WaitGroup         // wg for background track renames
	trackRenamesCh         chan fs.Object         // objects are pumped in here
	renameCheck            []fs.Object            // accumulate files to check for rename here
	compareCopyDest        []fs.Fs                // place to check for files to server side copy
	backupDir              fs.Fs                  // place to store overwrites/deletes
	checkFirst             bool                   // if set run all the checkers before starting transfers
	maxDurationEndTime     time.Time              // end time if --max-duration is set
	logger                 operations.LoggerFn    // LoggerFn used to report the results of a sync (or bisync) to an io.Writer
	usingLogger            bool                   // whether we are using logger
	setDirMetadata         bool                   // if set we set the directory metadata
	setDirModTime          bool                   // if set we set the directory modtimes
	setDirModTimeAfter     bool                   // if set we set the directory modtimes at the end of the sync
	setDirModTimeMu        sync.Mutex             // protect setDirModTimes and modifiedDirs
	setDirModTimes         []setDirModTime        // directories that need their modtime set
	setDirModTimesMaxLevel int                    // max level of the directories to set
	modifiedDirs           map[string]struct{}    // dirs with changed contents (if s.setDirModTimeAfter)
}
|
|
|
|
|
|
|
|
// setDirModTime records a directory modtime update that has been
// deferred until the end of the sync (used when setDirModTimeAfter
// is set). For keeping track of delayed modtime sets.
type setDirModTime struct {
	src     fs.Directory // source directory the modtime comes from
	dst     fs.Directory // destination directory to update
	dir     string       // path of the directory
	modTime time.Time    // modification time to apply
	level   int          // the level of the directory, 0 is root
}
|
|
|
|
|
2020-03-21 18:35:34 +01:00
|
|
|
// trackRenamesStrategy is a bit set of the matching criteria used to
// pair up candidate renames (parsed from --track-renames-strategy by
// parseTrackRenamesStrategy).
type trackRenamesStrategy byte

const (
	trackRenamesStrategyHash trackRenamesStrategy = 1 << iota // match candidates by common hash
	trackRenamesStrategyModtime                               // match candidates by modification time
	trackRenamesStrategyLeaf                                  // match candidates by leaf name
)
|
|
|
|
|
|
|
|
func (strategy trackRenamesStrategy) hash() bool {
|
|
|
|
return (strategy & trackRenamesStrategyHash) != 0
|
|
|
|
}
|
|
|
|
|
|
|
|
func (strategy trackRenamesStrategy) modTime() bool {
|
|
|
|
return (strategy & trackRenamesStrategyModtime) != 0
|
|
|
|
}
|
|
|
|
|
2020-06-09 21:40:03 +02:00
|
|
|
func (strategy trackRenamesStrategy) leaf() bool {
|
|
|
|
return (strategy & trackRenamesStrategyLeaf) != 0
|
|
|
|
}
|
|
|
|
|
2019-06-17 10:34:30 +02:00
|
|
|
// newSyncCopyMove builds a syncCopyMove ready to sync/copy/move fsrc
// to fdst.
//
// deleteMode selects how deletions on the destination are done,
// DoMove selects move rather than copy, and the two *EmptySrcDirs
// flags control how empty source directories are handled.
//
// It validates the flag combinations (returning an error for invalid
// ones), derives the two internal contexts (s.ctx for transfers,
// s.inCtx for the march/pipelines) and creates the check/upload/rename
// pipes. A fatal error is returned if src and dst overlap (after
// filtering) and a destructive operation (sync delete or move) was
// requested.
func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) (*syncCopyMove, error) {
	// Refuse destructive operations on overlapping remotes
	if (deleteMode != fs.DeleteModeOff || DoMove) && operations.OverlappingFilterCheck(ctx, fdst, fsrc) {
		return nil, fserrors.FatalError(fs.ErrorOverlapping)
	}
	ci := fs.GetConfig(ctx)
	fi := filter.GetConfig(ctx)
	s := &syncCopyMove{
		ci:                     ci,
		fi:                     fi,
		fdst:                   fdst,
		fsrc:                   fsrc,
		deleteMode:             deleteMode,
		DoMove:                 DoMove,
		copyEmptySrcDirs:       copyEmptySrcDirs,
		deleteEmptySrcDirs:     deleteEmptySrcDirs,
		dir:                    "",
		srcFilesChan:           make(chan fs.Object, ci.Checkers+ci.Transfers),
		srcFilesResult:         make(chan error, 1),
		dstFilesResult:         make(chan error, 1),
		dstEmptyDirs:           make(map[string]fs.DirEntry),
		srcEmptyDirs:           make(map[string]fs.DirEntry),
		srcMoveEmptyDirs:       make(map[string]fs.DirEntry),
		noTraverse:             ci.NoTraverse,
		noCheckDest:            ci.NoCheckDest,
		noUnicodeNormalization: ci.NoUnicodeNormalization,
		deleteFilesCh:          make(chan fs.Object, ci.Checkers),
		trackRenames:           ci.TrackRenames,
		commonHash:             fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
		modifyWindow:           fs.GetModifyWindow(ctx, fsrc, fdst),
		trackRenamesCh:         make(chan fs.Object, ci.Checkers),
		checkFirst:             ci.CheckFirst,
		setDirMetadata:         ci.Metadata && fsrc.Features().ReadDirMetadata && fdst.Features().WriteDirMetadata,
		setDirModTime:          (!ci.NoUpdateDirModTime && fsrc.Features().CanHaveEmptyDirectories) && (fdst.Features().WriteDirSetModTime || fdst.Features().MkdirMetadata != nil || fdst.Features().DirSetModTime != nil),
		setDirModTimeAfter:     !ci.NoUpdateDirModTime && (!copyEmptySrcDirs || fsrc.Features().CanHaveEmptyDirectories && fdst.Features().DirModTimeUpdatesOnWrite),
		modifiedDirs:           make(map[string]struct{}),
	}

	s.logger, s.usingLogger = operations.GetLogger(ctx)

	// With deletes off (copy/move) tell the logger so it can adjust
	// how results are reported.
	if deleteMode == fs.DeleteModeOff {
		loggerOpt := operations.GetLoggerOpt(ctx)
		loggerOpt.DeleteModeOff = true
		loggerOpt.LoggerFn = s.logger
		ctx = operations.WithLoggerOpt(ctx, loggerOpt)
	}

	backlog := ci.MaxBacklog
	if s.checkFirst {
		fs.Infof(s.fdst, "Running all checks before starting transfers")
		// unlimited backlog so all checks can queue before transfers start
		backlog = -1
	}
	var err error
	s.toBeChecked, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetCheckQueue, backlog)
	if err != nil {
		return nil, err
	}
	s.toBeUploaded, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetTransferQueue, backlog)
	if err != nil {
		return nil, err
	}
	s.toBeRenamed, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetRenameQueue, backlog)
	if err != nil {
		return nil, err
	}
	if ci.MaxDuration > 0 {
		s.maxDurationEndTime = time.Now().Add(ci.MaxDuration)
		fs.Infof(s.fdst, "Transfer session %v deadline: %s", ci.CutoffMode, s.maxDurationEndTime.Format("2006/01/02 15:04:05"))
	}
	// If a max session duration has been defined add a deadline
	// to the main context if cutoff mode is hard. This will cut
	// the transfers off.
	if !s.maxDurationEndTime.IsZero() && ci.CutoffMode == fs.CutoffModeHard {
		s.ctx, s.cancel = context.WithDeadline(ctx, s.maxDurationEndTime)
	} else {
		s.ctx, s.cancel = context.WithCancel(ctx)
	}
	// Input context - cancel this for graceful stop.
	//
	// If a max session duration has been defined add a deadline
	// to the input context if cutoff mode is graceful or soft.
	// This won't stop the transfers but will cut the
	// list/check/transfer pipelines.
	if !s.maxDurationEndTime.IsZero() && ci.CutoffMode != fs.CutoffModeHard {
		s.inCtx, s.inCancel = context.WithDeadline(s.ctx, s.maxDurationEndTime)
	} else {
		s.inCtx, s.inCancel = context.WithCancel(s.ctx)
	}
	// --no-traverse is incompatible with sync deletions (unless using
	// --files-from, where the message would just be noise).
	if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
		if !fi.HaveFilesFrom() {
			fs.Errorf(nil, "Ignoring --no-traverse with sync")
		}
		s.noTraverse = false
	}
	s.trackRenamesStrategy, err = parseTrackRenamesStrategy(ci.TrackRenamesStrategy)
	if err != nil {
		return nil, err
	}
	// --no-check-dest is only valid for plain copies
	if s.noCheckDest {
		if s.deleteMode != fs.DeleteModeOff {
			return nil, errors.New("can't use --no-check-dest with sync: use copy instead")
		}
		if ci.Immutable {
			return nil, errors.New("can't use --no-check-dest with --immutable")
		}
		if s.backupDir != nil {
			return nil, errors.New("can't use --no-check-dest with --backup-dir")
		}
	}
	// Disable --track-renames when the remotes or the operation can't
	// support it, logging why each time.
	if s.trackRenames {
		// Don't track renames for remotes without server-side move support.
		if !operations.CanServerSideMove(fdst) {
			fs.Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy")
			s.trackRenames = false
		}
		if s.trackRenamesStrategy.hash() && s.commonHash == hash.None {
			fs.Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash")
			s.trackRenames = false
		}

		if s.trackRenamesStrategy.modTime() && s.modifyWindow == fs.ModTimeNotSupported {
			fs.Errorf(fdst, "Ignoring --track-renames as either the source or destination do not support modtime")
			s.trackRenames = false
		}

		if s.deleteMode == fs.DeleteModeOff {
			fs.Errorf(fdst, "Ignoring --track-renames as it doesn't work with copy or move, only sync")
			s.trackRenames = false
		}
	}
	if s.trackRenames {
		// track renames needs delete after
		if s.deleteMode != fs.DeleteModeOff {
			s.deleteMode = fs.DeleteModeAfter
		}
		if s.noTraverse {
			fs.Errorf(nil, "Ignoring --no-traverse with --track-renames")
			s.noTraverse = false
		}
	}
	// Make Fs for --backup-dir if required
	if ci.BackupDir != "" || ci.Suffix != "" {
		var err error
		s.backupDir, err = operations.BackupDir(ctx, fdst, fsrc, "")
		if err != nil {
			return nil, err
		}
	}
	// --compare-dest takes precedence over --copy-dest
	if len(ci.CompareDest) > 0 {
		var err error
		s.compareCopyDest, err = operations.GetCompareDest(ctx)
		if err != nil {
			return nil, err
		}
	} else if len(ci.CopyDest) > 0 {
		var err error
		s.compareCopyDest, err = operations.GetCopyDest(ctx, fdst)
		if err != nil {
			return nil, err
		}
	}
	return s, nil
}
|
|
|
|
|
2017-09-01 16:47:32 +02:00
|
|
|
// Check to see if the context has been cancelled
|
2016-07-04 14:12:33 +02:00
|
|
|
func (s *syncCopyMove) aborting() bool {
|
2018-07-19 23:41:34 +02:00
|
|
|
return s.ctx.Err() != nil
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
|
2017-01-03 18:35:12 +01:00
|
|
|
// This reads the map and pumps it into the channel passed in, closing
|
|
|
|
// the channel at the end
|
2018-01-12 17:30:54 +01:00
|
|
|
func (s *syncCopyMove) pumpMapToChan(files map[string]fs.Object, out chan<- fs.Object) {
|
2016-07-04 14:12:33 +02:00
|
|
|
outer:
|
2017-01-03 18:35:12 +01:00
|
|
|
for _, o := range files {
|
2016-07-04 14:12:33 +02:00
|
|
|
if s.aborting() {
|
|
|
|
break outer
|
|
|
|
}
|
|
|
|
select {
|
2017-01-03 18:35:12 +01:00
|
|
|
case out <- o:
|
2017-09-01 16:47:32 +02:00
|
|
|
case <-s.ctx.Done():
|
2016-07-04 14:12:33 +02:00
|
|
|
break outer
|
|
|
|
}
|
|
|
|
}
|
2017-01-03 18:35:12 +01:00
|
|
|
close(out)
|
|
|
|
s.srcFilesResult <- nil
|
|
|
|
}
|
|
|
|
|
2016-07-04 14:12:33 +02:00
|
|
|
// This checks the types of errors returned while copying files
|
|
|
|
func (s *syncCopyMove) processError(err error) {
|
|
|
|
if err == nil {
|
|
|
|
return
|
|
|
|
}
|
2019-07-25 12:28:27 +02:00
|
|
|
if err == context.DeadlineExceeded {
|
|
|
|
err = fserrors.NoRetryError(err)
|
2020-09-09 13:53:21 +02:00
|
|
|
} else if err == accounting.ErrorMaxTransferLimitReachedGraceful {
|
|
|
|
if s.inCtx.Err() == nil {
|
|
|
|
fs.Logf(nil, "%v - stopping transfers", err)
|
|
|
|
// Cancel the march and stop the pipes
|
|
|
|
s.inCancel()
|
|
|
|
}
|
2021-04-06 13:59:36 +02:00
|
|
|
} else if err == context.Canceled && s.inCtx.Err() != nil {
|
|
|
|
// Ignore context Canceled if we have called s.inCancel()
|
|
|
|
return
|
2019-07-25 12:28:27 +02:00
|
|
|
}
|
2016-07-04 14:12:33 +02:00
|
|
|
s.errorMu.Lock()
|
|
|
|
defer s.errorMu.Unlock()
|
|
|
|
switch {
|
2018-01-12 17:30:54 +01:00
|
|
|
case fserrors.IsFatalError(err):
|
2016-08-18 19:56:57 +02:00
|
|
|
if !s.aborting() {
|
2018-05-03 15:45:28 +02:00
|
|
|
fs.Errorf(nil, "Cancelling sync due to fatal error: %v", err)
|
2017-09-01 16:47:32 +02:00
|
|
|
s.cancel()
|
2016-08-18 19:56:57 +02:00
|
|
|
}
|
2016-07-04 14:12:33 +02:00
|
|
|
s.fatalErr = err
|
2018-01-12 17:30:54 +01:00
|
|
|
case fserrors.IsNoRetryError(err):
|
2016-07-04 14:12:33 +02:00
|
|
|
s.noRetryErr = err
|
|
|
|
default:
|
|
|
|
s.err = err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-30 14:06:24 +02:00
|
|
|
// Returns the current error (if any) in the order of precedence
|
2022-08-05 17:35:41 +02:00
|
|
|
//
|
|
|
|
// fatalErr
|
|
|
|
// normal error
|
|
|
|
// noRetryErr
|
2017-01-03 18:35:12 +01:00
|
|
|
func (s *syncCopyMove) currentError() error {
|
|
|
|
s.errorMu.Lock()
|
|
|
|
defer s.errorMu.Unlock()
|
|
|
|
if s.fatalErr != nil {
|
|
|
|
return s.fatalErr
|
|
|
|
}
|
|
|
|
if s.err != nil {
|
|
|
|
return s.err
|
|
|
|
}
|
|
|
|
return s.noRetryErr
|
|
|
|
}
|
|
|
|
|
2016-07-04 14:12:33 +02:00
|
|
|
// pairChecker reads Objects~s on in send to out if they need transferring.
//
// It decides, per src/dst pair, whether the file needs transferring
// (honouring --compare-dest/--copy-dest, --immutable, --backup-dir
// and --fix-case), and either queues it on out or, when moving an
// unchanged file, deletes the source.
//
// FIXME potentially doing lots of hashes at once
func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		pair, ok := in.GetMax(s.inCtx, fraction)
		if !ok {
			// pipe closed or march context cancelled
			return
		}
		src := pair.Src
		// NOTE(review): this err is never assigned in this function
		// (the inner ":=" declarations shadow it), so tr.Done always
		// receives nil — confirm whether that is intended.
		var err error
		tr := accounting.Stats(s.ctx).NewCheckingTransfer(src, "checking")
		// Check to see if can store this
		if src.Storable() {
			needTransfer := operations.NeedTransfer(s.ctx, pair.Dst, pair.Src)
			if needTransfer {
				// --compare-dest / --copy-dest may satisfy the transfer
				// elsewhere (or server-side copy it into place).
				NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, pair.Dst, pair.Src, s.compareCopyDest, s.backupDir)
				if err != nil {
					s.processError(err)
					s.logger(s.ctx, operations.TransferError, pair.Src, pair.Dst, err)
				}
				if NoNeedTransfer {
					needTransfer = false
				}
			}
			// Fix case for case insensitive filesystems
			//
			// NOTE(review): pair.Dst is dereferenced here without a nil
			// check — presumably --fix-case pairs always carry a Dst;
			// verify against the march matcher.
			if s.ci.FixCase && !s.ci.Immutable && src.Remote() != pair.Dst.Remote() {
				if newDst, err := operations.Move(s.ctx, s.fdst, nil, src.Remote(), pair.Dst); err != nil {
					fs.Errorf(pair.Dst, "Error while attempting to rename to %s: %v", src.Remote(), err)
					s.processError(err)
				} else {
					fs.Infof(pair.Dst, "Fixed case by renaming to: %s", src.Remote())
					pair.Dst = newDst
				}
			}
			if needTransfer {
				// If files are treated as immutable, fail if destination exists and does not match
				if s.ci.Immutable && pair.Dst != nil {
					err := fs.CountError(fserrors.NoRetryError(fs.ErrorImmutableModified))
					fs.Errorf(pair.Dst, "Source and destination exist but do not match: %v", err)
					s.processError(err)
				} else {
					// Record the directory whose contents will change so
					// its modtime can be (re)set at the end of the sync.
					if pair.Dst != nil {
						s.markDirModifiedObject(pair.Dst)
					} else {
						s.markDirModifiedObject(src)
					}
					// If destination already exists, then we must move it into --backup-dir if required
					if pair.Dst != nil && s.backupDir != nil {
						err := operations.MoveBackupDir(s.ctx, s.backupDir, pair.Dst)
						if err != nil {
							s.processError(err)
							s.logger(s.ctx, operations.TransferError, pair.Src, pair.Dst, err)
						} else {
							// If successful zero out the dst as it is no longer there and copy the file
							pair.Dst = nil
							ok = out.Put(s.inCtx, pair)
							if !ok {
								return
							}
						}
					} else {
						ok = out.Put(s.inCtx, pair)
						if !ok {
							return
						}
					}
				}
			} else {
				// If moving need to delete the files we don't need to copy
				if s.DoMove {
					// Delete src if no error on copy
					if operations.SameObject(src, pair.Dst) {
						fs.Logf(src, "Not removing source file as it is the same file as the destination")
					} else if s.ci.IgnoreExisting {
						fs.Debugf(src, "Not removing source file as destination file exists and --ignore-existing is set")
					} else if s.checkFirst && s.ci.OrderBy != "" {
						// If we want perfect ordering then use the transfers to delete the file
						//
						// We send src == dst, to say we want the src deleted
						ok = out.Put(s.inCtx, fs.ObjectPair{Src: src, Dst: src})
						if !ok {
							return
						}
					} else {
						deleteFileErr := operations.DeleteFile(s.ctx, src)
						s.processError(deleteFileErr)
						s.logger(s.ctx, operations.TransferError, pair.Src, pair.Dst, deleteFileErr)
					}
				}
			}
		}
		tr.Done(s.ctx, err)
	}
}
|
|
|
|
|
2017-01-03 18:35:12 +01:00
|
|
|
// pairRenamer reads Objects~s on in and attempts to rename them,
|
|
|
|
// otherwise it sends them out if they need transferring.
|
2020-03-13 22:12:22 +01:00
|
|
|
func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
|
2017-01-03 18:35:12 +01:00
|
|
|
defer wg.Done()
|
|
|
|
for {
|
2020-09-09 13:53:21 +02:00
|
|
|
pair, ok := in.GetMax(s.inCtx, fraction)
|
2018-07-19 23:41:34 +02:00
|
|
|
if !ok {
|
2017-01-03 18:35:12 +01:00
|
|
|
return
|
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
src := pair.Src
|
|
|
|
if !s.tryRename(src) {
|
|
|
|
// pass on if not renamed
|
2023-04-03 12:53:51 +02:00
|
|
|
fs.Debugf(src, "Need to transfer - No matching file found at Destination")
|
2023-08-15 22:33:32 +02:00
|
|
|
ok = out.Put(s.inCtx, pair)
|
2017-01-03 18:35:12 +01:00
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-22 18:53:10 +02:00
|
|
|
// pairCopyOrMove reads Objects on in and moves or copies them.
|
2020-03-13 22:12:22 +01:00
|
|
|
func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs, fraction int, wg *sync.WaitGroup) {
|
2016-07-04 14:12:33 +02:00
|
|
|
defer wg.Done()
|
2016-10-22 18:53:10 +02:00
|
|
|
var err error
|
2016-07-04 14:12:33 +02:00
|
|
|
for {
|
2020-09-09 13:53:21 +02:00
|
|
|
pair, ok := in.GetMax(s.inCtx, fraction)
|
2018-07-19 23:41:34 +02:00
|
|
|
if !ok {
|
2016-07-04 14:12:33 +02:00
|
|
|
return
|
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
src := pair.Src
|
2023-03-08 14:03:05 +01:00
|
|
|
dst := pair.Dst
|
2018-07-19 23:41:34 +02:00
|
|
|
if s.DoMove {
|
2023-03-08 14:03:05 +01:00
|
|
|
if src != dst {
|
2024-01-04 12:28:47 +01:00
|
|
|
_, err = operations.MoveTransfer(ctx, fdst, dst, src.Remote(), src)
|
2023-03-08 14:03:05 +01:00
|
|
|
} else {
|
|
|
|
// src == dst signals delete the src
|
|
|
|
err = operations.DeleteFile(ctx, src)
|
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
} else {
|
2023-03-08 14:03:05 +01:00
|
|
|
_, err = operations.Copy(ctx, fdst, dst, src.Remote(), src)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
s.processError(err)
|
2023-10-01 11:02:56 +02:00
|
|
|
if err != nil {
|
|
|
|
s.logger(ctx, operations.TransferError, src, dst, err)
|
|
|
|
}
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This starts the background checkers.
|
|
|
|
func (s *syncCopyMove) startCheckers() {
|
2020-11-05 12:33:32 +01:00
|
|
|
s.checkerWg.Add(s.ci.Checkers)
|
|
|
|
for i := 0; i < s.ci.Checkers; i++ {
|
|
|
|
fraction := (100 * i) / s.ci.Checkers
|
2020-03-13 22:12:22 +01:00
|
|
|
go s.pairChecker(s.toBeChecked, s.toBeUploaded, fraction, &s.checkerWg)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This stops the background checkers
|
|
|
|
func (s *syncCopyMove) stopCheckers() {
|
2018-07-19 23:41:34 +02:00
|
|
|
s.toBeChecked.Close()
|
2020-02-09 20:30:41 +01:00
|
|
|
fs.Debugf(s.fdst, "Waiting for checks to finish")
|
2016-07-04 14:12:33 +02:00
|
|
|
s.checkerWg.Wait()
|
|
|
|
}
|
|
|
|
|
|
|
|
// This starts the background transfers
|
|
|
|
func (s *syncCopyMove) startTransfers() {
|
2020-11-05 12:33:32 +01:00
|
|
|
s.transfersWg.Add(s.ci.Transfers)
|
|
|
|
for i := 0; i < s.ci.Transfers; i++ {
|
|
|
|
fraction := (100 * i) / s.ci.Transfers
|
2020-03-13 22:12:22 +01:00
|
|
|
go s.pairCopyOrMove(s.ctx, s.toBeUploaded, s.fdst, fraction, &s.transfersWg)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This stops the background transfers
|
|
|
|
func (s *syncCopyMove) stopTransfers() {
|
2018-07-19 23:41:34 +02:00
|
|
|
s.toBeUploaded.Close()
|
2020-02-09 20:30:41 +01:00
|
|
|
fs.Debugf(s.fdst, "Waiting for transfers to finish")
|
2016-10-22 18:53:10 +02:00
|
|
|
s.transfersWg.Wait()
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
|
2017-01-03 18:35:12 +01:00
|
|
|
// This starts the background renamers.
|
|
|
|
func (s *syncCopyMove) startRenamers() {
|
|
|
|
if !s.trackRenames {
|
|
|
|
return
|
|
|
|
}
|
2020-11-05 12:33:32 +01:00
|
|
|
s.renamerWg.Add(s.ci.Checkers)
|
|
|
|
for i := 0; i < s.ci.Checkers; i++ {
|
|
|
|
fraction := (100 * i) / s.ci.Checkers
|
2020-03-13 22:12:22 +01:00
|
|
|
go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, fraction, &s.renamerWg)
|
2017-01-03 18:35:12 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This stops the background renamers
|
|
|
|
func (s *syncCopyMove) stopRenamers() {
|
|
|
|
if !s.trackRenames {
|
|
|
|
return
|
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
s.toBeRenamed.Close()
|
2020-02-09 20:30:41 +01:00
|
|
|
fs.Debugf(s.fdst, "Waiting for renames to finish")
|
2017-01-03 18:35:12 +01:00
|
|
|
s.renamerWg.Wait()
|
|
|
|
}
|
|
|
|
|
2017-01-24 12:04:09 +01:00
|
|
|
// This starts the collection of possible renames
|
|
|
|
func (s *syncCopyMove) startTrackRenames() {
|
|
|
|
if !s.trackRenames {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
s.trackRenamesWg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer s.trackRenamesWg.Done()
|
|
|
|
for o := range s.trackRenamesCh {
|
|
|
|
s.renameCheck = append(s.renameCheck, o)
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
// This stops the background rename collection
|
|
|
|
func (s *syncCopyMove) stopTrackRenames() {
|
|
|
|
if !s.trackRenames {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
close(s.trackRenamesCh)
|
|
|
|
s.trackRenamesWg.Wait()
|
|
|
|
}
|
|
|
|
|
2017-01-25 20:59:53 +01:00
|
|
|
// This starts the background deletion of files for --delete-during
|
|
|
|
func (s *syncCopyMove) startDeleters() {
|
2018-01-12 17:30:54 +01:00
|
|
|
if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
|
2017-01-25 20:59:53 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
s.deletersWg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer s.deletersWg.Done()
|
2019-06-17 10:34:30 +02:00
|
|
|
err := operations.DeleteFilesWithBackupDir(s.ctx, s.deleteFilesCh, s.backupDir)
|
2017-01-25 20:59:53 +01:00
|
|
|
s.processError(err)
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
// This stops the background deleters
|
|
|
|
func (s *syncCopyMove) stopDeleters() {
|
2018-01-12 17:30:54 +01:00
|
|
|
if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
|
2017-01-25 20:59:53 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
close(s.deleteFilesCh)
|
|
|
|
s.deletersWg.Wait()
|
|
|
|
}
|
|
|
|
|
2016-07-04 14:12:33 +02:00
|
|
|
// This deletes the files in the dstFiles map. If checkSrcMap is set
// then it checks to see if they exist first in srcFiles the source
// file map, otherwise it unconditionally deletes them. If
// checkSrcMap is clear then it assumes that the any source files that
// have been found have been removed from dstFiles already.
//
// When errors occurred during the sync (and --ignore-errors is not
// set) nothing is deleted; every would-be delete is logged as an
// error instead and fs.ErrorNotDeleting is returned.
func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
	if accounting.Stats(s.ctx).Errored() && !s.ci.IgnoreErrors {
		fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
		// log all deletes as errors
		for remote, o := range s.dstFiles {
			if checkSrcMap {
				_, exists := s.srcFiles[remote]
				if exists {
					continue
				}
			}
			s.logger(s.ctx, operations.TransferError, nil, o, fs.ErrorNotDeleting)
		}
		return fs.ErrorNotDeleting
	}

	// Delete the spare files
	//
	// The feeder goroutine streams deletion candidates into toDelete,
	// stopping early if the sync is aborted, and closes the channel
	// when done so DeleteFilesWithBackupDir can drain it.
	toDelete := make(fs.ObjectsChan, s.ci.Checkers)
	go func() {
	outer:
		for remote, o := range s.dstFiles {
			if checkSrcMap {
				_, exists := s.srcFiles[remote]
				if exists {
					continue
				}
			}
			if s.aborting() {
				break
			}
			select {
			case <-s.ctx.Done():
				break outer
			case toDelete <- o:
			}
		}
		close(toDelete)
	}()
	return operations.DeleteFilesWithBackupDir(s.ctx, toDelete, s.backupDir)
}
|
|
|
|
|
2017-08-09 22:06:39 +02:00
|
|
|
// This deletes the empty directories in the slice passed in. It
|
|
|
|
// ignores any errors deleting directories
|
2020-11-05 12:33:32 +01:00
|
|
|
func (s *syncCopyMove) deleteEmptyDirectories(ctx context.Context, f fs.Fs, entriesMap map[string]fs.DirEntry) error {
|
2018-05-14 19:16:56 +02:00
|
|
|
if len(entriesMap) == 0 {
|
2017-08-09 22:06:39 +02:00
|
|
|
return nil
|
|
|
|
}
|
2020-11-05 12:33:32 +01:00
|
|
|
if accounting.Stats(ctx).Errored() && !s.ci.IgnoreErrors {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs)
|
|
|
|
return fs.ErrorNotDeletingDirs
|
2017-08-09 22:06:39 +02:00
|
|
|
}
|
|
|
|
|
2018-05-14 19:16:56 +02:00
|
|
|
var entries fs.DirEntries
|
|
|
|
for _, entry := range entriesMap {
|
|
|
|
entries = append(entries, entry)
|
|
|
|
}
|
2017-08-09 22:06:39 +02:00
|
|
|
// Now delete the empty directories starting from the longest path
|
|
|
|
sort.Sort(entries)
|
|
|
|
var errorCount int
|
|
|
|
var okCount int
|
|
|
|
for i := len(entries) - 1; i >= 0; i-- {
|
|
|
|
entry := entries[i]
|
2018-01-12 17:30:54 +01:00
|
|
|
dir, ok := entry.(fs.Directory)
|
2017-08-09 22:06:39 +02:00
|
|
|
if ok {
|
|
|
|
// TryRmdir only deletes empty directories
|
2019-06-17 10:34:30 +02:00
|
|
|
err := operations.TryRmdir(ctx, f, dir.Remote())
|
2017-08-09 22:06:39 +02:00
|
|
|
if err != nil {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(fs.LogDirName(f, dir.Remote()), "Failed to Rmdir: %v", err)
|
2017-08-09 22:06:39 +02:00
|
|
|
errorCount++
|
|
|
|
} else {
|
|
|
|
okCount++
|
|
|
|
}
|
|
|
|
} else {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(f, "Not a directory: %v", entry)
|
2017-08-09 22:06:39 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if errorCount > 0 {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(f, "failed to delete %d directories", errorCount)
|
2017-08-09 22:06:39 +02:00
|
|
|
}
|
|
|
|
if okCount > 0 {
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Debugf(f, "deleted %d directories", okCount)
|
2017-08-09 22:06:39 +02:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2024-04-04 18:59:56 +02:00
|
|
|
// markParentNotEmpty marks the parent of entry as not empty and, if
// entry is a directory, marks entry itself as potentially empty.
//
// It maintains srcEmptyDirs (candidates for empty-dir pruning) and, for
// moves with --delete-empty-src-dirs, srcMoveEmptyDirs. Both maps are
// guarded by srcEmptyDirsMu.
func (s *syncCopyMove) markParentNotEmpty(entry fs.DirEntry) {
	s.srcEmptyDirsMu.Lock()
	defer s.srcEmptyDirsMu.Unlock()
	// Mark entry as potentially empty if it is a directory
	_, isDir := entry.(fs.Directory)
	if isDir {
		s.srcEmptyDirs[entry.Remote()] = entry
		// if DoMove and --delete-empty-src-dirs flag is set then record the
		// directory but don't remove any yet, as we are about to move files
		// out of them, making the directory empty.
		if s.DoMove && s.deleteEmptySrcDirs {
			s.srcMoveEmptyDirs[entry.Remote()] = entry
		}
	}
	parentDir := path.Dir(entry.Remote())
	if isDir && s.copyEmptySrcDirs {
		// Mark its parent as not empty
		if parentDir == "." {
			parentDir = "" // normalise path.Dir's "." root to ""
		}
		delete(s.srcEmptyDirs, parentDir)
	}
	if !isDir {
		// Mark ALL its parents as not empty - a file anywhere below a
		// directory means none of its ancestors can be empty.
		for {
			if parentDir == "." {
				parentDir = ""
			}
			delete(s.srcEmptyDirs, parentDir)
			if parentDir == "" {
				break
			}
			parentDir = path.Dir(parentDir)
		}
	}
}
|
|
|
|
|
2020-03-21 18:35:34 +01:00
|
|
|
// parseTrackRenamesStrategy turns a config string into a trackRenamesStrategy
|
|
|
|
func parseTrackRenamesStrategy(strategies string) (strategy trackRenamesStrategy, err error) {
|
|
|
|
if len(strategies) == 0 {
|
|
|
|
return strategy, nil
|
|
|
|
}
|
|
|
|
for _, s := range strings.Split(strategies, ",") {
|
|
|
|
switch s {
|
|
|
|
case "hash":
|
|
|
|
strategy |= trackRenamesStrategyHash
|
|
|
|
case "modtime":
|
|
|
|
strategy |= trackRenamesStrategyModtime
|
2020-06-09 21:40:03 +02:00
|
|
|
case "leaf":
|
|
|
|
strategy |= trackRenamesStrategyLeaf
|
2020-03-21 18:35:34 +01:00
|
|
|
case "size":
|
|
|
|
// ignore
|
|
|
|
default:
|
2021-11-04 11:12:57 +01:00
|
|
|
return strategy, fmt.Errorf("unknown track renames strategy %q", s)
|
2020-03-20 14:04:56 +01:00
|
|
|
}
|
|
|
|
}
|
2020-03-21 18:35:34 +01:00
|
|
|
return strategy, nil
|
2020-03-20 14:04:56 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// renameID makes a string with the size and the other identifiers of the requested rename strategies
|
2017-01-03 18:35:12 +01:00
|
|
|
//
|
|
|
|
// it may return an empty string in which case no hash could be made
|
2020-03-21 18:35:34 +01:00
|
|
|
func (s *syncCopyMove) renameID(obj fs.Object, renamesStrategy trackRenamesStrategy, precision time.Duration) string {
|
2020-03-20 14:04:56 +01:00
|
|
|
var builder strings.Builder
|
|
|
|
|
|
|
|
fmt.Fprintf(&builder, "%d", obj.Size())
|
|
|
|
|
2020-03-21 18:35:34 +01:00
|
|
|
if renamesStrategy.hash() {
|
2020-03-20 14:04:56 +01:00
|
|
|
var err error
|
|
|
|
hash, err := obj.Hash(s.ctx, s.commonHash)
|
|
|
|
if err != nil {
|
|
|
|
fs.Debugf(obj, "Hash failed: %v", err)
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
if hash == "" {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2020-06-09 21:40:03 +02:00
|
|
|
builder.WriteRune(',')
|
|
|
|
builder.WriteString(hash)
|
2016-12-18 11:03:56 +01:00
|
|
|
}
|
2020-03-20 14:04:56 +01:00
|
|
|
|
2020-06-10 12:02:14 +02:00
|
|
|
// for renamesStrategy.modTime() we don't add to the hash but we check the times in
|
|
|
|
// popRenameMap
|
2020-03-20 14:04:56 +01:00
|
|
|
|
2020-06-09 21:40:03 +02:00
|
|
|
if renamesStrategy.leaf() {
|
|
|
|
builder.WriteRune(',')
|
|
|
|
builder.WriteString(path.Base(obj.Remote()))
|
|
|
|
}
|
|
|
|
|
2020-03-20 14:04:56 +01:00
|
|
|
return builder.String()
|
2017-01-04 00:03:20 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// pushRenameMap adds the object with hash to the rename map
|
2018-01-12 17:30:54 +01:00
|
|
|
func (s *syncCopyMove) pushRenameMap(hash string, obj fs.Object) {
|
2017-01-04 00:03:20 +01:00
|
|
|
s.renameMapMu.Lock()
|
|
|
|
s.renameMap[hash] = append(s.renameMap[hash], obj)
|
|
|
|
s.renameMapMu.Unlock()
|
2017-01-03 18:35:12 +01:00
|
|
|
}
|
2016-12-18 11:03:56 +01:00
|
|
|
|
2017-01-04 00:03:20 +01:00
|
|
|
// popRenameMap finds the object with hash and pops the first match from
// renameMap or returns nil if not found.
//
// With the modtime strategy enabled it additionally requires the
// candidate's modtime to be within modifyWindow of src's; only a
// matching candidate is removed and returned.
func (s *syncCopyMove) popRenameMap(hash string, src fs.Object) (dst fs.Object) {
	s.renameMapMu.Lock()
	defer s.renameMapMu.Unlock()
	dsts, ok := s.renameMap[hash]
	if ok && len(dsts) > 0 {
		// Element to remove
		i := 0

		// If using track renames strategy modtime then we need to check the modtimes here
		if s.trackRenamesStrategy.modTime() {
			i = -1 // -1 means "no candidate matched"
			srcModTime := src.ModTime(s.ctx)
			for j, dst := range dsts {
				dstModTime := dst.ModTime(s.ctx)
				dt := dstModTime.Sub(srcModTime)
				// within +/- modifyWindow counts as the same time
				if dt < s.modifyWindow && dt > -s.modifyWindow {
					i = j
					break
				}
			}
			// If nothing matched then return nil
			if i < 0 {
				return nil
			}
		}

		// Remove the entry and return it
		dst = dsts[i]
		dsts = append(dsts[:i], dsts[i+1:]...)
		if len(dsts) > 0 {
			s.renameMap[hash] = dsts
		} else {
			// drop empty buckets so the map doesn't accumulate dead keys
			delete(s.renameMap, hash)
		}
	}
	return dst
}
|
|
|
|
|
|
|
|
// makeRenameMap builds a map of the destination files by hash that
// match sizes in the slice of objects in s.renameCheck.
//
// It hashes candidate destination objects with a pool of Checkers
// goroutines and fills s.renameMap via pushRenameMap. It blocks until
// the map is complete.
func (s *syncCopyMove) makeRenameMap() {
	fs.Infof(s.fdst, "Making map for --track-renames")

	// first make a map of possible sizes we need to check
	possibleSizes := map[int64]struct{}{}
	for _, obj := range s.renameCheck {
		possibleSizes[obj.Size()] = struct{}{}
	}

	// pump all the dstFiles into in
	in := make(chan fs.Object, s.ci.Checkers)
	go s.pumpMapToChan(s.dstFiles, in)

	// now make a map of size,hash for all dstFiles
	s.renameMap = make(map[string][]fs.Object)
	var wg sync.WaitGroup
	wg.Add(s.ci.Checkers)
	for i := 0; i < s.ci.Checkers; i++ {
		go func() {
			defer wg.Done()
			for obj := range in {
				// only create hash for dst fs.Object if its size could match
				// (hashing is expensive, size filtering is free)
				if _, found := possibleSizes[obj.Size()]; found {
					tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj, "renaming")
					hash := s.renameID(obj, s.trackRenamesStrategy, s.modifyWindow)

					if hash != "" {
						s.pushRenameMap(hash, obj)
					}

					tr.Done(s.ctx, nil)
				}
			}
		}()
	}
	wg.Wait()
	fs.Infof(s.fdst, "Finished making map for --track-renames")
}
|
|
|
|
|
2020-05-25 08:05:53 +02:00
|
|
|
// tryRename renames an src object when doing track renames if
// possible, it returns true if the object was renamed.
//
// It looks the source object up in renameMap by its rename ID; on a
// match it server-side moves the matching destination object to
// src.Remote() and removes it from dstFiles so it won't be deleted
// later. Failures are logged at debug level and reported as "not
// renamed" (false) rather than as errors.
func (s *syncCopyMove) tryRename(src fs.Object) bool {
	// Calculate the hash of the src object
	hash := s.renameID(src, s.trackRenamesStrategy, fs.GetModifyWindow(s.ctx, s.fsrc, s.fdst))

	if hash == "" {
		// no usable rename ID could be built for this object
		return false
	}

	// Get a match on fdst
	dst := s.popRenameMap(hash, src)
	if dst == nil {
		return false
	}

	// Find dst object we are about to overwrite if it exists
	// (error deliberately ignored - a nil object just means nothing to overwrite)
	dstOverwritten, _ := s.fdst.NewObject(s.ctx, src.Remote())

	// Rename dst to have name src.Remote()
	_, err := operations.Move(s.ctx, s.fdst, dstOverwritten, src.Remote(), dst)
	if err != nil {
		fs.Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
		return false
	}

	// remove file from dstFiles if present
	s.dstFilesMu.Lock()
	delete(s.dstFiles, dst.Remote())
	s.dstFilesMu.Unlock()

	fs.Infof(src, "Renamed from %q", dst.Remote())
	return true
}
|
|
|
|
|
2017-01-25 20:59:53 +01:00
|
|
|
// run syncs fsrc into fdst.
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc.
//
// If DoMove is true then files will be moved instead of copied.
//
// dir is the start directory, "" for root.
//
// It orchestrates the whole pipeline: start the worker goroutines, march
// over both file trees, apply renames, run the deferred deletions and
// directory fix-ups, then tear everything down and return the
// accumulated error.
func (s *syncCopyMove) run() error {
	if operations.Same(s.fdst, s.fsrc) {
		fs.Errorf(s.fdst, "Nothing to do as source and destination are the same")
		return nil
	}

	// Start background checking and transferring pipeline
	s.startCheckers()
	s.startRenamers()
	// With --check-first the transfers are started only after all checks complete
	if !s.checkFirst {
		s.startTransfers()
	}
	s.startDeleters()
	s.dstFiles = make(map[string]fs.Object)

	s.startTrackRenames()

	// set up a march over fdst and fsrc
	m := &march.March{
		Ctx:                    s.inCtx,
		Fdst:                   s.fdst,
		Fsrc:                   s.fsrc,
		Dir:                    s.dir,
		NoTraverse:             s.noTraverse,
		Callback:               s,
		DstIncludeAll:          s.fi.Opt.DeleteExcluded,
		NoCheckDest:            s.noCheckDest,
		NoUnicodeNormalization: s.noUnicodeNormalization,
	}
	s.processError(m.Run(s.ctx))

	// Renames can only be resolved once the march has seen every file
	s.stopTrackRenames()
	if s.trackRenames {
		// Build the map of the remaining dstFiles by hash
		s.makeRenameMap()
		// Attempt renames for all the files which don't have a matching dst
		for _, src := range s.renameCheck {
			ok := s.toBeRenamed.Put(s.inCtx, fs.ObjectPair{Src: src, Dst: nil})
			if !ok {
				break
			}
		}
	}

	// Stop background checking and transferring pipeline
	s.stopCheckers()
	if s.checkFirst {
		fs.Infof(s.fdst, "Checks finished, now starting transfers")
		s.startTransfers()
	}
	s.stopRenamers()
	s.stopTransfers()
	s.stopDeleters()

	// Delete files after
	if s.deleteMode == fs.DeleteModeAfter {
		// don't delete on an errored sync unless --ignore-errors is set
		if s.currentError() != nil && !s.ci.IgnoreErrors {
			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
		} else {
			s.processError(s.deleteFiles(false))
		}
	}

	// Update modtimes for directories if necessary
	if s.setDirModTime && s.setDirModTimeAfter {
		s.processError(s.setDelayedDirModTimes(s.ctx))
	}

	// Prune empty directories
	if s.deleteMode != fs.DeleteModeOff {
		if s.currentError() != nil && !s.ci.IgnoreErrors {
			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs)
		} else {
			s.processError(s.deleteEmptyDirectories(s.ctx, s.fdst, s.dstEmptyDirs))
		}
	}

	// Delete empty fsrc subdirectories
	// if DoMove and --delete-empty-src-dirs flag is set
	if s.DoMove && s.deleteEmptySrcDirs {
		// delete potentially empty subdirectories that were part of the move
		s.processError(s.deleteEmptyDirectories(s.ctx, s.fsrc, s.srcMoveEmptyDirs))
	}

	// Read the error out of the contexts if there is one
	s.processError(s.ctx.Err())
	s.processError(s.inCtx.Err())

	// If the duration was exceeded then add a Fatal Error so we don't retry
	if !s.maxDurationEndTime.IsZero() && time.Since(s.maxDurationEndTime) > 0 {
		fs.Errorf(s.fdst, "%v", ErrorMaxDurationReachedFatal)
		s.processError(ErrorMaxDurationReachedFatal)
	}

	// Print nothing to transfer message if there were no transfers and no errors
	if s.deleteMode != fs.DeleteModeOnly && accounting.Stats(s.ctx).GetTransfers() == 0 && s.currentError() == nil {
		fs.Infof(nil, "There was nothing to transfer")
	}

	// cancel the contexts to free resources
	s.inCancel()
	s.cancel()
	return s.currentError()
}
|
|
|
|
|
2017-09-01 16:21:46 +02:00
|
|
|
// DstOnly have an object which is in the destination only.
//
// Part of the march.Marcher callback interface. Depending on the delete
// mode the object is recorded for later deletion, queued for immediate
// deletion, or merely logged. Returns whether a directory should be
// recursed into.
func (s *syncCopyMove) DstOnly(dst fs.DirEntry) (recurse bool) {
	if s.deleteMode == fs.DeleteModeOff {
		if s.usingLogger {
			switch x := dst.(type) {
			case fs.Object:
				s.logger(s.ctx, operations.MissingOnSrc, nil, x, nil)
			case fs.Directory:
				// it's a directory that we'd normally skip, because we're not deleting anything on the dest
				// however, to make sure every file is logged, we need to list it, so we need to return true here.
				// we skip this when not using logger.
				s.logger(s.ctx, operations.MissingOnSrc, nil, dst, fs.ErrorIsDir)
				return true
			}
		}
		return false
	}
	switch x := dst.(type) {
	case fs.Object:
		s.logger(s.ctx, operations.MissingOnSrc, nil, x, nil)
		switch s.deleteMode {
		case fs.DeleteModeAfter:
			// record object as needs deleting
			s.dstFilesMu.Lock()
			s.dstFiles[x.Remote()] = x
			s.dstFilesMu.Unlock()
		case fs.DeleteModeDuring, fs.DeleteModeOnly:
			// hand it straight to the background deleter, bailing out on cancel
			select {
			case <-s.ctx.Done():
				return
			case s.deleteFilesCh <- x:
			}
		default:
			panic(fmt.Sprintf("unexpected delete mode %d", s.deleteMode))
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		// Record directory as it is potentially empty and needs deleting
		if s.fdst.Features().CanHaveEmptyDirectories {
			s.dstEmptyDirsMu.Lock()
			s.dstEmptyDirs[dst.Remote()] = dst
			s.dstEmptyDirsMu.Unlock()
			s.logger(s.ctx, operations.MissingOnSrc, nil, dst, fs.ErrorIsDir)
		}
		return true
	default:
		panic("Bad object in DirEntries")

	}
	return false
}
|
|
|
|
|
2024-02-29 01:29:38 +01:00
|
|
|
// keeps track of dirs with changed contents, to avoid setting modtimes on dirs that haven't changed
|
|
|
|
func (s *syncCopyMove) markDirModified(dir string) {
|
|
|
|
if !s.setDirModTimeAfter {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
s.setDirModTimeMu.Lock()
|
|
|
|
defer s.setDirModTimeMu.Unlock()
|
|
|
|
s.modifiedDirs[dir] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// like markDirModified, but accepts an Object instead of a string.
|
|
|
|
// the marked dir will be this object's parent.
|
|
|
|
func (s *syncCopyMove) markDirModifiedObject(o fs.Object) {
|
|
|
|
dir := path.Dir(o.Remote())
|
|
|
|
if dir == "." {
|
|
|
|
dir = ""
|
|
|
|
}
|
|
|
|
s.markDirModified(dir)
|
|
|
|
}
|
|
|
|
|
2024-02-06 17:00:34 +01:00
|
|
|
// copyDirMetadata copies the src directory modTime or Metadata to dst
// or f if nil. If dst is nil then it uses dir as the name of the new
// directory.
//
// It returns the destination directory if possible. Note that this may
// be nil.
//
// When setDirModTimeAfter is set, directories that need a modtime update
// are queued (with their depth level) in s.setDirModTimes for
// setDelayedDirModTimes to process at the end of the sync.
func (s *syncCopyMove) copyDirMetadata(ctx context.Context, f fs.Fs, dst fs.Directory, dir string, src fs.Directory) (newDst fs.Directory) {
	var err error
	equal := operations.DirsEqual(ctx, src, dst, operations.DirsEqualOpt{ModifyWindow: s.modifyWindow, SetDirModtime: s.setDirModTime, SetDirMetadata: s.setDirMetadata})
	// Nothing to do at all if the dirs are already equal and we aren't deferring
	if !s.setDirModTimeAfter && equal {
		return nil
	}
	if s.setDirModTimeAfter && equal {
		// dirs are equal now but may still need a deferred modtime update
		newDst = dst
	} else if s.copyEmptySrcDirs {
		if s.setDirMetadata {
			newDst, err = operations.CopyDirMetadata(ctx, f, dst, dir, src)
		} else if s.setDirModTime {
			if dst == nil {
				newDst, err = operations.MkdirModTime(ctx, f, dir, src.ModTime(ctx))
			} else {
				newDst, err = operations.SetDirModTime(ctx, f, dst, dir, src.ModTime(ctx))
			}
		} else if dst == nil {
			// Create the directory if it doesn't exist
			err = operations.Mkdir(ctx, f, dir)
		}
	} else {
		newDst = dst
	}
	// If we need to set modtime after and we created a dir, then save it for later
	if s.setDirModTime && s.setDirModTimeAfter && err == nil {
		if newDst != nil {
			dir = newDst.Remote()
		}
		// level = directory depth, used so deepest dirs are stamped first
		level := strings.Count(dir, "/") + 1
		// The root directory "" is at the top level
		if dir == "" {
			level = 0
		}
		s.setDirModTimeMu.Lock()
		// Keep track of the maximum level inserted
		if level > s.setDirModTimesMaxLevel {
			s.setDirModTimesMaxLevel = level
		}
		set := setDirModTime{
			src:     src,
			dst:     newDst,
			dir:     dir,
			modTime: src.ModTime(ctx),
			level:   level,
		}
		s.setDirModTimes = append(s.setDirModTimes, set)
		s.setDirModTimeMu.Unlock()
		fs.Debugf(nil, "Added delayed dir = %q, newDst=%v", dir, newDst)
	}
	s.processError(err)
	if err != nil {
		return nil
	}
	return newDst
}
|
|
|
|
|
|
|
|
// setDelayedDirModTimes sets the deferred modtimes (or metadata) for the
// directories queued in s.setDirModTimes.
//
// Directories at the same depth level are processed in parallel (up to
// Checkers at once), deepest level first, so that stamping a parent
// cannot disturb a child that is still being written. Individual
// failures are counted rather than aborting the whole pass.
func (s *syncCopyMove) setDelayedDirModTimes(ctx context.Context) error {
	s.setDirModTimeMu.Lock()
	defer s.setDirModTimeMu.Unlock()

	// Timestamp all directories at the same level in parallel, deepest first
	// We do this by iterating the slice multiple times to save memory
	// There could be a lot of directories in this slice.
	errCount := errcount.New()
	for level := s.setDirModTimesMaxLevel; level >= 0; level-- {
		g, gCtx := errgroup.WithContext(ctx)
		g.SetLimit(s.ci.Checkers)
		for _, item := range s.setDirModTimes {
			if item.level != level {
				continue
			}
			// End early if error
			if gCtx.Err() != nil {
				break
			}
			// skip dirs whose contents never changed
			if _, ok := s.modifiedDirs[item.dir]; !ok {
				continue
			}
			if !s.copyEmptySrcDirs {
				if _, isEmpty := s.srcEmptyDirs[item.dir]; isEmpty {
					continue
				}
			}
			item := item // copy for the closure (pre-Go 1.22 loop semantics)
			if s.setDirModTimeAfter { // mark dir's parent as modified
				dir := path.Dir(item.dir)
				if dir == "." {
					dir = ""
				}
				s.modifiedDirs[dir] = struct{}{} // lock is already held
			}
			g.Go(func() error {
				var err error
				if s.setDirMetadata {
					_, err = operations.CopyDirMetadata(gCtx, s.fdst, item.dst, item.dir, item.src)
				} else {
					_, err = operations.SetDirModTime(gCtx, s.fdst, item.dst, item.dir, item.modTime)
				}
				if err != nil {
					err = fs.CountError(err)
					fs.Errorf(item.dir, "Failed to update directory timestamp or metadata: %v", err)
					errCount.Add(err)
				}
				return nil // don't return errors, just count them
			})
		}
		err := g.Wait()
		if err != nil {
			return err
		}
	}
	return errCount.Err("failed to set directory modtime")
}
|
|
|
|
|
2017-09-01 16:21:46 +02:00
|
|
|
// SrcOnly have an object which is in the source only.
//
// Part of the march.Marcher callback interface. New objects are either
// queued as rename candidates (--track-renames) or queued for upload;
// new directories are created on the destination. Returns whether a
// directory should be recursed into. Does nothing in delete-only mode.
func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
	if s.deleteMode == fs.DeleteModeOnly {
		return false
	}
	switch x := src.(type) {
	case fs.Object:
		s.logger(s.ctx, operations.MissingOnDst, x, nil, nil)
		s.markParentNotEmpty(src)

		if s.trackRenames {
			// Save object to check for a rename later
			select {
			case <-s.ctx.Done():
				return
			case s.trackRenamesCh <- x:
			}
		} else {
			// Check CompareDest && CopyDest
			// NOTE(review): exported-style local name predates current
			// lint conventions; kept as-is in this doc-only pass.
			NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, nil, x, s.compareCopyDest, s.backupDir)
			if err != nil {
				s.processError(err)
				s.logger(s.ctx, operations.TransferError, x, nil, err)
			}
			if !NoNeedTransfer {
				// No need to check since doesn't exist
				fs.Debugf(src, "Need to transfer - File not found at Destination")
				s.markDirModifiedObject(x)
				ok := s.toBeUploaded.Put(s.inCtx, fs.ObjectPair{Src: x, Dst: nil})
				if !ok {
					return
				}
			}
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		s.markParentNotEmpty(src)
		s.logger(s.ctx, operations.MissingOnDst, src, nil, fs.ErrorIsDir)

		// Create the directory and make sure the Metadata/ModTime is correct
		s.copyDirMetadata(s.ctx, s.fdst, nil, x.Remote(), x)
		s.markDirModified(x.Remote())
		return true
	default:
		panic("Bad object in DirEntries")
	}
	return false
}
|
|
|
|
|
2017-09-01 16:21:46 +02:00
|
|
|
// Match is called when src and dst are present, so sync src to dst.
//
// Part of the march.Marcher callback interface. Matching objects are
// queued for the checkers; matching directories get their metadata
// synced (and case fixed with --fix-case). A file/directory type
// mismatch is reported as an error rather than overwritten. Returns
// whether a directory should be recursed into.
func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
	switch srcX := src.(type) {
	case fs.Object:
		s.markParentNotEmpty(src)

		if s.deleteMode == fs.DeleteModeOnly {
			return false
		}
		dstX, ok := dst.(fs.Object)
		if ok {
			// No logger here because we'll handle it in equal()
			ok = s.toBeChecked.Put(s.inCtx, fs.ObjectPair{Src: srcX, Dst: dstX})
			if !ok {
				// queue shut down (e.g. context cancelled)
				return false
			}
		} else {
			// FIXME src is file, dst is directory
			err := errors.New("can't overwrite directory with file")
			fs.Errorf(dst, "%v", err)
			s.processError(err)
			s.logger(ctx, operations.TransferError, srcX, dstX, err)
		}
	case fs.Directory:
		// Do the same thing to the entire contents of the directory
		s.markParentNotEmpty(src)
		dstX, ok := dst.(fs.Directory)
		if ok {
			s.logger(s.ctx, operations.Match, src, dst, fs.ErrorIsDir)
			// Create the directory and make sure the Metadata/ModTime is correct
			s.copyDirMetadata(s.ctx, s.fdst, dstX, "", srcX)

			if s.ci.FixCase && !s.ci.Immutable && src.Remote() != dst.Remote() {
				// Fix case for case insensitive filesystems
				// Fix each dir before recursing into subdirs and files
				err := operations.DirMoveCaseInsensitive(s.ctx, s.fdst, dst.Remote(), src.Remote())
				if err != nil {
					fs.Errorf(dst, "Error while attempting to rename to %s: %v", src.Remote(), err)
					s.processError(err)
				} else {
					fs.Infof(dst, "Fixed case by renaming to: %s", src.Remote())
				}
			}

			return true
		}
		// FIXME src is dir, dst is file
		err := errors.New("can't overwrite file with directory")
		fs.Errorf(dst, "%v", err)
		s.processError(err)
		s.logger(ctx, operations.TransferError, src.(fs.ObjectInfo), dst.(fs.ObjectInfo), err)
	default:
		panic("Bad object in DirEntries")
	}
	return false
}
|
|
|
|
|
2017-01-25 20:35:14 +01:00
|
|
|
// Syncs fsrc into fdst
|
|
|
|
//
|
|
|
|
// If Delete is true then it deletes any files in fdst that aren't in fsrc
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
// If DoMove is true then files will be moved instead of copied.
|
2017-01-25 20:35:14 +01:00
|
|
|
//
|
|
|
|
// dir is the start directory, "" for root
|
2019-06-17 10:34:30 +02:00
|
|
|
func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
2020-11-05 12:33:32 +01:00
|
|
|
ci := fs.GetConfig(ctx)
|
2018-01-12 17:30:54 +01:00
|
|
|
if deleteMode != fs.DeleteModeOff && DoMove {
|
|
|
|
return fserrors.FatalError(errors.New("can't delete and move at the same time"))
|
2017-01-25 20:35:14 +01:00
|
|
|
}
|
2017-01-25 20:59:53 +01:00
|
|
|
// Run an extra pass to delete only
|
2018-01-12 17:30:54 +01:00
|
|
|
if deleteMode == fs.DeleteModeBefore {
|
2020-11-05 12:33:32 +01:00
|
|
|
if ci.TrackRenames {
|
2018-01-12 17:30:54 +01:00
|
|
|
return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
|
|
|
// only delete stuff during in this pass
|
2019-06-17 10:34:30 +02:00
|
|
|
do, err := newSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs)
|
2017-01-25 20:59:53 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-06-13 15:35:51 +02:00
|
|
|
err = do.run()
|
2017-01-25 20:59:53 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// Next pass does a copy only
|
2018-01-12 17:30:54 +01:00
|
|
|
deleteMode = fs.DeleteModeOff
|
2017-01-25 20:59:53 +01:00
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
do, err := newSyncCopyMove(ctx, fdst, fsrc, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs)
|
2017-01-10 22:47:03 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-06-13 15:35:51 +02:00
|
|
|
return do.run()
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
|
2017-01-25 20:35:14 +01:00
|
|
|
// Sync fsrc into fdst
|
2019-06-17 10:34:30 +02:00
|
|
|
func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
|
2020-11-05 12:33:32 +01:00
|
|
|
ci := fs.GetConfig(ctx)
|
|
|
|
return runSyncCopyMove(ctx, fdst, fsrc, ci.DeleteMode, false, false, copyEmptySrcDirs)
|
2017-01-25 20:35:14 +01:00
|
|
|
}
|
|
|
|
|
2016-07-04 14:12:33 +02:00
|
|
|
// CopyDir copies fsrc into fdst
|
2019-06-17 10:34:30 +02:00
|
|
|
func CopyDir(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
|
|
|
|
return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// moveDir moves fsrc into fdst
|
2019-06-17 10:34:30 +02:00
|
|
|
func moveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
|
|
|
return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// MoveDir moves fsrc into fdst
|
2019-06-17 10:34:30 +02:00
|
|
|
func MoveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
|
2020-11-26 18:10:41 +01:00
|
|
|
fi := filter.GetConfig(ctx)
|
2018-01-12 17:30:54 +01:00
|
|
|
if operations.Same(fdst, fsrc) {
|
|
|
|
fs.Errorf(fdst, "Nothing to do as source and destination are the same")
|
2016-07-04 14:12:33 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// First attempt to use DirMover if exists, same Fs and no filters are active
|
2020-11-26 18:10:41 +01:00
|
|
|
if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && operations.SameConfig(fsrc, fdst) && fi.InActive() {
|
2020-10-13 23:43:40 +02:00
|
|
|
if operations.SkipDestructive(ctx, fdst, "server-side directory move") {
|
2016-07-11 12:36:46 +02:00
|
|
|
return nil
|
|
|
|
}
|
2020-10-13 23:43:40 +02:00
|
|
|
fs.Debugf(fdst, "Using server-side directory move")
|
2019-06-17 10:34:30 +02:00
|
|
|
err := fdstDirMove(ctx, fsrc, "", "")
|
2016-07-04 14:12:33 +02:00
|
|
|
switch err {
|
2018-01-12 17:30:54 +01:00
|
|
|
case fs.ErrorCantDirMove, fs.ErrorDirExists:
|
|
|
|
fs.Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err)
|
2016-07-04 14:12:33 +02:00
|
|
|
case nil:
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Infof(fdst, "Server side directory move succeeded")
|
2016-07-04 14:12:33 +02:00
|
|
|
return nil
|
|
|
|
default:
|
2019-11-18 15:13:02 +01:00
|
|
|
err = fs.CountError(err)
|
2018-01-12 17:30:54 +01:00
|
|
|
fs.Errorf(fdst, "Server side directory move failed: %v", err)
|
2016-07-04 14:12:33 +02:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-11 12:36:46 +02:00
|
|
|
// Otherwise move the files one by one
|
2019-06-17 10:34:30 +02:00
|
|
|
return moveDir(ctx, fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs)
|
2016-07-04 14:12:33 +02:00
|
|
|
}
|