bisync: full support for comparing checksum, size, modtime - fixes #5679 fixes #5683 fixes #5684 fixes #5675

Before this change, bisync could only detect changes based on modtime, and
would refuse to run if either path lacked modtime support. This made bisync
unavailable for many of rclone's backends. Additionally, bisync did not account
for the Fs's precision when comparing modtimes, meaning that they could only be
reliably compared within the same side -- not against the opposite side. Size
and checksum (even when available) were completely ignored when computing
deltas.

After this change, bisync now fully supports comparing based on any combination
of size, modtime, and checksum, lifting the prior restriction on backends
without modtime support. The comparison logic considers the backend's
precision, hash types, and other features as appropriate.
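
For illustration, here is a minimal sketch (not the actual bisync code) of
what precision-aware modtime comparison means in practice: two modtimes count
as equal when they differ by no more than the backend's precision.

```go
package main

import (
	"fmt"
	"time"
)

// modtimeEqual reports whether t1 and t2 match within the given precision
// (e.g. 1ns for local, 1ms for S3, 1s for some FTP servers).
func modtimeEqual(t1, t2 time.Time, precision time.Duration) bool {
	dt := t1.Sub(t2)
	if dt < 0 {
		dt = -dt
	}
	return dt <= precision
}

func main() {
	a := time.Date(2023, 11, 30, 19, 44, 38, 123456789, time.UTC)
	b := a.Truncate(time.Second) // what a 1s-precision backend would store
	fmt.Println(modtimeEqual(a, b, time.Nanosecond)) // false
	fmt.Println(modtimeEqual(a, b, time.Second))     // true
}
```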

The new comparison features are controlled by an optional --compare flag, which
takes any combination of size, modtime, and checksum and even supports some
combinations not otherwise possible in `sync` (like comparing all three at the
same time.) By
default (without the --compare flag), bisync inherits the same comparison
options as `sync` (that is: size and modtime by default, unless modified with
flags such as --checksum or --size-only.) If the --compare flag is set, it will
override these defaults.
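
As a rough sketch of the flag's semantics (the CompareOpt struct and
parseCompare helper here are hypothetical, loosely modeled on the
b.opt.Compare fields visible in the diff below):

```go
package main

import (
	"fmt"
	"strings"
)

// CompareOpt is an illustrative stand-in; the real option struct has
// more fields (hash types, slow-hash handling, etc.)
type CompareOpt struct {
	Size, Modtime, Checksum bool
}

// parseCompare turns a --compare value like "size,modtime,checksum"
// into a CompareOpt, rejecting unknown terms.
func parseCompare(s string) (CompareOpt, error) {
	var opt CompareOpt
	for _, part := range strings.Split(s, ",") {
		switch strings.ToLower(strings.TrimSpace(part)) {
		case "size":
			opt.Size = true
		case "modtime":
			opt.Modtime = true
		case "checksum":
			opt.Checksum = true
		default:
			return opt, fmt.Errorf("unknown --compare item: %q", part)
		}
	}
	return opt, nil
}

func main() {
	opt, err := parseCompare("size,modtime,checksum")
	fmt.Println(opt, err) // {true true true} <nil>
}
```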

If --compare includes checksum and both remotes support checksums but have no
hash types in common with each other, checksums will be considered only for
comparisons within the same side (to determine what has changed since the prior
sync), but not for comparisons against the opposite side. If one side supports
checksums and the other does not, checksums will only be considered on the side
that supports them. When comparing with checksum and/or size without modtime,
bisync cannot determine whether a file is newer or older -- only whether it is
changed or unchanged. (If it is changed on both sides, bisync still does the
standard equality-check to avoid declaring a sync conflict unless it absolutely
has to.)
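
A toy sketch of the hash-type selection rules described above, using plain
strings rather than rclone's hash.Type (the chooseHashTypes helper is
hypothetical):

```go
package main

import "fmt"

// chooseHashTypes picks a common hash if one exists; otherwise each side
// falls back to its own best hash (usable only for same-side comparisons),
// or "" if that side supports no hashes at all.
func chooseHashTypes(side1, side2 []string) (ht1, ht2 string) {
	for _, h1 := range side1 {
		for _, h2 := range side2 {
			if h1 == h2 {
				return h1, h2 // common hash: usable for cross-side comparison
			}
		}
	}
	if len(side1) > 0 {
		ht1 = side1[0]
	}
	if len(side2) > 0 {
		ht2 = side2[0]
	}
	return ht1, ht2
}

func main() {
	fmt.Println(chooseHashTypes([]string{"md5", "sha1"}, []string{"sha1"})) // sha1 sha1
	fmt.Println(chooseHashTypes([]string{"md5"}, []string{"quickxor"}))     // md5 quickxor (same-side only)
	fmt.Println(chooseHashTypes([]string{"md5"}, nil))                      // md5 ""
}
```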

Also included are some new flags to customize the checksum comparison behavior
on backends where hashes are slow or unavailable. --no-slow-hash and
--slow-hash-sync-only allow selectively ignoring checksums on backends such as
local where they are slow. --download-hash allows computing them by downloading
when (and only when) they're otherwise not available. This probably won't be
practical with large files, but it can be a good choice for syncing
small-but-important files with maximum accuracy (for example, a source code
repo on a crypt remote.) An additional advantage over methods like
cryptcheck is that the original file is not required for comparison (for
example, --download-hash can be used to bisync two different crypt remotes with
different passwords.)
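
Conceptually, --download-hash boils down to streaming the content through a
hasher. A minimal standalone sketch (hashing an io.Reader with MD5; not
rclone's actual implementation, which works on remote objects):

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"io"
	"strings"
)

// downloadHash reads the full content and returns its MD5 hex digest.
// For a remote object this means downloading the whole file, which is
// why the flag is best suited to small-but-important files.
func downloadHash(r io.Reader) (string, error) {
	h := md5.New()
	if _, err := io.Copy(h, r); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, _ := downloadHash(strings.NewReader("hello"))
	fmt.Println(sum) // 5d41402abc4b2a76b9719d911017c592
}
```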

Additionally, all of the above are now considered during the final --check-sync
for much-improved accuracy (before this change, it only compared filenames!)
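
A hypothetical sketch of what the improved --check-sync amounts to: comparing
whole listing records field by field instead of names only (the listingRecord
struct is illustrative, not bisync's real listing format):

```go
package main

import (
	"fmt"
	"time"
)

type listingRecord struct {
	Name    string
	Size    int64
	Modtime time.Time
	Hash    string
}

// recordsMatch compares two listing records on every field being compared,
// skipping fields the current --compare set ignores.
func recordsMatch(a, b listingRecord, checkSize, checkModtime, checkHash bool, precision time.Duration) bool {
	if a.Name != b.Name {
		return false
	}
	if checkSize && a.Size != b.Size {
		return false
	}
	if checkModtime {
		dt := a.Modtime.Sub(b.Modtime)
		if dt < 0 {
			dt = -dt
		}
		if dt > precision {
			return false
		}
	}
	if checkHash && a.Hash != b.Hash {
		return false
	}
	return true
}

func main() {
	now := time.Now()
	a := listingRecord{"file.txt", 5, now, "abc"}
	b := listingRecord{"file.txt", 5, now, "abc"}
	fmt.Println(recordsMatch(a, b, true, true, true, time.Second)) // true
}
```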

Many other details are explained in the included docs.
Author: nielash
Date:   2023-11-30 19:44:38 -05:00
parent d8e07bfd8e
commit b4216648e4
308 changed files with 5469 additions and 3243 deletions

@@ -12,6 +12,7 @@ import (
     "github.com/rclone/rclone/cmd/bisync/bilib"
     "github.com/rclone/rclone/fs"
     "github.com/rclone/rclone/fs/filter"
+    "github.com/rclone/rclone/fs/hash"
     "github.com/rclone/rclone/fs/operations"
     "github.com/rclone/rclone/fs/sync"
     "github.com/rclone/rclone/lib/terminal"
@@ -40,7 +41,18 @@ var logger = operations.NewLoggerOpt()
 var lock mutex.Mutex
 var once mutex.Once
 var ignoreListingChecksum bool
-var ci *fs.ConfigInfo
+var ignoreListingModtime bool
+var hashTypes map[string]hash.Type
+var queueCI *fs.ConfigInfo
+
+// allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2
+func getHashType(fname string) hash.Type {
+    ht, ok := hashTypes[fname]
+    if ok {
+        return ht
+    }
+    return hash.None
+}
 
 // FsPathIfAny handles type assertions and returns a formatted bilib.FsPath if valid, otherwise ""
 func FsPathIfAny(x fs.DirEntry) string {
@@ -102,13 +114,16 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
     result.Flags = "-"
     if side != nil {
         result.Size = side.Size()
-        result.Modtime = side.ModTime(ctx).In(time.UTC)
+        if !ignoreListingModtime {
+            result.Modtime = side.ModTime(ctx).In(TZ)
+        }
         if !ignoreListingChecksum {
             sideObj, ok := side.(fs.ObjectInfo)
             if ok {
-                result.Hash, _ = sideObj.Hash(ctx, sideObj.Fs().Hashes().GetOne())
+                result.Hash, _ = sideObj.Hash(ctx, getHashType(sideObj.Fs().Name()))
+                result.Hash, _ = tryDownloadHash(ctx, sideObj, result.Hash)
             }
         }
     }
     result.IsWinner = result.Winner.Obj == side
@@ -126,13 +141,13 @@ func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEn
         result.Size = -1
     }
-    if result.Size < 0 && result.Flags != "d" && (ci.CheckSum || ci.SizeOnly) {
+    prettyprint(result, "writing result", fs.LogLevelDebug)
+    if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
         once.Do(func() {
             fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
         })
     }
-    fs.Debugf(nil, "writing result: %v", result)
     err := json.NewEncoder(opt.JSON).Encode(result)
     if err != nil {
         fs.Errorf(result, "Error encoding JSON: %v", err)
@@ -149,13 +164,45 @@ func ReadResults(results io.Reader) []Results {
         if err := dec.Decode(&r); err == io.EOF {
             break
         }
-        fs.Debugf(nil, "result: %v", r)
+        prettyprint(r, "result", fs.LogLevelDebug)
         slice = append(slice, r)
     }
     return slice
 }
+
+// for setup code shared by both fastCopy and resyncDir
+func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
+    queueCI = fs.GetConfig(ctx)
+    ignoreListingChecksum = b.opt.IgnoreListingChecksum
+    ignoreListingModtime = !b.opt.Compare.Modtime
+    hashTypes = map[string]hash.Type{
+        b.fs1.Name(): b.opt.Compare.HashType1,
+        b.fs2.Name(): b.opt.Compare.HashType2,
+    }
+    logger.LoggerFn = WriteResults
+    overridingEqual := false
+    if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash {
+        overridingEqual = true
+        fs.Debugf(nil, "overriding equal")
+        // otherwise impossible in Sync, so override Equal
+        ctx = b.EqualFn(ctx)
+    }
+    ctxCopyLogger := operations.WithSyncLogger(ctx, logger)
+    if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected {
+        // set here in case !b.opt.Compare.Modtime
+        queueCI = fs.GetConfig(ctxCopyLogger)
+        if b.opt.Compare.NoSlowHash {
+            queueCI.CheckSum = false
+        }
+        if b.opt.Compare.SlowHashSyncOnly && !overridingEqual {
+            queueCI.CheckSum = true
+        }
+    }
+    return ctxCopyLogger
+}
+
 func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string) ([]Results, error) {
+    ctx = b.preCopy(ctx)
+
     if err := b.saveQueue(files, queueName); err != nil {
         return nil, err
     }
@@ -173,13 +220,9 @@ func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.
         }
     }
-    ignoreListingChecksum = b.opt.IgnoreListingChecksum
-    ci = fs.GetConfig(ctx)
-    logger.LoggerFn = WriteResults
-    ctxCopyLogger := operations.WithSyncLogger(ctxCopy, logger)
     b.testFn()
-    err := sync.Sync(ctxCopyLogger, fdst, fsrc, b.opt.CreateEmptySrcDirs)
-    fs.Debugf(nil, "logger is: %v", logger)
+    err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
+    prettyprint(logger, "logger", fs.LogLevelDebug)
     getResults := ReadResults(logger.JSON)
     fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName)
@@ -203,12 +246,10 @@ func (b *bisyncRun) retryFastCopy(ctx context.Context, fsrc, fdst fs.Fs, files b
 }
 
 func (b *bisyncRun) resyncDir(ctx context.Context, fsrc, fdst fs.Fs) ([]Results, error) {
-    ci = fs.GetConfig(ctx)
-    ignoreListingChecksum = b.opt.IgnoreListingChecksum
-    logger.LoggerFn = WriteResults
-    ctxCopyLogger := operations.WithSyncLogger(ctx, logger)
-    err := sync.CopyDir(ctxCopyLogger, fdst, fsrc, b.opt.CreateEmptySrcDirs)
-    fs.Debugf(nil, "logger is: %v", logger)
+    ctx = b.preCopy(ctx)
+    err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs)
+    prettyprint(logger, "logger", fs.LogLevelDebug)
     getResults := ReadResults(logger.JSON)
     fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync")