diff --git a/cmd/bisync/cmd.go b/cmd/bisync/cmd.go
index 54a417cb4..fafbfaff5 100644
--- a/cmd/bisync/cmd.go
+++ b/cmd/bisync/cmd.go
@@ -40,6 +40,9 @@ type Options struct {
 	Force                 bool
 	FiltersFile           string
 	Workdir               string
+	OrigBackupDir         string
+	BackupDir1            string
+	BackupDir2            string
 	DryRun                bool
 	NoCleanup             bool
 	SaveQueues            bool // save extra debugging files (test only flag)
@@ -107,6 +110,8 @@ func init() {
 	Opt.Retries = 3
 	cmd.Root.AddCommand(commandDefinition)
 	cmdFlags := commandDefinition.Flags()
+	// when adding new flags, remember to also update the rc params:
+	// cmd/bisync/rc.go cmd/bisync/help.go (not docs/content/rc.md)
 	flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "")
 	flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "")
 	flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
@@ -116,6 +121,8 @@ func init() {
 	flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "")
 	flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
 	flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
+	flags.StringVarP(cmdFlags, &Opt.BackupDir1, "backup-dir1", "", Opt.BackupDir1, "--backup-dir for Path1. Must be a non-overlapping path on the same remote.", "")
+	flags.StringVarP(cmdFlags, &Opt.BackupDir2, "backup-dir2", "", Opt.BackupDir2, "--backup-dir for Path2. Must be a non-overlapping path on the same remote.", "")
 	flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
 	flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
 	flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
diff --git a/cmd/bisync/deltas.go b/cmd/bisync/deltas.go
index 770dac750..23a10540f 100644
--- a/cmd/bisync/deltas.go
+++ b/cmd/bisync/deltas.go
@@ -346,6 +346,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
 			} else {
 				fs.Debugf(nil, "Files are NOT equal: %s", file)
 				b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
+				ctxMove = b.setBackupDir(ctxMove, 1) // in case already a file with new name
 				if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
 					err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
 					b.critical = true
@@ -360,6 +361,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
 				copy1to2.Add(file + "..path1")
 
 				b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
+				ctxMove = b.setBackupDir(ctxMove, 2) // in case already a file with new name
 				if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, alias+"..path2", alias); err != nil {
 					err = fmt.Errorf("path2 rename failed for %s: %w", alias, err)
 					return
@@ -426,6 +428,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
 	if copy2to1.NotEmpty() {
 		changes1 = true
 		b.indent("Path2", "Path1", "Do queued copies to")
+		ctx = b.setBackupDir(ctx, 1)
 		results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
 
 		// retries, if any
@@ -442,6 +445,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
 	if copy1to2.NotEmpty() {
 		changes2 = true
 		b.indent("Path1", "Path2", "Do queued copies to")
+		ctx = b.setBackupDir(ctx, 2)
 		results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
 
 		// retries, if any
diff --git a/cmd/bisync/help.go b/cmd/bisync/help.go
index 84e78ab9d..61c944a59 100644
--- a/cmd/bisync/help.go
+++ b/cmd/bisync/help.go
@@ -10,7 +10,7 @@ func makeHelp(help string) string {
 		"|", "`",
 		"{MAXDELETE}", strconv.Itoa(DefaultMaxDelete),
 		"{CHECKFILE}", DefaultCheckFilename,
-		"{WORKDIR}", DefaultWorkdir,
+		// "{WORKDIR}", DefaultWorkdir,
 	)
 	return replacer.Replace(help)
 }
@@ -37,7 +37,9 @@ var rcHelp = makeHelp(`This takes the following parameters
 - ignoreListingChecksum - Do not use checksums for listings
 - resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync. Use at your own risk!
-- workdir - server directory for history files (default: {WORKDIR})
+- workdir - server directory for history files (default: |~/.cache/rclone/bisync|)
+- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
 - noCleanup - retain working files
 
 See [bisync command help](https://rclone.org/commands/rclone_bisync/)
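The `setBackupDir` calls added to `deltas.go` above rely on rclone's per-context config: overriding `BackupDir` on the config attached to a context changes `--backup-dir` only for operations run with that context. A minimal standalone sketch of that mechanism (not part of this change), assuming only `fs.AddConfig` and `fs.GetConfig` from rclone's `fs` package; the remote name is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	// Attach a private, mutable copy of the global config to this context.
	ctx, ci := fs.AddConfig(context.Background())

	// Overriding BackupDir on the context-scoped config affects only
	// operations run with ctx, e.g. operations.MoveFile(ctx, ...).
	ci.BackupDir = "remote:BackupDir1"

	fmt.Println(fs.GetConfig(ctx).BackupDir)                  // remote:BackupDir1
	fmt.Println(fs.GetConfig(context.Background()).BackupDir) // global value, unchanged
}
```

This is why each queued copy direction calls `setBackupDir` with the matching path number immediately before handing the same context to `fastCopy`.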
diff --git a/cmd/bisync/operations.go b/cmd/bisync/operations.go
index 5d4926c7d..d845b4826 100644
--- a/cmd/bisync/operations.go
+++ b/cmd/bisync/operations.go
@@ -68,6 +68,8 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
 	if opt.Workdir == "" {
 		opt.Workdir = DefaultWorkdir
 	}
+	ci := fs.GetConfig(ctx)
+	opt.OrigBackupDir = ci.BackupDir
 
 	if !opt.DryRun && !opt.Force {
 		if fs1.Precision() == fs.ModTimeNotSupported {
@@ -358,7 +360,9 @@ func (b *bisyncRun) runLocked(octx context.Context) (err error) {
 	// Optional rmdirs for empty directories
 	if opt.RemoveEmptyDirs {
 		fs.Infof(nil, "Removing empty directories")
+		fctx = b.setBackupDir(fctx, 1)
 		err1 := operations.Rmdirs(fctx, b.fs1, "", true)
+		fctx = b.setBackupDir(fctx, 2)
 		err2 := operations.Rmdirs(fctx, b.fs2, "", true)
 		err := err1
 		if err == nil {
@@ -445,6 +449,8 @@ func (b *bisyncRun) resync(octx, fctx context.Context) error {
 	}
 	ci := fs.GetConfig(ctxSync)
 	ci.IgnoreExisting = true
+	ctxSync = b.setBackupDir(ctxSync, 1)
+	// 2 to 1
 	if results2to1, err = b.resyncDir(ctxSync, b.fs2, b.fs1); err != nil {
 		b.critical = true
 		return err
@@ -452,6 +458,8 @@ func (b *bisyncRun) resync(octx, fctx context.Context) error {
 
 	b.indent("Path1", "Path2", "Resync is copying UNIQUE OR DIFFERING files to")
 	ci.IgnoreExisting = false
+	ctxSync = b.setBackupDir(ctxSync, 2)
+	// 1 to 2
 	if results1to2, err = b.resyncDir(ctxSync, b.fs1, b.fs2); err != nil {
 		b.critical = true
 		return err
@@ -581,3 +589,17 @@ func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, re
 		}
 	}
 }
+
+// setBackupDir overrides --backup-dir with the path-specific version (if set) for the given destination path
+func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Context {
+	ci := fs.GetConfig(ctx)
+	ci.BackupDir = b.opt.OrigBackupDir
+	if destPath == 1 && b.opt.BackupDir1 != "" {
+		ci.BackupDir = b.opt.BackupDir1
+	}
+	if destPath == 2 && b.opt.BackupDir2 != "" {
+		ci.BackupDir = b.opt.BackupDir2
+	}
+	fs.Debugf(ci.BackupDir, "updated backup-dir for Path%d", destPath)
+	return ctx
+}
diff --git a/cmd/bisync/rc.go b/cmd/bisync/rc.go
index 550be5e38..c0d36a372 100644
--- a/cmd/bisync/rc.go
+++ b/cmd/bisync/rc.go
@@ -74,6 +74,12 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 	if opt.Workdir, err = in.GetString("workdir"); rc.NotErrParamNotFound(err) {
 		return
 	}
+	if opt.BackupDir1, err = in.GetString("backupdir1"); rc.NotErrParamNotFound(err) {
+		return
+	}
+	if opt.BackupDir2, err = in.GetString("backupdir2"); rc.NotErrParamNotFound(err) {
+		return
+	}
 
 	checkSync, err := in.GetString("checkSync")
 	if rc.NotErrParamNotFound(err) {
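Because `setBackupDir` selects between `BackupDir1` and `BackupDir2` in two nearly identical branches, a small unit test could lock in the mapping from `destPath` to the right option and catch copy-paste slips. This is a hypothetical sketch (not part of this diff): it assumes `bisyncRun.opt` is a plain `Options` value, as the code above suggests, and the remote names are placeholders.

```go
package bisync

import (
	"context"
	"testing"

	"github.com/rclone/rclone/fs"
)

func TestSetBackupDir(t *testing.T) {
	b := &bisyncRun{opt: Options{
		OrigBackupDir: "remote:fallback",
		BackupDir1:    "remote:backup1",
		BackupDir2:    "remote:backup2",
	}}
	// Use a context-scoped config so the test does not touch the global one.
	ctx, _ := fs.AddConfig(context.Background())

	for _, tc := range []struct {
		destPath int
		want     string
	}{
		{1, "remote:backup1"},
		{2, "remote:backup2"},
	} {
		ctx = b.setBackupDir(ctx, tc.destPath)
		if got := fs.GetConfig(ctx).BackupDir; got != tc.want {
			t.Errorf("Path%d: got %q, want %q", tc.destPath, got, tc.want)
		}
	}
}
```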
diff --git a/docs/content/bisync.md b/docs/content/bisync.md
index 67d338287..f3354dc1e 100644
--- a/docs/content/bisync.md
+++ b/docs/content/bisync.md
@@ -105,6 +105,8 @@ Optional Flags:
     --no-cleanup             Retain working files (useful for troubleshooting and testing).
     --workdir PATH           Use custom working directory (useful for testing).
                              (default: `~/.cache/rclone/bisync`)
+    --backup-dir1 PATH       --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+    --backup-dir2 PATH       --backup-dir for Path2. Must be a non-overlapping path on the same remote.
 -n, --dry-run                Go through the motions - No files are copied/deleted.
 -v, --verbose                Increases logging verbosity.
                              May be specified more than once for more details.
@@ -357,6 +359,42 @@ Certain more serious errors will still enforce a `--resync` lockout, even in `--
 
 Behavior of `--resilient` may change in a future version.
 
+#### --backup-dir1 and --backup-dir2
+
+As of `v1.65`, [`--backup-dir`](/docs/#backup-dir-dir) is supported in bisync.
+Because `--backup-dir` must be a non-overlapping path on the same remote,
+bisync has introduced new `--backup-dir1` and `--backup-dir2` flags to support
+separate backup-dirs for `Path1` and `Path2` (bisyncing between different
+remotes with `--backup-dir` would not otherwise be possible). `--backup-dir1`
+and `--backup-dir2` can use different remotes from each other, but
+`--backup-dir1` must use the same remote as `Path1`, and `--backup-dir2` must
+use the same remote as `Path2`. Each backup directory must not overlap its
+respective bisync Path unless it is excluded by a filter rule.
+
+The standard `--backup-dir` will also work if both paths use the same remote
+(but note that deleted files from both paths will be mixed together in the
+same dir). If either `--backup-dir1` or `--backup-dir2` is set, it will
+override `--backup-dir`.
+
+Example:
+```
+rclone bisync /Users/someuser/some/local/path/Bisync gdrive:Bisync --backup-dir1 /Users/someuser/some/local/path/BackupDir --backup-dir2 gdrive:BackupDir --suffix -2023-08-26 --suffix-keep-extension --check-access --max-delete 10 --filters-file /Users/someuser/some/local/path/bisync_filters.txt --no-cleanup --ignore-listing-checksum --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient -MvP --drive-skip-gdocs --fix-case
+```
+
+In this example, if the user deletes a file in
+`/Users/someuser/some/local/path/Bisync`, bisync will propagate the delete to
+the other side by moving the corresponding file from `gdrive:Bisync` to
+`gdrive:BackupDir`. If the user deletes a file from `gdrive:Bisync`, bisync
+moves it from `/Users/someuser/some/local/path/Bisync` to
+`/Users/someuser/some/local/path/BackupDir`.
+
+In the event of a `..path1` / `..path2` rename due to a sync conflict, the
+rename is not considered a delete unless a previous conflict with the same
+name already exists and would get overwritten.
+
+See also: [`--suffix`](/docs/#suffix-suffix),
+[`--suffix-keep-extension`](/docs/#suffix-keep-extension)
+
 ## Operation
 
 ### Runtime flow details
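Since this change also adds `backupdir1`/`backupdir2` to the rc parameters, the documented CLI example has a remote-control equivalent. Below is a hedged sketch using `librclone`; the method name `"sync/bisync"` is an assumption (the registration is not shown in this diff), and the paths are reused from the example above:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/librclone/librclone"
)

func main() {
	librclone.Initialize()
	defer librclone.Finalize()

	// Parameter names follow cmd/bisync/rc.go above.
	body := `{
		"path1": "/Users/someuser/some/local/path/Bisync",
		"path2": "gdrive:Bisync",
		"backupdir1": "/Users/someuser/some/local/path/BackupDir",
		"backupdir2": "gdrive:BackupDir"
	}`
	out, status := librclone.RPC("sync/bisync", body)
	fmt.Println(status, out)
}
```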