From ccdd1ea6c4730a43a3a40608fd03d0921e4e3745 Mon Sep 17 00:00:00 2001 From: Nick Craig-Wood Date: Thu, 2 Jun 2016 21:02:44 +0100 Subject: [PATCH] Add --max-depth parameter This will apply to ls/lsd/sync/copy etc Fixes #412 Fixes #213 --- docs/content/docs.md | 18 ++++++++++++++++++ fs/config.go | 3 +++ fs/operations.go | 9 +++++++-- fs/operations_test.go | 27 ++++++++++++++++++++++++++- 4 files changed, 54 insertions(+), 3 deletions(-) diff --git a/docs/content/docs.md b/docs/content/docs.md index 57208ead7..aeb6f3b66 100644 --- a/docs/content/docs.md +++ b/docs/content/docs.md @@ -432,6 +432,24 @@ to reduce the value so rclone moves on to a high level retry (see the Disable low level retries with `--low-level-retries 1`. +### --max-depth=N ### + +This modifies the recursion depth for all the commands except purge. + +So if you do `rclone --max-depth 1 ls remote:path` you will see only +the files in the top level directory. Using `--max-depth 2` means you +will see all the files in the first two directory levels and so on. + +For historical reasons the `lsd` command defaults to using a +`--max-depth` of 1 - you can override this with the command line flag. + +You can use this flag to disable recursion (with `--max-depth 1`). + +Note that if you use this with `sync` and `--delete-excluded` the +files not recursed through are considered excluded and will be deleted +on the destination. Test first with `--dry-run` if you are not sure +what will happen. 
+ ### --modify-window=TIME ### When checking whether a file has been modified, this is the maximum diff --git a/fs/config.go b/fs/config.go index f9314c8ac..d7e7dd58f 100644 --- a/fs/config.go +++ b/fs/config.go @@ -85,6 +85,7 @@ var ( updateOlder = pflag.BoolP("update", "u", false, "Skip files that are newer on the destination.") noGzip = pflag.BoolP("no-gzip-encoding", "", false, "Don't set Accept-Encoding: gzip.") dedupeMode = pflag.StringP("dedupe-mode", "", "interactive", "Dedupe mode interactive|skip|first|newest|oldest|rename.") + maxDepth = pflag.IntP("max-depth", "", -1, "If set limits the recursion depth to this.") bwLimit SizeSuffix // Key to use for password en/decryption. @@ -207,6 +208,7 @@ type ConfigInfo struct { UpdateOlder bool // Skip files that are newer on the destination NoGzip bool // Disable compression DedupeMode DeduplicateMode + MaxDepth int } // Transport returns an http.RoundTripper with the correct timeouts @@ -309,6 +311,7 @@ func LoadConfig() { Config.LowLevelRetries = *lowLevelRetries Config.UpdateOlder = *updateOlder Config.NoGzip = *noGzip + Config.MaxDepth = *maxDepth ConfigPath = *configFile diff --git a/fs/operations.go b/fs/operations.go index 6c4201204..bfa04b2fd 100644 --- a/fs/operations.go +++ b/fs/operations.go @@ -463,6 +463,7 @@ func readFilesMap(fs Fs, includeAll bool, dir string) (files map[string]Object, list := NewLister() if !includeAll { list.SetFilter(Config.Filter) + list.SetLevel(Config.MaxDepth) } list.Start(fs, dir) for { @@ -804,7 +805,7 @@ func Check(fdst, fsrc Fs) error { // // Lists in parallel which may get them out of order func ListFn(f Fs, fn func(Object)) error { - list := NewLister().SetFilter(Config.Filter).Start(f, "") + list := NewLister().SetFilter(Config.Filter).SetLevel(Config.MaxDepth).Start(f, "") var wg sync.WaitGroup wg.Add(Config.Checkers) for i := 0; i < Config.Checkers; i++ { @@ -913,7 +914,11 @@ func Count(f Fs) (objects int64, size int64, err error) { // ListDir lists the 
directories/buckets/containers in the Fs to the supplied writer func ListDir(f Fs, w io.Writer) error { - list := NewLister().SetLevel(1).Start(f, "") + level := 1 + if Config.MaxDepth > 0 { + level = Config.MaxDepth + } + list := NewLister().SetLevel(level).Start(f, "") for { dir, err := list.GetDir() if err != nil { diff --git a/fs/operations_test.go b/fs/operations_test.go index 8900f1d1b..83e71306f 100644 --- a/fs/operations_test.go +++ b/fs/operations_test.go @@ -280,6 +280,26 @@ func TestCopy(t *testing.T) { fstest.CheckItems(t, r.fremote, file1) } +// Test copy with depth +func TestCopyWithDepth(t *testing.T) { + r := NewRun(t) + defer r.Finalise() + file1 := r.WriteFile("sub dir/hello world", "hello world", t1) + file2 := r.WriteFile("hello world2", "hello world2", t2) + + // Check the MaxDepth too + fs.Config.MaxDepth = 1 + defer func() { fs.Config.MaxDepth = -1 }() + + err := fs.CopyDir(r.fremote, r.flocal) + if err != nil { + t.Fatalf("Copy failed: %v", err) + } + + fstest.CheckItems(t, r.flocal, file1, file2) + fstest.CheckItems(t, r.fremote, file2) +} + // Test a server side copy if possible, or the backup path if not func TestServerSideCopy(t *testing.T) { r := NewRun(t) @@ -949,8 +969,13 @@ func TestCount(t *testing.T) { defer r.Finalise() file1 := r.WriteBoth("potato2", "------------------------------------------------------------", t1) file2 := r.WriteBoth("empty space", "", t2) + file3 := r.WriteBoth("sub dir/potato3", "hello", t2) - fstest.CheckItems(t, r.fremote, file1, file2) + fstest.CheckItems(t, r.fremote, file1, file2, file3) + + // Check the MaxDepth too + fs.Config.MaxDepth = 1 + defer func() { fs.Config.MaxDepth = -1 }() objects, size, err := fs.Count(r.fremote) if err != nil {