Refactor the List and ListDir interface

This gives more accurate error propagation, control over the depth of
recursion, and the ability to short circuit recursion where possible.

Most of the heavy lifting is done in the "fs" package, making file
system implementations a bit simpler.

This commit contains some code originally by Klaus Post.

Fixes #316
Author: Nick Craig-Wood
Date:   2016-04-21 20:06:21 +01:00
Parent: 3bdad260b0
Commit: 753b0717be

21 changed files with 1512 additions and 996 deletions


@@ -19,7 +19,6 @@ import (
 	"net/http"
 	"regexp"
 	"strings"
-	"sync"
 	"time"

 	"github.com/ncw/go-acd"
@@ -318,9 +317,7 @@ OUTER:
 			return shouldRetry(resp, err)
 		})
 		if err != nil {
-			fs.Stats.Error()
-			fs.ErrorLog(f, "Couldn't list files: %v", err)
-			break
+			return false, err
 		}
 		if nodes == nil {
 			break
@@ -341,177 +338,45 @@ OUTER:
 	return
 }

-// Path should be directory path either "" or "path/"
-//
-// List the directory using a recursive list from the root
-//
-// This fetches the minimum amount of stuff but does more API calls
-// which makes it slow
-func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
-	var subError error
-	// Make the API request
-	var wg sync.WaitGroup
-	_, err := f.listAll(dirID, "", false, false, func(node *acd.Node) bool {
-		// Recurse on directories
-		switch *node.Kind {
-		case folderKind:
-			wg.Add(1)
-			folder := path + *node.Name + "/"
-			fs.Debug(f, "Reading %s", folder)
-			go func() {
-				defer wg.Done()
-				err := f.listDirRecursive(*node.Id, folder, out)
-				if err != nil {
-					subError = err
-					fs.ErrorLog(f, "Error reading %s:%s", folder, err)
-				}
-			}()
-			return false
-		case fileKind:
-			if fs := f.newFsObjectWithInfo(path+*node.Name, node); fs != nil {
-				out <- fs
-			}
-		default:
-			// ignore ASSET etc
-		}
-		return false
-	})
-	wg.Wait()
-	fs.Debug(f, "Finished reading %s", path)
-	if err != nil {
-		return err
-	}
-	if subError != nil {
-		return subError
-	}
-	return nil
-}
-
-// Path should be directory path either "" or "path/"
-//
-// List the directory using a recursive list from the root
-//
-// This fetches the minimum amount of stuff but does more API calls
-// which makes it slow
-func (f *Fs) listDirNonRecursive(dirID string, path string, out fs.ObjectsChan) error {
-	// Start some directory listing go routines
-	var wg sync.WaitGroup         // sync closing of go routines
-	var traversing sync.WaitGroup // running directory traversals
-	type dirListJob struct {
-		dirID string
-		path  string
-	}
-	in := make(chan dirListJob, fs.Config.Checkers)
-	errs := make(chan error, fs.Config.Checkers)
-	for i := 0; i < fs.Config.Checkers; i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for job := range in {
-				var jobs []dirListJob
-				fs.Debug(f, "Reading %q", job.path)
-				// Make the API request
-				_, err := f.listAll(job.dirID, "", false, false, func(node *acd.Node) bool {
-					// Recurse on directories
-					switch *node.Kind {
-					case folderKind:
-						jobs = append(jobs, dirListJob{dirID: *node.Id, path: job.path + *node.Name + "/"})
-					case fileKind:
-						if fs := f.newFsObjectWithInfo(job.path+*node.Name, node); fs != nil {
-							out <- fs
-						}
-					default:
-						// ignore ASSET etc
-					}
-					return false
-				})
-				fs.Debug(f, "Finished reading %q", job.path)
-				if err != nil {
-					fs.ErrorLog(f, "Error reading %s: %s", path, err)
-					errs <- err
-				}
-				// FIXME stop traversal on error?
-				traversing.Add(len(jobs))
-				go func() {
-					// Now we have traversed this directory, send these jobs off for traversal in
-					// the background
-					for _, job := range jobs {
-						in <- job
-					}
-				}()
-				traversing.Done()
-			}
-		}()
-	}
-	// Collect the errors
-	wg.Add(1)
-	var errResult error
-	go func() {
-		defer wg.Done()
-		for err := range errs {
-			errResult = err
-		}
-	}()
-	// Start the process
-	traversing.Add(1)
-	in <- dirListJob{dirID: dirID, path: path}
-	traversing.Wait()
-	close(in)
-	close(errs)
-	wg.Wait()
-	return errResult
-}
-
-// List walks the path returning a channel of FsObjects
-func (f *Fs) List() fs.ObjectsChan {
-	out := make(fs.ObjectsChan, fs.Config.Checkers)
-	go func() {
-		defer close(out)
-		err := f.dirCache.FindRoot(false)
-		if err != nil {
-			fs.Stats.Error()
-			fs.ErrorLog(f, "Couldn't find root: %s", err)
-		} else {
-			err = f.listDirNonRecursive(f.dirCache.RootID(), "", out)
-			if err != nil {
-				fs.Stats.Error()
-				fs.ErrorLog(f, "List failed: %s", err)
-			}
-		}
-	}()
-	return out
-}
-
-// ListDir lists the directories
-func (f *Fs) ListDir() fs.DirChan {
-	out := make(fs.DirChan, fs.Config.Checkers)
-	go func() {
-		defer close(out)
-		err := f.dirCache.FindRoot(false)
-		if err != nil {
-			fs.Stats.Error()
-			fs.ErrorLog(f, "Couldn't find root: %s", err)
-		} else {
-			_, err := f.listAll(f.dirCache.RootID(), "", true, false, func(item *acd.Node) bool {
-				dir := &fs.Dir{
-					Name:  *item.Name,
-					Bytes: -1,
-					Count: -1,
-				}
-				dir.When, _ = time.Parse(timeFormat, *item.ModifiedDate)
-				out <- dir
-				return false
-			})
-			if err != nil {
-				fs.Stats.Error()
-				fs.ErrorLog(f, "ListDir failed: %s", err)
-			}
-		}
-	}()
-	return out
-}
+// ListDir reads the directory specified by the job into out, returning any more jobs
+func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
+	fs.Debug(f, "Reading %q", job.Path)
+	_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
+		remote := job.Path + *node.Name
+		switch *node.Kind {
+		case folderKind:
+			if out.IncludeDirectory(remote) {
+				dir := &fs.Dir{
+					Name:  remote,
+					Bytes: -1,
+					Count: -1,
+				}
+				dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
+				if out.AddDir(dir) {
+					return true
+				}
+				if job.Depth > 0 {
+					jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
+				}
+			}
+		case fileKind:
+			if o := f.newFsObjectWithInfo(remote, node); o != nil {
+				if out.Add(o) {
+					return true
+				}
+			}
+		default:
+			// ignore ASSET etc
+		}
+		return false
+	})
+	fs.Debug(f, "Finished reading %q", job.Path)
+	return jobs, err
+}
+
+// List walks the path returning files and directories into out
+func (f *Fs) List(out fs.ListOpts) {
+	f.dirCache.List(f, out)
+}

 // Put the object into the container
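The job based ListDir above is what provides the depth control and short
circuiting described in the commit message. Below is a minimal sketch of how
the returned jobs chain together; it assumes only the dircache.ListDirJob and
ListDirer types added in dircache/list.go further down, and the walk function
itself is illustrative, not part of the commit (the real helper runs the same
loop concurrently with a pool of goroutines).

package example

import (
	"github.com/ncw/rclone/dircache"
	"github.com/ncw/rclone/fs"
)

// walk drains directory jobs serially. Each ListDir call returns the child
// directories as new jobs with Depth reduced by one, so recursion stops by
// itself once the requested level is reached.
func walk(f dircache.ListDirer, out fs.ListOpts, rootID string, level int) {
	pending := []dircache.ListDirJob{{DirID: rootID, Path: "", Depth: level - 1}}
	for len(pending) > 0 {
		job := pending[0]
		pending = pending[1:]
		newJobs, err := f.ListDir(out, job)
		if err != nil {
			out.SetError(err) // propagate the error instead of only logging it
			return
		}
		pending = append(pending, newJobs...)
	}
}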

b2/b2.go (177 lines changed)

@@ -330,7 +330,7 @@ func (f *Fs) NewFsObject(remote string) fs.Object {
 }

 // listFn is called from list to handle an object
-type listFn func(string, *api.File) error
+type listFn func(remote string, object *api.File, isDirectory bool) error

 // errEndList is a sentinel used to end the list iteration now.
 // listFn should return it to end the iteration with no errors.
@ -339,6 +339,8 @@ var errEndList = errors.New("end list")
// list lists the objects into the function supplied from // list lists the objects into the function supplied from
// the bucket and root supplied // the bucket and root supplied
// //
// level is the depth to search to
//
// If prefix is set then startFileName is used as a prefix which all // If prefix is set then startFileName is used as a prefix which all
// files must have // files must have
// //
@@ -346,7 +348,7 @@ var errEndList = errors.New("end list")
 // than 1000)
 //
 // If hidden is set then it will list the hidden (deleted) files too.
-func (f *Fs) list(prefix string, limit int, hidden bool, fn listFn) error {
+func (f *Fs) list(level int, prefix string, limit int, hidden bool, fn listFn) error {
 	bucketID, err := f.getBucketID()
 	if err != nil {
 		return err
@@ -371,6 +373,7 @@ func (f *Fs) list(prefix string, limit int, hidden bool, fn listFn) error {
 	if hidden {
 		opts.Path = "/b2_list_file_versions"
 	}
+	lastDir := ""
 	for {
 		err := f.pacer.Call(func() (bool, error) {
 			resp, err := f.srv.CallJSON(&opts, &request, &response)
@@ -385,7 +388,14 @@ func (f *Fs) list(prefix string, limit int, hidden bool, fn listFn) error {
 			if !strings.HasPrefix(file.Name, prefix) {
 				return nil
 			}
-			err = fn(file.Name[len(f.root):], file)
+			remote := file.Name[len(f.root):]
+			slashes := strings.Count(remote, "/")
+			// Check if this file makes a new directory
+			if slash := strings.IndexRune(remote, '/'); slash >= 0 {
+				if dir := remote[:slash]; dir != lastDir {
+					if slashes-1 < fs.MaxLevel {
+						err = fn(dir, nil, true)
 						if err != nil {
 							if err == errEndList {
 								return nil
@@ -393,6 +403,21 @@ func (f *Fs) list(prefix string, limit int, hidden bool, fn listFn) error {
 							return err
 						}
 					}
+					lastDir = dir
+				}
+			}
+			// Send the file
+			if slashes < fs.MaxLevel {
+				err = fn(remote, file, false)
+				if err != nil {
+					if err == errEndList {
+						return nil
+					}
+					return err
+				}
+			}
 		}
 		// end if no NextFileName
 		if response.NextFileName == nil {
 			break
@@ -405,38 +430,68 @@ func (f *Fs) list(prefix string, limit int, hidden bool, fn listFn) error {
 	return nil
 }
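The slash counting above is how a flat B2 file listing is turned into files
plus synthesized directory entries up to the requested level. A standalone
sketch of the same idea, with no B2 types and an invented maxLevel constant
standing in for fs.MaxLevel:

package main

import (
	"fmt"
	"strings"
)

const maxLevel = 1 // level 1: just the top of the listing

func main() {
	lastDir := ""
	for _, remote := range []string{"a.txt", "dir/b.txt", "dir/c.txt", "dir/sub/d.txt"} {
		slashes := strings.Count(remote, "/")
		// The first path component, when it changes, introduces a new directory.
		if slash := strings.IndexRune(remote, '/'); slash >= 0 {
			if dir := remote[:slash]; dir != lastDir && slashes-1 < maxLevel {
				fmt.Println("dir: ", dir)
				lastDir = dir
			}
		}
		// Only emit files that are within the requested level.
		if slashes < maxLevel {
			fmt.Println("file:", remote)
		}
	}
	// Prints "file: a.txt" and "dir:  dir"; deeper entries are skipped.
}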
// List walks the path returning a channel of FsObjects // listFiles walks the path returning files and directories to out
func (f *Fs) List() fs.ObjectsChan { func (f *Fs) listFiles(out fs.ListOpts) {
out := make(fs.ObjectsChan, fs.Config.Checkers) defer out.Finished()
if f.bucket == "" {
// Return no objects at top level list
close(out)
fs.Stats.Error()
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
} else {
// List the objects // List the objects
go func() { err := f.list(out.Level(), "", 0, false, func(remote string, object *api.File, isDirectory bool) error {
defer close(out) if isDirectory {
err := f.list("", 0, false, func(remote string, object *api.File) error { dir := &fs.Dir{
Name: remote,
Bytes: -1,
Count: -1,
}
if out.AddDir(dir) {
return fs.ErrorListAborted
}
} else {
if o := f.newFsObjectWithInfo(remote, object); o != nil { if o := f.newFsObjectWithInfo(remote, object); o != nil {
out <- o if out.Add(o) {
return fs.ErrorListAborted
}
}
} }
return nil return nil
}) })
if err != nil { if err != nil {
fs.Stats.Error() out.SetError(err)
fs.ErrorLog(f, "Couldn't list bucket %q: %s", f.bucket, err)
} }
}()
}
return out
} }
// listBucketFn is called from listBuckets to handle a bucket // listBuckets returns all the buckets to out
type listBucketFn func(*api.Bucket) func (f *Fs) listBuckets(out fs.ListOpts) {
defer out.Finished()
err := f.listBucketsToFn(func(bucket *api.Bucket) error {
dir := &fs.Dir{
Name: bucket.Name,
Bytes: -1,
Count: -1,
}
if out.AddDir(dir) {
return fs.ErrorListAborted
}
return nil
})
if err != nil {
out.SetError(err)
}
}
// listBuckets lists the buckets to the function supplied // List walks the path returning files and directories to out
func (f *Fs) listBuckets(fn listBucketFn) error { func (f *Fs) List(out fs.ListOpts) {
if f.bucket == "" {
f.listBuckets(out)
} else {
f.listFiles(out)
}
return
}
// listBucketFn is called from listBucketsToFn to handle a bucket
type listBucketFn func(*api.Bucket) error
// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(fn listBucketFn) error {
var account = api.Account{ID: f.info.AccountID} var account = api.Account{ID: f.info.AccountID}
var response api.ListBucketsResponse var response api.ListBucketsResponse
opts := rest.Opts{ opts := rest.Opts{
@ -451,7 +506,10 @@ func (f *Fs) listBuckets(fn listBucketFn) error {
return err return err
} }
for i := range response.Buckets { for i := range response.Buckets {
fn(&response.Buckets[i]) err = fn(&response.Buckets[i])
if err != nil {
return err
}
} }
return nil return nil
} }
@@ -463,13 +521,15 @@ func (f *Fs) getBucketID() (bucketID string, err error) {
 	if f._bucketID != "" {
 		return f._bucketID, nil
 	}
-	err = f.listBuckets(func(bucket *api.Bucket) {
+	err = f.listBucketsToFn(func(bucket *api.Bucket) error {
 		if bucket.Name == f.bucket {
 			bucketID = bucket.ID
 		}
+		return nil
 	})
 	if bucketID == "" {
-		err = fmt.Errorf("Couldn't find bucket %q", f.bucket)
+		err = fs.ErrorDirNotFound //fmt.Errorf("Couldn't find bucket %q", f.bucket)
 	}
 	f._bucketID = bucketID
 	return bucketID, err
@@ -489,56 +549,6 @@ func (f *Fs) clearBucketID() {
 	f.bucketIDMutex.Unlock()
 }
// ListDir lists the buckets
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
err := f.listBuckets(func(bucket *api.Bucket) {
out <- &fs.Dir{
Name: bucket.Name,
Bytes: -1,
Count: -1,
}
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Error listing buckets: %v", err)
}
}()
} else {
// List the directories in the path in the bucket
go func() {
defer close(out)
lastDir := ""
err := f.list("", 0, false, func(remote string, object *api.File) error {
slash := strings.IndexRune(remote, '/')
if slash < 0 {
return nil
}
dir := remote[:slash]
if dir == lastDir {
return nil
}
out <- &fs.Dir{
Name: dir,
Bytes: -1,
Count: -1,
}
lastDir = dir
return nil
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list bucket %q: %s", f.bucket, err)
}
}()
}
return out
}
// Put the object into the bucket // Put the object into the bucket
// //
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned
@ -651,8 +661,6 @@ func (f *Fs) Purge() error {
} }
checkErrMutex.Lock() checkErrMutex.Lock()
defer checkErrMutex.Unlock() defer checkErrMutex.Unlock()
fs.Stats.Error()
fs.ErrorLog(f, "Purge error: %v", err)
if errReturn == nil { if errReturn == nil {
errReturn = err errReturn = err
} }
@ -670,9 +678,11 @@ func (f *Fs) Purge() error {
} }
}() }()
} }
checkErr(f.list("", 0, true, func(remote string, object *api.File) error { checkErr(f.list(fs.MaxLevel, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
if !isDirectory {
fs.Debug(remote, "Deleting (id %q)", object.ID) fs.Debug(remote, "Deleting (id %q)", object.ID)
toBeDeleted <- object toBeDeleted <- object
}
return nil return nil
})) }))
close(toBeDeleted) close(toBeDeleted)
@ -755,7 +765,10 @@ func (o *Object) readMetaData() (err error) {
return nil return nil
} }
var info *api.File var info *api.File
err = o.fs.list(o.remote, 1, false, func(remote string, object *api.File) error { err = o.fs.list(fs.MaxLevel, o.remote, 1, false, func(remote string, object *api.File, isDirectory bool) error {
if isDirectory {
return nil
}
if remote == o.remote { if remote == o.remote {
info = object info = object
} }

dircache/list.go (new file, 74 lines)

@ -0,0 +1,74 @@
// Listing utility functions for fses which use dircache
package dircache
import (
"sync"
"github.com/ncw/rclone/fs"
)
// ListDirJob describe a directory listing that needs to be done
type ListDirJob struct {
DirID string
Path string
Depth int
}
// ListDirer describes the interface necessary to use ListDir
type ListDirer interface {
// ListDir reads the directory specified by the job into out, returning any more jobs
ListDir(out fs.ListOpts, job ListDirJob) (jobs []ListDirJob, err error)
}
// listDir lists the directory using a recursive list from the root
//
// It does this in parallel, calling f.ListDir to do the actual reading
func listDir(f ListDirer, out fs.ListOpts, dirID string, path string) {
// Start some directory listing go routines
var wg sync.WaitGroup // sync closing of go routines
var traversing sync.WaitGroup // running directory traversals
buffer := out.Buffer()
in := make(chan ListDirJob, buffer)
for i := 0; i < buffer; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for job := range in {
jobs, err := f.ListDir(out, job)
if err != nil {
out.SetError(err)
fs.Debug(f, "Error reading %s: %s", path, err)
} else {
traversing.Add(len(jobs))
go func() {
// Now we have traversed this directory, send these
// jobs off for traversal in the background
for _, job := range jobs {
in <- job
}
}()
}
traversing.Done()
}
}()
}
// Start the process
traversing.Add(1)
in <- ListDirJob{DirID: dirID, Path: path, Depth: out.Level() - 1}
traversing.Wait()
close(in)
wg.Wait()
}
// List walks the path returning files and directories into out
func (dc *DirCache) List(f ListDirer, out fs.ListOpts) {
defer out.Finished()
err := dc.FindRoot(false)
if err != nil {
out.SetError(fs.ErrorDirNotFound)
} else {
listDir(f, out, dc.RootID(), "")
}
}
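A hedged sketch of what a backend has to provide to use this helper: an
in-memory ListDirer whose ListDir returns one child job per subdirectory,
in the same shape as the Amazon Drive and Google Drive implementations in
this commit. The memFs type and its children map are invented for
illustration only.

package example

import (
	"github.com/ncw/rclone/dircache"
	"github.com/ncw/rclone/fs"
)

// memFs is a toy backend: each directory ID maps to its child directory IDs.
type memFs struct {
	children map[string][]string
}

// ListDir emits a Dir for every child and, while job.Depth allows it,
// queues a follow-up job so listDir can recurse in parallel.
func (m *memFs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
	for _, id := range m.children[job.DirID] {
		remote := job.Path + id
		if !out.IncludeDirectory(remote) {
			continue // pruned by the filter, so don't recurse either
		}
		if out.AddDir(&fs.Dir{Name: remote, Bytes: -1, Count: -1}) {
			return nil, nil // listing was aborted downstream, stop producing jobs
		}
		if job.Depth > 0 {
			jobs = append(jobs, dircache.ListDirJob{DirID: id, Path: remote + "/", Depth: job.Depth - 1})
		}
	}
	return jobs, nil
}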


@@ -13,7 +13,6 @@ import (
 	"log"
 	"net/http"
 	"strings"
-	"sync"
 	"time"

 	"golang.org/x/oauth2"
@ -452,117 +451,64 @@ func (f *Fs) findExportFormat(filepath string, item *drive.File) (extension, lin
return "", "" return "", ""
} }
// Path should be directory path either "" or "path/" // ListDir reads the directory specified by the job into out, returning any more jobs
// func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
// List the directory using a recursive list from the root fs.Debug(f, "Reading %q", job.Path)
// _, err = f.listAll(job.DirID, "", false, false, func(item *drive.File) bool {
// This fetches the minimum amount of stuff but does more API calls remote := job.Path + item.Title
// which makes it slow
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
var subError error
// Make the API request
var wg sync.WaitGroup
_, err := f.listAll(dirID, "", false, false, func(item *drive.File) bool {
filepath := path + item.Title
switch { switch {
case *driveAuthOwnerOnly && !isAuthOwned(item): case *driveAuthOwnerOnly && !isAuthOwned(item):
// ignore object or directory // ignore object or directory
case item.MimeType == driveFolderType: case item.MimeType == driveFolderType:
// Recurse on directories if out.IncludeDirectory(remote) {
wg.Add(1)
folder := filepath + "/"
fs.Debug(f, "Reading %s", folder)
go func() {
defer wg.Done()
err := f.listDirRecursive(item.Id, folder, out)
if err != nil {
subError = err
fs.ErrorLog(f, "Error reading %s:%s", folder, err)
}
}()
case item.Md5Checksum != "":
// If item has MD5 sum it is a file stored on drive
if o := f.newFsObjectWithInfo(filepath, item); o != nil {
out <- o
}
case len(item.ExportLinks) != 0:
// If item has export links then it is a google doc
extension, link := f.findExportFormat(filepath, item)
if extension == "" {
fs.Debug(filepath, "No export formats found")
} else {
if o := f.newFsObjectWithInfo(filepath+"."+extension, item); o != nil {
obj := o.(*Object)
obj.isDocument = true
obj.url = link
obj.bytes = -1
out <- o
}
}
default:
fs.Debug(filepath, "Ignoring unknown object")
}
return false
})
wg.Wait()
fs.Debug(f, "Finished reading %s", path)
if err != nil {
return err
}
if subError != nil {
return subError
}
return nil
}
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "List failed: %s", err)
}
}
}()
return out
}
// ListDir walks the path returning a channel of directories
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
_, err := f.listAll(f.dirCache.RootID(), "", true, false, func(item *drive.File) bool {
dir := &fs.Dir{ dir := &fs.Dir{
Name: item.Title, Name: item.Title,
Bytes: -1, Bytes: -1,
Count: -1, Count: -1,
} }
dir.When, _ = time.Parse(timeFormatIn, item.ModifiedDate) dir.When, _ = time.Parse(timeFormatIn, item.ModifiedDate)
out <- dir if out.AddDir(dir) {
return true
}
if job.Depth > 0 {
jobs = append(jobs, dircache.ListDirJob{DirID: item.Id, Path: remote + "/", Depth: job.Depth - 1})
}
}
case item.Md5Checksum != "":
// If item has MD5 sum it is a file stored on drive
if o := f.newFsObjectWithInfo(remote, item); o != nil {
if out.Add(o) {
return true
}
}
case len(item.ExportLinks) != 0:
// If item has export links then it is a google doc
extension, link := f.findExportFormat(remote, item)
if extension == "" {
fs.Debug(remote, "No export formats found")
} else {
if o := f.newFsObjectWithInfo(remote+"."+extension, item); o != nil {
obj := o.(*Object)
obj.isDocument = true
obj.url = link
obj.bytes = -1
if out.Add(o) {
return true
}
}
}
default:
fs.Debug(remote, "Ignoring unknown object")
}
return false return false
}) })
if err != nil { fs.Debug(f, "Finished reading %q", job.Path)
fs.Stats.Error() return jobs, err
fs.ErrorLog(f, "ListDir failed: %s", err) }
}
} // List walks the path returning files and directories to out
}() func (f *Fs) List(out fs.ListOpts) {
return out f.dirCache.List(f, out)
} }
// Creates a drive.File info from the parameters passed in and a half // Creates a drive.File info from the parameters passed in and a half


@ -226,21 +226,16 @@ func (f *Fs) NewFsObject(remote string) fs.Object {
} }
// Strips the root off path and returns it // Strips the root off path and returns it
func (f *Fs) stripRoot(path string) *string { func (f *Fs) stripRoot(path string) (string, error) {
lowercase := strings.ToLower(path) lowercase := strings.ToLower(path)
if !strings.HasPrefix(lowercase, f.slashRootSlash) { if !strings.HasPrefix(lowercase, f.slashRootSlash) {
fs.Stats.Error() return "", fmt.Errorf("Path %q is not under root %q", path, f.slashRootSlash)
fs.ErrorLog(f, "Path '%s' is not under root '%s'", path, f.slashRootSlash)
return nil
} }
return path[len(f.slashRootSlash):], nil
stripped := path[len(f.slashRootSlash):]
return &stripped
} }
// Walk the root returning a channel of FsObjects // Walk the root returning a channel of FsObjects
func (f *Fs) list(out fs.ObjectsChan) { func (f *Fs) list(out fs.ListOpts) {
// Track path component case, it could be different for entries coming from DropBox API // Track path component case, it could be different for entries coming from DropBox API
// See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us // See https://www.dropboxforum.com/hc/communities/public/questions/201665409-Wrong-character-case-of-folder-name-when-calling-listFolder-using-Sync-API?locale=en-us
// and https://github.com/ncw/rclone/issues/53 // and https://github.com/ncw/rclone/issues/53
@ -283,16 +278,36 @@ func (f *Fs) list(out fs.ObjectsChan) {
if entry.IsDir { if entry.IsDir {
nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent) nameTree.PutCaseCorrectDirectoryName(parentPath, lastComponent)
name, err := f.stripRoot(entry.Path + "/")
if err != nil {
out.SetError(err)
return
}
name = strings.Trim(name, "/")
if name != "" {
dir := &fs.Dir{
Name: name,
When: time.Time(entry.ClientMtime),
Bytes: entry.Bytes,
Count: -1,
}
if out.AddDir(dir) {
return
}
}
} else { } else {
parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath) parentPathCorrectCase := nameTree.GetPathWithCorrectCase(parentPath)
if parentPathCorrectCase != nil { if parentPathCorrectCase != nil {
path := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent) path, err := f.stripRoot(*parentPathCorrectCase + "/" + lastComponent)
if path == nil { if err != nil {
// an error occurred and logged by stripRoot out.SetError(err)
continue return
}
if o := f.newFsObjectWithInfo(path, entry); o != nil {
if out.Add(o) {
return
}
} }
out <- f.newFsObjectWithInfo(*path, entry)
} else { } else {
nameTree.PutFile(parentPath, lastComponent, entry) nameTree.PutFile(parentPath, lastComponent, entry)
} }
@ -306,26 +321,28 @@ func (f *Fs) list(out fs.ObjectsChan) {
} }
} }
walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) { walkFunc := func(caseCorrectFilePath string, entry *dropbox.Entry) error {
path := f.stripRoot("/" + caseCorrectFilePath) path, err := f.stripRoot("/" + caseCorrectFilePath)
if path == nil { if err != nil {
// an error occurred and logged by stripRoot return err
return
} }
if o := f.newFsObjectWithInfo(path, entry); o != nil {
out <- f.newFsObjectWithInfo(*path, entry) if out.Add(o) {
return fs.ErrorListAborted
}
}
return nil
}
err := nameTree.WalkFiles(f.root, walkFunc)
if err != nil {
out.SetError(err)
} }
nameTree.WalkFiles(f.root, walkFunc)
} }
// List walks the path returning a channel of FsObjects // List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan { func (f *Fs) List(out fs.ListOpts) {
out := make(fs.ObjectsChan, fs.Config.Checkers) defer out.Finished()
go func() {
defer close(out)
f.list(out) f.list(out)
}()
return out
} }
// ListDir walks the path returning a channel of FsObjects // ListDir walks the path returning a channel of FsObjects
@ -341,14 +358,13 @@ func (f *Fs) ListDir() fs.DirChan {
for i := range entry.Contents { for i := range entry.Contents {
entry := &entry.Contents[i] entry := &entry.Contents[i]
if entry.IsDir { if entry.IsDir {
name := f.stripRoot(entry.Path) name, err := f.stripRoot(entry.Path)
if name == nil { if err != nil {
// an error occurred and logged by stripRoot
continue continue
} }
out <- &fs.Dir{ out <- &fs.Dir{
Name: *name, Name: name,
When: time.Time(entry.ClientMtime), When: time.Time(entry.ClientMtime),
Bytes: entry.Bytes, Bytes: entry.Bytes,
Count: -1, Count: -1,


@@ -143,9 +143,9 @@ func (tree *nameTreeNode) GetPathWithCorrectCase(path string) *string {
 	return &resultString
 }

-type nameTreeFileWalkFunc func(caseCorrectFilePath string, entry *dropbox.Entry)
+type nameTreeFileWalkFunc func(caseCorrectFilePath string, entry *dropbox.Entry) error

-func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFileWalkFunc) {
+func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFileWalkFunc) error {
 	var prefix string
 	if currentPath == "" {
 		prefix = ""
@@ -154,7 +154,10 @@ func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFile
 	}

 	for name, entry := range tree.Files {
-		walkFunc(prefix+name, entry)
+		err := walkFunc(prefix+name, entry)
+		if err != nil {
+			return err
+		}
 	}

 	for lowerCaseName, directory := range tree.Directories {
@@ -165,15 +168,20 @@ func (tree *nameTreeNode) walkFilesRec(currentPath string, walkFunc nameTreeFile
 			continue
 		}

-		directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
+		err := directory.walkFilesRec(prefix+caseCorrectName, walkFunc)
+		if err != nil {
+			return err
+		}
 	}
+
+	return nil
 }

-func (tree *nameTreeNode) WalkFiles(rootPath string, walkFunc nameTreeFileWalkFunc) {
+func (tree *nameTreeNode) WalkFiles(rootPath string, walkFunc nameTreeFileWalkFunc) error {
 	node := tree.getTreeNode(rootPath)
 	if node == nil {
-		return
+		return nil
 	}
-	node.walkFilesRec(rootPath, walkFunc)
+	return node.walkFilesRec(rootPath, walkFunc)
 }
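With walkFunc now returning an error, a walk can be stopped early and the
failure reaches the caller instead of being logged and swallowed. A hedged
sketch, written as if inside the rclone dropbox package (countingWalk and
maxEntries are invented for illustration):

// countingWalk stops the walk after maxEntries files have been seen.
func countingWalk(tree *nameTreeNode, root string, maxEntries int) error {
	seen := 0
	return tree.WalkFiles(root, func(caseCorrectFilePath string, entry *dropbox.Entry) error {
		seen++
		if seen > maxEntries {
			// Returning an error propagates straight out of walkFilesRec
			// and WalkFiles, short circuiting the rest of the tree.
			return fs.ErrorListAborted
		}
		return nil
	})
}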


@ -77,15 +77,15 @@ func TestPutAndWalk(t *testing.T) {
tree.PutCaseCorrectDirectoryName("", "A") tree.PutCaseCorrectDirectoryName("", "A")
numCalled := 0 numCalled := 0
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) { walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) error {
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath) assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
assert(t, entry.Path == "xxx", "entry.Path should be xxx") assert(t, entry.Path == "xxx", "entry.Path should be xxx")
numCalled++ numCalled++
return nil
} }
tree.WalkFiles("", walkFunc) err := tree.WalkFiles("", walkFunc)
assert(t, err == nil, "No error should be returned")
assert(t, numCalled == 1, "walk func should be called only once") assert(t, numCalled == 1, "walk func should be called only once")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported") assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
} }
@ -97,15 +97,15 @@ func TestPutAndWalkWithPrefix(t *testing.T) {
tree.PutCaseCorrectDirectoryName("", "A") tree.PutCaseCorrectDirectoryName("", "A")
numCalled := 0 numCalled := 0
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) { walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) error {
assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath) assert(t, caseCorrectFilePath == "A/F", "caseCorrectFilePath should be A/F, not "+caseCorrectFilePath)
assert(t, entry.Path == "xxx", "entry.Path should be xxx") assert(t, entry.Path == "xxx", "entry.Path should be xxx")
numCalled++ numCalled++
return nil
} }
tree.WalkFiles("A", walkFunc) err := tree.WalkFiles("A", walkFunc)
assert(t, err == nil, "No error should be returned")
assert(t, numCalled == 1, "walk func should be called only once") assert(t, numCalled == 1, "walk func should be called only once")
assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported") assert(t, fs.Stats.GetErrors() == errors, "No errors should be reported")
} }
@ -115,10 +115,11 @@ func TestPutAndWalkIncompleteTree(t *testing.T) {
tree := newNameTree() tree := newNameTree()
tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"}) tree.PutFile("a", "F", &dropboxapi.Entry{Path: "xxx"})
walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) { walkFunc := func(caseCorrectFilePath string, entry *dropboxapi.Entry) error {
t.Fatal("Should not be called") t.Fatal("Should not be called")
return nil
} }
tree.WalkFiles("", walkFunc) err := tree.WalkFiles("", walkFunc)
assert(t, err == nil, "No error should be returned")
assert(t, fs.Stats.GetErrors() == errors+1, "One error should be reported") assert(t, fs.Stats.GetErrors() == errors+1, "One error should be reported")
} }


@@ -6,6 +6,7 @@ import (
 	"bufio"
 	"fmt"
 	"os"
+	"path"
 	"regexp"
 	"strconv"
 	"strings"
@@ -69,7 +70,8 @@ type Filter struct {
 	ModTimeFrom time.Time
 	ModTimeTo   time.Time
 	rules       []rule
-	files       filesMap
+	files       filesMap // files if filesFrom
+	dirs        filesMap // dirs from filesFrom
 }

 // We use time conventions
@ -244,9 +246,21 @@ func (f *Filter) AddRule(rule string) error {
func (f *Filter) AddFile(file string) error { func (f *Filter) AddFile(file string) error {
if f.files == nil { if f.files == nil {
f.files = make(filesMap) f.files = make(filesMap)
f.dirs = make(filesMap)
} }
file = strings.Trim(file, "/") file = strings.Trim(file, "/")
f.files[file] = struct{}{} f.files[file] = struct{}{}
// Put all the parent directories into f.dirs
for {
file = path.Dir(file)
if file == "." {
break
}
if _, found := f.dirs[file]; found {
break
}
f.dirs[file] = struct{}{}
}
	return nil
}
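The loop above means that adding a file with --files-from implicitly
whitelists every parent directory, which is what lets IncludeDirectory prune
everything else. A small standalone illustration of the resulting f.dirs
contents, mirroring the test further down (the dirs map here is a local
stand-in for the Filter field):

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	dirs := map[string]struct{}{}
	file := strings.Trim("/path/to/dir/file2.png", "/")
	// Same walk up the path as Filter.AddFile
	for {
		file = path.Dir(file)
		if file == "." {
			break
		}
		if _, found := dirs[file]; found {
			break
		}
		dirs[file] = struct{}{}
	}
	fmt.Println(dirs) // keys: path, path/to, path/to/dir
}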
@ -265,6 +279,28 @@ func (f *Filter) InActive() bool {
len(f.rules) == 0) len(f.rules) == 0)
} }
// includeRemote returns whether this remote passes the filter rules.
func (f *Filter) includeRemote(remote string) bool {
for _, rule := range f.rules {
if rule.Match(remote) {
return rule.Include
}
}
return true
}
// IncludeDirectory returns whether this directory should be included
// in the sync or not.
func (f *Filter) IncludeDirectory(remote string) bool {
remote = strings.Trim(remote, "/")
// filesFrom takes precedence
if f.files != nil {
_, include := f.dirs[remote]
return include
}
return f.includeRemote(remote + "/")
}
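IncludeDirectory lets a backend skip recursing into a directory that the
rules already exclude, by matching the directory name with a trailing slash.
A hedged sketch with invented rules (the exact rule strings and the printed
results are assumptions based on the usual rclone filter syntax):

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

func main() {
	f, err := fs.NewFilter()
	if err != nil {
		panic(err)
	}
	// Hypothetical rules: exclude everything under sausage1, include the rest
	for _, rule := range []string{"- sausage1/**", "+ *"} {
		if err := f.AddRule(rule); err != nil {
			panic(err)
		}
	}
	fmt.Println(f.IncludeDirectory("sausage1")) // expected: false, no need to recurse
	fmt.Println(f.IncludeDirectory("potato"))   // expected: true
}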
// Include returns whether this object should be included into the
// sync or not
func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
@@ -285,12 +321,7 @@ func (f *Filter) Include(remote string, size int64, modTime time.Time) bool {
 	if f.MaxSize != 0 && size > f.MaxSize {
 		return false
 	}
-	for _, rule := range f.rules {
-		if rule.Match(remote) {
-			return rule.Include
-		}
-	}
-	return true
+	return f.includeRemote(remote)
 }

 // IncludeObject returns whether this object should be included into
// IncludeObject returns whether this object should be included into // IncludeObject returns whether this object should be included into


@@ -6,6 +6,8 @@ import (
 	"strings"
 	"testing"
 	"time"
+
+	"github.com/stretchr/testify/assert"
 )
func TestAgeSuffix(t *testing.T) { func TestAgeSuffix(t *testing.T) {
@ -192,6 +194,20 @@ func testInclude(t *testing.T, f *Filter, tests []includeTest) {
} }
} }
type includeDirTest struct {
in string
want bool
}
func testDirInclude(t *testing.T, f *Filter, tests []includeDirTest) {
for _, test := range tests {
got := f.IncludeDirectory(test.in)
if test.want != got {
t.Errorf("%q: want %v got %v", test.in, test.want, got)
}
}
}
func TestNewFilterIncludeFiles(t *testing.T) { func TestNewFilterIncludeFiles(t *testing.T) {
f, err := NewFilter() f, err := NewFilter()
if err != nil { if err != nil {
@ -205,6 +221,11 @@ func TestNewFilterIncludeFiles(t *testing.T) {
if err != nil { if err != nil {
t.Error(err) t.Error(err)
} }
assert.Equal(t, filesMap{
"file1.jpg": {},
"file2.jpg": {},
}, f.files)
assert.Equal(t, filesMap{}, f.dirs)
testInclude(t, f, []includeTest{ testInclude(t, f, []includeTest{
{"file1.jpg", 0, 0, true}, {"file1.jpg", 0, 0, true},
{"file2.jpg", 1, 0, true}, {"file2.jpg", 1, 0, true},
@ -216,6 +237,42 @@ func TestNewFilterIncludeFiles(t *testing.T) {
} }
} }
func TestNewFilterIncludeFilesDirs(t *testing.T) {
f, err := NewFilter()
if err != nil {
t.Fatal(err)
}
for _, path := range []string{
"path/to/dir/file1.png",
"/path/to/dir/file2.png",
"/path/to/file3.png",
"/path/to/dir2/file4.png",
} {
err = f.AddFile(path)
if err != nil {
t.Error(err)
}
}
assert.Equal(t, filesMap{
"path": {},
"path/to": {},
"path/to/dir": {},
"path/to/dir2": {},
}, f.dirs)
testDirInclude(t, f, []includeDirTest{
{"path", true},
{"path/to", true},
{"path/to/", true},
{"/path/to", true},
{"/path/to/", true},
{"path/to/dir", true},
{"path/to/dir2", true},
{"path/too", false},
{"path/three", false},
{"four", false},
})
}
func TestNewFilterMinSize(t *testing.T) { func TestNewFilterMinSize(t *testing.T) {
f, err := NewFilter() f, err := NewFilter()
if err != nil { if err != nil {
@ -340,6 +397,16 @@ func TestNewFilterMatches(t *testing.T) {
{"sausage3/potato", 101, 0, true}, {"sausage3/potato", 101, 0, true},
{"unicorn", 99, 0, false}, {"unicorn", 99, 0, false},
}) })
testDirInclude(t, f, []includeDirTest{
{"sausage1", false},
{"sausage2", false},
{"sausage2/sub", false},
{"sausage2/sub/dir", false},
{"sausage3", true},
{"sausage3/sub", true},
{"sausage3/sub/dir", true},
{"sausage4", false},
})
if f.InActive() { if f.InActive() {
t.Errorf("want !InActive") t.Errorf("want !InActive")
} }

fs/fs.go (303 lines changed)

@@ -5,9 +5,12 @@ import (
 	"fmt"
 	"io"
 	"log"
+	"math"
 	"path/filepath"
 	"regexp"
 	"sort"
+	"strings"
+	"sync"
 	"time"
 )
@@ -18,6 +21,8 @@ const (
 	// ModTimeNotSupported is a very large precision value to show
 	// mod time isn't supported on this Fs
 	ModTimeNotSupported = 100 * 365 * 24 * time.Hour
+	// MaxLevel is a sentinel representing an infinite depth for listings
+	MaxLevel = math.MaxInt32
 )

 // Globals
@@ -32,6 +37,9 @@ var (
 	ErrorCantDirMove       = fmt.Errorf("Can't move directory - incompatible remotes")
 	ErrorDirExists         = fmt.Errorf("Can't copy directory - destination already exists")
 	ErrorCantSetModTime    = fmt.Errorf("Can't set modified time")
+	ErrorDirNotFound       = fmt.Errorf("Directory not found")
+	ErrorLevelNotSupported = fmt.Errorf("Level value not supported")
+	ErrorListAborted       = fmt.Errorf("List aborted")
 )

 // RegInfo provides information about a filesystem
@@ -90,11 +98,10 @@ func Register(info *RegInfo) {
 type Fs interface {
 	Info

-	// List the Fs into a channel
-	List() ObjectsChan
-
-	// ListDir lists the Fs directories/buckets/containers into a channel
-	ListDir() DirChan
+	// List the objects and directories of the Fs
+	//
+	// This should return ErrDirNotFound if the directory isn't found.
+	List(ListOpts)

 	// NewFsObject finds the Object at remote. Returns nil if can't be found
 	NewFsObject(remote string) Object
@@ -239,6 +246,292 @@ type UnWrapper interface {

 // ObjectsChan is a channel of Objects
 type ObjectsChan chan Object
// ListOpts describes the interface used for Fs.List operations
type ListOpts interface {
// Add an object to the output.
// If the function returns true, the operation has been aborted.
// Multiple goroutines can safely add objects concurrently.
Add(obj Object) (abort bool)
// Add a directory to the output.
// If the function returns true, the operation has been aborted.
// Multiple goroutines can safely add objects concurrently.
AddDir(dir *Dir) (abort bool)
// IncludeDirectory returns whether this directory should be
// included in the listing (and recursed into or not).
IncludeDirectory(remote string) bool
// SetError will set an error state, and will cause the listing to
// be aborted.
// Multiple goroutines can set the error state concurrently,
// but only the first will be returned to the caller.
SetError(err error)
// Level returns the level it should recurse to. Fses may
// ignore this in which case the listing will be less
// efficient.
Level() int
// Buffer returns the channel depth in use
Buffer() int
// Finished should be called when listing is finished
Finished()
// IsFinished returns whether Finished or SetError have been called
IsFinished() bool
}
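From the backend side, a List implementation now drives this interface
instead of filling a channel. A minimal hedged sketch of the expected
contract (the inMemoryFs type and its objects field are invented; a real
backend implements the rest of the Fs interface too):

package example

import "github.com/ncw/rclone/fs"

// inMemoryFs is illustrative only; objects would be pre-populated fs.Objects.
type inMemoryFs struct {
	objects []fs.Object
}

// List shows the contract: always call Finished, stop as soon as Add reports
// an abort, and report failures through SetError rather than logging them.
func (f *inMemoryFs) List(out fs.ListOpts) {
	defer out.Finished()
	for _, o := range f.objects {
		if out.Add(o) {
			return // the consumer aborted the listing
		}
	}
}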
// listerResult is returned by the lister methods
type listerResult struct {
Obj Object
Dir *Dir
Err error
}
// Lister objects are used for controlling listing of Fs objects
type Lister struct {
mu sync.RWMutex
buffer int
abort bool
results chan listerResult
finished sync.Once
level int
filter *Filter
}
// NewLister creates a Lister object.
//
// The default channel buffer size will be Config.Checkers unless
// overridden with SetBuffer. The default level will be infinite.
func NewLister() *Lister {
o := &Lister{}
return o.SetLevel(-1).SetBuffer(Config.Checkers)
}
// Start starts a go routine listing the Fs passed in. It returns the
// same Lister that was passed in for convenience.
func (o *Lister) Start(f Fs) *Lister {
o.results = make(chan listerResult, o.buffer)
go func() {
f.List(o)
}()
return o
}
// SetLevel sets the level to recurse to. It returns same Lister that
// was passed in for convenience. If Level is < 0 then it sets it to
// infinite. Must be called before Start().
func (o *Lister) SetLevel(level int) *Lister {
if level < 0 {
o.level = MaxLevel
} else {
o.level = level
}
return o
}
// SetFilter sets the Filter that is in use. It defaults to no
// filtering. Must be called before Start().
func (o *Lister) SetFilter(filter *Filter) *Lister {
o.filter = filter
return o
}
// Level gets the recursion level for this listing.
//
// Fses may ignore this, but should implement it for improved efficiency if possible.
//
// Level 1 means list just the contents of the directory
//
// Each returned item must have less than level `/`s in.
func (o *Lister) Level() int {
return o.level
}
// SetBuffer sets the channel buffer size in use. Must be called
// before Start().
func (o *Lister) SetBuffer(buffer int) *Lister {
if buffer < 1 {
buffer = 1
}
o.buffer = buffer
return o
}
// Buffer gets the channel buffer size in use
func (o *Lister) Buffer() int {
return o.buffer
}
// Add an object to the output.
// If the function returns true, the operation has been aborted.
// Multiple goroutines can safely add objects concurrently.
func (o *Lister) Add(obj Object) (abort bool) {
o.mu.RLock()
defer o.mu.RUnlock()
if o.abort {
return true
}
o.results <- listerResult{Obj: obj}
return false
}
// AddDir will add a directory to the output.
// If the function returns true, the operation has been aborted.
// Multiple goroutines can safely add objects concurrently.
func (o *Lister) AddDir(dir *Dir) (abort bool) {
o.mu.RLock()
defer o.mu.RUnlock()
if o.abort {
return true
}
remote := dir.Name
remote = strings.Trim(remote, "/")
dir.Name = remote
// Check the level and ignore if too high
slashes := strings.Count(remote, "/")
if slashes >= o.level {
return false
}
// Check if directory is included
if !o.IncludeDirectory(remote) {
return false
}
o.results <- listerResult{Dir: dir}
return false
}
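The slash count check is what enforces Level(): with level 1 only names with
zero slashes get through, matching the "list just the contents of the
directory" semantics documented on Level above. A tiny standalone
illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	level := 1 // as set by NewLister().SetLevel(1)
	for _, name := range []string{"top", "top/sub", "top/sub/deeper"} {
		slashes := strings.Count(name, "/")
		// Only "top" is kept at level 1; deeper directories are dropped.
		fmt.Printf("%-17q kept=%v\n", name, slashes < level)
	}
}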
// IncludeDirectory returns whether this directory should be
// included in the listing (and recursed into or not).
func (o *Lister) IncludeDirectory(remote string) bool {
if o.filter == nil {
return true
}
return o.filter.IncludeDirectory(remote)
}
// SetError will set an error state, and will cause the listing to
// be aborted.
// Multiple goroutines can set the error state concurrently,
// but only the first will be returned to the caller.
func (o *Lister) SetError(err error) {
o.mu.RLock()
if err != nil && !o.abort {
o.results <- listerResult{Err: err}
}
o.mu.RUnlock()
o.Finished()
}
// Finished should be called when listing is finished
func (o *Lister) Finished() {
o.finished.Do(func() {
o.mu.Lock()
o.abort = true
close(o.results)
o.mu.Unlock()
})
}
// IsFinished returns whether the directory listing is finished or not
func (o *Lister) IsFinished() bool {
o.mu.RLock()
defer o.mu.RUnlock()
return o.abort
}
// Get an object from the listing.
// Will return either an object or a directory, never both.
// Will return (nil, nil, nil) when all objects have been returned.
func (o *Lister) Get() (Object, *Dir, error) {
select {
case r := <-o.results:
return r.Obj, r.Dir, r.Err
}
}
// GetObject will return an object from the listing.
// It will skip over any directories.
// Will return (nil, nil) when all objects have been returned.
func (o *Lister) GetObject() (Object, error) {
for {
obj, dir, err := o.Get()
if err != nil {
return nil, err
}
// Check if we are finished
if dir == nil && obj == nil {
return nil, nil
}
// Ignore directories
if dir != nil {
continue
}
return obj, nil
}
}
// GetObjects will return a slice of object from the listing.
// It will skip over any directories.
func (o *Lister) GetObjects() (objs []Object, err error) {
for {
obj, dir, err := o.Get()
if err != nil {
return nil, err
}
// Check if we are finished
if dir == nil && obj == nil {
break
}
if obj != nil {
objs = append(objs, obj)
}
}
return objs, nil
}
// GetDir will return a directory from the listing.
// It will skip over any objects.
// Will return (nil, nil) when all objects have been returned.
func (o *Lister) GetDir() (*Dir, error) {
for {
obj, dir, err := o.Get()
if err != nil {
return nil, err
}
// Check if we are finished
if dir == nil && obj == nil {
return nil, nil
}
// Ignore objects
if obj != nil {
continue
}
return dir, nil
}
}
// GetDirs will return a slice of directories from the listing.
// It will skip over any objects.
func (o *Lister) GetDirs() (dirs []*Dir, err error) {
for {
obj, dir, err := o.Get()
if err != nil {
return nil, err
}
// Check if we are finished
if dir == nil && obj == nil {
break
}
if dir != nil {
dirs = append(dirs, dir)
}
}
return dirs, nil
}
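Putting the consumer side together, callers now iterate a Lister rather than
ranging over a channel, and see listing errors directly. A hedged usage
sketch (listTopLevel is an invented helper; f is any fs.Fs):

package example

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

// listTopLevel prints the top level of f, honouring the configured filter.
func listTopLevel(f fs.Fs) error {
	list := fs.NewLister().SetLevel(1).SetFilter(fs.Config.Filter).Start(f)
	for {
		obj, dir, err := list.Get()
		if err != nil {
			return err // e.g. fs.ErrorDirNotFound
		}
		if obj == nil && dir == nil {
			return nil // listing finished
		}
		if dir != nil {
			fmt.Println("dir: ", dir.Name)
			continue
		}
		fmt.Println("file:", obj.Remote())
	}
}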
// Objects is a slice of Object~s // Objects is a slice of Object~s
type Objects []Object type Objects []Object


@@ -38,22 +38,13 @@ func (f *Limited) String() string {
 }

 // List the Fs into a channel
-func (f *Limited) List() ObjectsChan {
-	out := make(ObjectsChan, Config.Checkers)
-	go func() {
-		for _, obj := range f.objects {
-			out <- obj
-		}
-		close(out)
-	}()
-	return out
-}
-
-// ListDir lists the Fs directories/buckets/containers into a channel
-func (f *Limited) ListDir() DirChan {
-	out := make(DirChan, Config.Checkers)
-	close(out)
-	return out
+func (f *Limited) List(opts ListOpts) {
+	defer opts.Finished()
+	for _, obj := range f.objects {
+		if opts.Add(obj) {
+			return
+		}
+	}
 }

 // NewFsObject finds the Object at remote. Returns nil if can't be found


@ -456,10 +456,23 @@ func DeleteFiles(toBeDeleted ObjectsChan) {
// Read a map of Object.Remote to Object for the given Fs. // Read a map of Object.Remote to Object for the given Fs.
// If includeAll is specified all files will be added, // If includeAll is specified all files will be added,
// otherwise only files passing the filter will be added. // otherwise only files passing the filter will be added.
func readFilesMap(fs Fs, includeAll bool) map[string]Object { func readFilesMap(fs Fs, includeAll bool) (files map[string]Object, err error) {
files := make(map[string]Object) files = make(map[string]Object)
normalised := make(map[string]struct{}) normalised := make(map[string]struct{})
for o := range fs.List() { list := NewLister()
if !includeAll {
list.SetFilter(Config.Filter)
}
list.Start(fs)
for {
o, err := list.GetObject()
if err != nil {
return files, err
}
// Check if we are finished
if o == nil {
break
}
remote := o.Remote() remote := o.Remote()
normalisedRemote := strings.ToLower(norm.NFC.String(remote)) normalisedRemote := strings.ToLower(norm.NFC.String(remote))
if _, ok := files[remote]; !ok { if _, ok := files[remote]; !ok {
@ -477,7 +490,39 @@ func readFilesMap(fs Fs, includeAll bool) map[string]Object {
} }
normalised[normalisedRemote] = struct{}{} normalised[normalisedRemote] = struct{}{}
} }
return files return files, nil
}
// readFilesMaps runs readFilesMap on fdst and fsrc at the same time
func readFilesMaps(fdst Fs, fdstIncludeAll bool, fsrc Fs, fsrcIncludeAll bool) (dstFiles, srcFiles map[string]Object, err error) {
var wg sync.WaitGroup
var srcErr, dstErr error
list := func(fs Fs, includeAll bool, pMap *map[string]Object, pErr *error) {
defer wg.Done()
Log(fs, "Building file list")
dstFiles, listErr := readFilesMap(fs, includeAll)
if listErr != nil {
ErrorLog(fs, "Error building file list: %v", listErr)
*pErr = listErr
} else {
Debug(fs, "Done building file list")
*pMap = dstFiles
}
}
wg.Add(2)
go list(fdst, fdstIncludeAll, &dstFiles, &srcErr)
go list(fsrc, fsrcIncludeAll, &srcFiles, &dstErr)
wg.Wait()
if srcErr != nil {
err = srcErr
}
if dstErr != nil {
err = dstErr
}
return dstFiles, srcFiles, err
} }
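A hedged sketch, written as if inside package fs, of using the parallel map
builder above to compare two remotes (reportMissing is invented for
illustration; Check below is the real consumer):

func reportMissing(fdst, fsrc Fs) error {
	dstFiles, srcFiles, err := readFilesMaps(fdst, false, fsrc, false)
	if err != nil {
		return err
	}
	for remote := range srcFiles {
		if _, found := dstFiles[remote]; !found {
			Debug(fdst, "%s: present in source, missing in destination", remote)
		}
	}
	return nil
}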
// Same returns true if fdst and fsrc point to the same underlying Fs // Same returns true if fdst and fsrc point to the same underlying Fs
@ -501,31 +546,11 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
return err return err
} }
Log(fdst, "Building file list") // Read the files of both source and destination in parallel
dstFiles, srcFiles, err := readFilesMaps(fdst, Config.Filter.DeleteExcluded, fsrc, false)
// Read the files of both source and destination if err != nil {
var listWg sync.WaitGroup return err
listWg.Add(2)
var dstFiles map[string]Object
var srcFiles map[string]Object
var srcObjects = make(ObjectsChan, Config.Transfers)
// Read dst files including excluded files if DeleteExcluded is set
go func() {
dstFiles = readFilesMap(fdst, Config.Filter.DeleteExcluded)
listWg.Done()
}()
// Read src file not including excluded files
go func() {
srcFiles = readFilesMap(fsrc, false)
listWg.Done()
for _, v := range srcFiles {
srcObjects <- v
} }
close(srcObjects)
}()
startDeletion := make(chan struct{}, 0) startDeletion := make(chan struct{}, 0)
@ -564,9 +589,6 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
DeleteFiles(toDelete) DeleteFiles(toDelete)
}() }()
// Wait for all files to be read
listWg.Wait()
// Start deleting, unless we must delete after transfer // Start deleting, unless we must delete after transfer
if Delete && !Config.DeleteAfter { if Delete && !Config.DeleteAfter {
close(startDeletion) close(startDeletion)
@@ -598,9 +620,7 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
 		}
 	}

-	go func() {
-		for src := range srcObjects {
-			remote := src.Remote()
+	for remote, src := range srcFiles {
 		if dst, dstFound := dstFiles[remote]; dstFound {
 			toBeChecked <- ObjectPair{src, dst}
 		} else {
@@ -609,7 +629,6 @@ func syncCopyMove(fdst, fsrc Fs, Delete bool, DoMove bool) error {
 		}
 	}
 	close(toBeChecked)
-	}()
Log(fdst, "Waiting for checks to finish") Log(fdst, "Waiting for checks to finish")
checkerWg.Wait() checkerWg.Wait()
@ -713,30 +732,11 @@ func checkIdentical(dst, src Object) bool {
// Check the files in fsrc and fdst according to Size and hash // Check the files in fsrc and fdst according to Size and hash
func Check(fdst, fsrc Fs) error { func Check(fdst, fsrc Fs) error {
dstFiles, srcFiles, err := readFilesMaps(fdst, false, fsrc, false)
if err != nil {
return err
}
differences := int32(0) differences := int32(0)
var (
wg sync.WaitGroup
dstFiles, srcFiles map[string]Object
)
wg.Add(2)
go func() {
defer wg.Done()
// Read the destination files
Log(fdst, "Building file list")
dstFiles = readFilesMap(fdst, false)
Debug(fdst, "Done building file list")
}()
go func() {
defer wg.Done()
// Read the source files
Log(fsrc, "Building file list")
srcFiles = readFilesMap(fsrc, false)
Debug(fdst, "Done building file list")
}()
wg.Wait()
// FIXME could do this as it goes along and make it use less // FIXME could do this as it goes along and make it use less
// memory. // memory.
@ -800,13 +800,21 @@ func Check(fdst, fsrc Fs) error {
// //
// Lists in parallel which may get them out of order // Lists in parallel which may get them out of order
func ListFn(f Fs, fn func(Object)) error { func ListFn(f Fs, fn func(Object)) error {
in := f.List() list := NewLister().SetFilter(Config.Filter).Start(f)
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(Config.Checkers) wg.Add(Config.Checkers)
for i := 0; i < Config.Checkers; i++ { for i := 0; i < Config.Checkers; i++ {
go func() { go func() {
defer wg.Done() defer wg.Done()
for o := range in { for {
o, err := list.GetObject()
if err != nil {
log.Fatal(err)
}
// check if we are finished
if o == nil {
return
}
if Config.Filter.IncludeObject(o) { if Config.Filter.IncludeObject(o) {
fn(o) fn(o)
} }
@ -901,7 +909,15 @@ func Count(f Fs) (objects int64, size int64, err error) {
// ListDir lists the directories/buckets/containers in the Fs to the supplied writer // ListDir lists the directories/buckets/containers in the Fs to the supplied writer
func ListDir(f Fs, w io.Writer) error { func ListDir(f Fs, w io.Writer) error {
for dir := range f.ListDir() { list := NewLister().SetLevel(1).Start(f)
for {
dir, err := list.GetDir()
if err != nil {
log.Fatal(err)
}
if dir == nil {
break
}
syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name) syncFprintf(w, "%12d %13s %9d %s\n", dir.Bytes, dir.When.Format("2006-01-02 15:04:05"), dir.Count, dir.Name)
} }
return nil return nil
@ -960,7 +976,8 @@ func Purge(f Fs) error {
} }
if doFallbackPurge { if doFallbackPurge {
// DeleteFiles and Rmdir observe --dry-run // DeleteFiles and Rmdir observe --dry-run
DeleteFiles(f.List()) list := NewLister().Start(f)
DeleteFiles(listToChan(list))
err = Rmdir(f) err = Rmdir(f)
} }
if err != nil { if err != nil {
@ -1115,7 +1132,16 @@ func (mode DeduplicateMode) String() string {
func Deduplicate(f Fs, mode DeduplicateMode) error { func Deduplicate(f Fs, mode DeduplicateMode) error {
Log(f, "Looking for duplicates using %v mode.", mode) Log(f, "Looking for duplicates using %v mode.", mode)
files := map[string][]Object{} files := map[string][]Object{}
for o := range f.List() { list := NewLister().Start(f)
for {
o, err := list.GetObject()
if err != nil {
return err
}
// Check if we are finished
if o == nil {
break
}
remote := o.Remote() remote := o.Remote()
files[remote] = append(files[remote], o) files[remote] = append(files[remote], o)
} }
@ -1149,3 +1175,34 @@ func Deduplicate(f Fs, mode DeduplicateMode) error {
} }
return nil return nil
} }
// listToChan will transfer all incoming objects to a new channel.
//
// If an error occurs, the error will be logged, and it will close the
// channel.
//
// If the error was ErrorDirNotFound then it will be ignored
func listToChan(list *Lister) ObjectsChan {
o := make(ObjectsChan, Config.Checkers)
go func() {
defer close(o)
for {
obj, dir, err := list.Get()
if err != nil {
if err != ErrorDirNotFound {
Stats.Error()
ErrorLog(nil, "Failed to list: %v", err)
}
return
}
if dir == nil && obj == nil {
return
}
			if obj == nil {
				continue
			}
o <- obj
}
}()
return o
}
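listToChan exists so that channel based helpers keep working during the
transition. A hedged sketch, written as if inside package fs, mirroring the
Purge fallback earlier in this file (deleteAll is an invented name):

// deleteAll removes everything the lister yields, reusing the channel
// based DeleteFiles helper via the listToChan adapter.
func deleteAll(f Fs) {
	list := NewLister().Start(f)
	DeleteFiles(listToChan(list))
}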


@ -140,11 +140,20 @@ func NewRun(t *testing.T) *Run {
r = new(Run) r = new(Run)
*r = *oneRun *r = *oneRun
r.cleanRemote = func() { r.cleanRemote = func() {
oldErrors := fs.Stats.GetErrors() list := fs.NewLister().Start(r.fremote)
fs.DeleteFiles(r.fremote.List()) for {
errors := fs.Stats.GetErrors() - oldErrors o, err := list.GetObject()
if errors != 0 { if err != nil {
t.Fatalf("%d errors while cleaning remote %v", errors, r.fremote) t.Fatalf("Error listing: %v", err)
}
// Check if we are finished
if o == nil {
break
}
err = o.Remove()
if err != nil {
t.Errorf("Error removing file: %v", err)
}
} }
// Check remote is empty // Check remote is empty
fstest.CheckItems(t, r.fremote) fstest.CheckItems(t, r.fremote)
@ -320,7 +329,12 @@ func TestCopyAfterDelete(t *testing.T) {
fstest.CheckItems(t, r.flocal) fstest.CheckItems(t, r.flocal)
fstest.CheckItems(t, r.fremote, file1) fstest.CheckItems(t, r.fremote, file1)
err := fs.CopyDir(r.fremote, r.flocal) err := fs.Mkdir(r.flocal)
if err != nil {
t.Fatalf("Mkdir failed: %v", err)
}
err = fs.CopyDir(r.fremote, r.flocal)
if err != nil { if err != nil {
t.Fatalf("Copy failed: %v", err) t.Fatalf("Copy failed: %v", err)
} }
@ -1167,7 +1181,16 @@ func TestDeduplicateRename(t *testing.T) {
t.Fatalf("fs.Deduplicate returned error: %v", err) t.Fatalf("fs.Deduplicate returned error: %v", err)
} }
for o := range r.fremote.List() { list := fs.NewLister().Start(r.fremote)
for {
o, err := list.GetObject()
if err != nil {
t.Fatalf("Listing failed: %v", err)
}
// Check if we are finished
if o == nil {
break
}
remote := o.Remote() remote := o.Remote()
if remote != "one-1.txt" && if remote != "one-1.txt" &&
remote != "one-2.txt" && remote != "one-2.txt" &&


@ -154,12 +154,13 @@ func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, precision ti
is := NewItems(items) is := NewItems(items)
oldErrors := fs.Stats.GetErrors() oldErrors := fs.Stats.GetErrors()
var objs []fs.Object var objs []fs.Object
var err error
const retries = 6 const retries = 6
sleep := time.Second / 2 sleep := time.Second / 2
for i := 1; i <= retries; i++ { for i := 1; i <= retries; i++ {
objs = nil objs, err = fs.NewLister().Start(f).GetObjects()
for obj := range f.List() { if err != nil && err != fs.ErrorDirNotFound {
objs = append(objs, obj) t.Fatalf("Error listing: %v", err)
} }
if len(objs) == len(items) { if len(objs) == len(items) {
// Put an extra sleep in if we did any retries just to make sure it really // Put an extra sleep in if we did any retries just to make sure it really


@ -129,8 +129,12 @@ func TestFsListEmpty(t *testing.T) {
// TestFsListDirEmpty tests listing the directories from an empty directory // TestFsListDirEmpty tests listing the directories from an empty directory
func TestFsListDirEmpty(t *testing.T) { func TestFsListDirEmpty(t *testing.T) {
skipIfNotOk(t) skipIfNotOk(t)
for obj := range remote.ListDir() { dirs, err := fs.NewLister().SetLevel(1).Start(remote).GetDirs()
t.Errorf("Found unexpected item %q", obj.Name) if err != nil {
t.Fatal(err)
}
for _, dir := range dirs {
t.Errorf("Found unexpected item %q", dir.Name)
} }
} }
@ -193,9 +197,13 @@ func TestFsListDirFile2(t *testing.T) {
skipIfNotOk(t) skipIfNotOk(t)
found := false found := false
for i := 1; i <= eventualConsistencyRetries; i++ { for i := 1; i <= eventualConsistencyRetries; i++ {
for obj := range remote.ListDir() { dirs, err := fs.NewLister().SetLevel(1).Start(remote).GetDirs()
if obj.Name != `hello? sausage` && obj.Name != `hello_ sausage` { if err != nil {
t.Errorf("Found unexpected item %q", obj.Name) t.Fatal(err)
}
for _, dir := range dirs {
if dir.Name != `hello? sausage` && dir.Name != `hello_ sausage` {
t.Errorf("Found unexpected item %q", dir.Name)
} else { } else {
found = true found = true
} }
@ -219,8 +227,12 @@ func TestFsListDirRoot(t *testing.T) {
t.Fatalf("Failed to make remote %q: %v", RemoteName, err) t.Fatalf("Failed to make remote %q: %v", RemoteName, err)
} }
found := false found := false
for obj := range rootRemote.ListDir() { dirs, err := fs.NewLister().SetLevel(1).Start(rootRemote).GetDirs()
if obj.Name == subRemoteLeaf { if err != nil {
t.Fatal(err)
}
for _, dir := range dirs {
if dir.Name == subRemoteLeaf {
found = true found = true
} }
} }
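The SetLevel(1) calls above restrict the listing to a single level, so fetching just the top-level directories of a remote reduces to the following (sketch; printTopLevelDirs is a hypothetical helper):

package example

import (
	"fmt"

	"github.com/ncw/rclone/fs"
)

// printTopLevelDirs lists only the directories in the root of the
// remote (level 1 = no recursion) and prints their names.
func printTopLevelDirs(f fs.Fs) error {
	dirs, err := fs.NewLister().SetLevel(1).Start(f).GetDirs()
	if err != nil {
		return err
	}
	for _, dir := range dirs {
		fmt.Println(dir.Name)
	}
	return nil
}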
@ -243,8 +255,11 @@ func TestFsListRoot(t *testing.T) {
f2 := subRemoteLeaf + "/" + file2.Path f2 := subRemoteLeaf + "/" + file2.Path
f2Alt := subRemoteLeaf + "/" + file2.WinPath f2Alt := subRemoteLeaf + "/" + file2.WinPath
count := 0 count := 0
errors := fs.Stats.GetErrors() objs, err := fs.NewLister().Start(rootRemote).GetObjects()
for obj := range rootRemote.List() { if err != nil {
t.Fatal(err)
}
for _, obj := range objs {
count++ count++
if obj.Remote() == f1 { if obj.Remote() == f1 {
found1 = true found1 = true
@ -253,17 +268,12 @@ func TestFsListRoot(t *testing.T) {
found2 = true found2 = true
} }
} }
errors -= fs.Stats.GetErrors()
if count == 0 { if count == 0 {
if errors == 0 { // Nothing found is OK
t.Error("Expecting error if count==0")
}
return return
} }
if found1 && found2 { if found1 && found2 {
if errors != 0 { // Both found is OK
t.Error("Not expecting error if found")
}
return return
} }
t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", f1, found1, f2, found2, count) t.Errorf("Didn't find %q (%v) and %q (%v) or no files (count %d)", f1, found1, f2, found2, count)

@ -15,6 +15,7 @@ FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 erro
import ( import (
"encoding/base64" "encoding/base64"
"encoding/hex" "encoding/hex"
"errors"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
@ -294,38 +295,48 @@ func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil) return f.newFsObjectWithInfo(remote, nil)
} }
// listFn is called from list to handle an object.
type listFn func(remote string, object *storage.Object, isDirectory bool) error
// list the objects into the function supplied // list the objects into the function supplied
// //
// If directories is set it only sends directories // If directories is set it only sends directories
func (f *Fs) list(directories bool, fn func(string, *storage.Object)) { func (f *Fs) list(level int, fn listFn) error {
list := f.svc.Objects.List(f.bucket).Prefix(f.root).MaxResults(listChunks) list := f.svc.Objects.List(f.bucket).Prefix(f.root).MaxResults(listChunks)
if directories { switch level {
case 1:
list = list.Delimiter("/") list = list.Delimiter("/")
case fs.MaxLevel:
default:
return fs.ErrorLevelNotSupported
} }
rootLength := len(f.root) rootLength := len(f.root)
for { for {
objects, err := list.Do() objects, err := list.Do()
if err != nil { if err != nil {
fs.Stats.Error() return err
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err) }
return if level == 1 {
var object storage.Object
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
err = fn(prefix[:len(prefix)-1], &object, true)
if err != nil {
return err
}
}
} }
if !directories {
for _, object := range objects.Items { for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, f.root) { if !strings.HasPrefix(object.Name, f.root) {
fs.Log(f, "Odd name received %q", object.Name) fs.Log(f, "Odd name received %q", object.Name)
continue continue
} }
remote := object.Name[rootLength:] remote := object.Name[rootLength:]
fn(remote, object) err = fn(remote, object, false)
} if err != nil {
} else { return err
var object storage.Object
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
continue
}
fn(prefix[:len(prefix)-1], &object)
} }
} }
if objects.NextPageToken == "" { if objects.NextPageToken == "" {
@ -333,56 +344,68 @@ func (f *Fs) list(directories bool, fn func(string, *storage.Object)) {
} }
list.PageToken(objects.NextPageToken) list.PageToken(objects.NextPageToken)
} }
return nil
} }
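The level handling above is the convention the bucket-based remotes share in this change: level 1 maps to a delimiter ("this directory only") listing, fs.MaxLevel to a fully recursive one, and anything else is rejected. Distilled into a sketch (delimiterForLevel is a hypothetical helper):

package example

import "github.com/ncw/rclone/fs"

// delimiterForLevel converts a listing level into the delimiter a
// bucket listing API expects: "/" for a single level, "" (no
// delimiter, i.e. recurse fully) for fs.MaxLevel.
func delimiterForLevel(level int) (string, error) {
	switch level {
	case 1:
		return "/", nil
	case fs.MaxLevel:
		return "", nil
	default:
		return "", fs.ErrorLevelNotSupported
	}
}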
// List walks the path returning a channel of FsObjects // listFiles lists files and directories to out
func (f *Fs) List() fs.ObjectsChan { func (f *Fs) listFiles(out fs.ListOpts) {
out := make(fs.ObjectsChan, fs.Config.Checkers) defer out.Finished()
if f.bucket == "" { if f.bucket == "" {
// Return no objects at top level list out.SetError(fmt.Errorf("Can't list objects at root - choose a bucket using lsd"))
close(out) return
fs.Stats.Error() }
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
} else {
// List the objects // List the objects
go func() { err := f.list(out.Level(), func(remote string, object *storage.Object, isDirectory bool) error {
defer close(out) if isDirectory {
f.list(false, func(remote string, object *storage.Object) { dir := &fs.Dir{
if fs := f.newFsObjectWithInfo(remote, object); fs != nil { Name: remote,
out <- fs Bytes: int64(object.Size),
Count: 0,
} }
if out.AddDir(dir) {
return fs.ErrorListAborted
}
} else {
if o := f.newFsObjectWithInfo(remote, object); o != nil {
if out.Add(o) {
return fs.ErrorListAborted
}
}
}
return nil
}) })
}() if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
out.SetError(err)
} }
return out
} }
// ListDir lists the buckets // listBuckets lists the buckets to out
func (f *Fs) ListDir() fs.DirChan { func (f *Fs) listBuckets(out fs.ListOpts) {
out := make(fs.DirChan, fs.Config.Checkers) defer out.Finished()
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
if f.projectNumber == "" { if f.projectNumber == "" {
fs.Stats.Error() out.SetError(errors.New("Can't list buckets without project number"))
fs.ErrorLog(f, "Can't list buckets without project number")
return return
} }
listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks) listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
for { for {
buckets, err := listBuckets.Do() buckets, err := listBuckets.Do()
if err != nil { if err != nil {
fs.Stats.Error() out.SetError(err)
fs.ErrorLog(f, "Couldn't list buckets: %v", err) return
break }
} else {
for _, bucket := range buckets.Items { for _, bucket := range buckets.Items {
out <- &fs.Dir{ dir := &fs.Dir{
Name: bucket.Name, Name: bucket.Name,
Bytes: 0, Bytes: 0,
Count: 0, Count: 0,
} }
if out.AddDir(dir) {
return
} }
} }
if buckets.NextPageToken == "" { if buckets.NextPageToken == "" {
@ -390,21 +413,16 @@ func (f *Fs) ListDir() fs.DirChan {
} }
listBuckets.PageToken(buckets.NextPageToken) listBuckets.PageToken(buckets.NextPageToken)
} }
}() }
// List lists the path to out
func (f *Fs) List(out fs.ListOpts) {
if f.bucket == "" {
f.listBuckets(out)
} else { } else {
// List the directories in the path in the bucket f.listFiles(out)
go func() {
defer close(out)
f.list(true, func(remote string, object *storage.Object) {
out <- &fs.Dir{
Name: remote,
Bytes: int64(object.Size),
Count: 0,
} }
}) return
}()
}
return out
} }
// Put the object into the bucket // Put the object into the bucket

@ -6,6 +6,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"os" "os"
"path"
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime" "runtime"
@ -137,42 +138,123 @@ func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil) return f.newFsObjectWithInfo(remote, nil)
} }
// List the path returning a channel of FsObjects // listArgs is the arguments that a new list takes
type listArgs struct {
remote string
dirpath string
level int
}
// list traverses the directory passed in, listing to out.
// it returns a boolean whether it is finished or not.
func (f *Fs) list(out fs.ListOpts, remote string, dirpath string, level int) (subdirs []listArgs) {
fd, err := os.Open(dirpath)
if err != nil {
out.SetError(err)
fs.Stats.Error()
fs.ErrorLog(f, "Failed to open directory: %s: %s", dirpath, err)
return nil
}
defer func() {
err := fd.Close()
if err != nil {
out.SetError(err)
fs.Stats.Error()
fs.ErrorLog(f, "Failed to close directory: %s: %s", dirpath, err)
}
}()
for {
fis, err := fd.Readdir(1024)
if err == io.EOF && len(fis) == 0 {
break
}
if err != nil {
out.SetError(err)
fs.Stats.Error()
fs.ErrorLog(f, "Failed to read directory: %s: %s", dirpath, err)
return nil
}
for _, fi := range fis {
name := fi.Name()
newRemote := path.Join(remote, name)
newPath := filepath.Join(dirpath, name)
if fi.IsDir() {
if out.IncludeDirectory(newRemote) {
dir := &fs.Dir{
Name: f.cleanUtf8(newRemote),
When: fi.ModTime(),
Bytes: 0,
Count: 0,
}
if out.AddDir(dir) {
return nil
}
if level > 0 {
subdirs = append(subdirs, listArgs{remote: newRemote, dirpath: newPath, level: level - 1})
}
}
} else {
if fso := f.newFsObjectWithInfo(newRemote, fi); fso != nil {
if fso.Storable() && out.Add(fso) {
return nil
}
}
}
}
}
return subdirs
}
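Readdir is called in batches of 1024 above so that huge directories never have to be held in memory at once; the batching idiom on its own (a sketch; readDirBatched is a hypothetical helper):

package example

import (
	"io"
	"os"
)

// readDirBatched reads a directory in chunks of batch entries.
// Readdir returns io.EOF (with an empty slice) once the directory is
// exhausted, so the loop processes each chunk and then checks for it.
func readDirBatched(dirpath string, batch int, each func(os.FileInfo)) error {
	fd, err := os.Open(dirpath)
	if err != nil {
		return err
	}
	defer fd.Close()
	for {
		fis, err := fd.Readdir(batch)
		for _, fi := range fis {
			each(fi)
		}
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
	}
}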
// List the path into out
// //
// Ignores everything which isn't Storable, eg links etc // Ignores everything which isn't Storable, eg links etc
func (f *Fs) List() fs.ObjectsChan { func (f *Fs) List(out fs.ListOpts) {
out := make(fs.ObjectsChan, fs.Config.Checkers) defer out.Finished()
_, err := os.Stat(f.root)
if err != nil {
out.SetError(fs.ErrorDirNotFound)
fs.Stats.Error()
fs.ErrorLog(f, "Directory not found: %s: %s", f.root, err)
return
}
in := make(chan listArgs, out.Buffer())
var wg sync.WaitGroup // sync closing of go routines
var traversing sync.WaitGroup // running directory traversals
// Start the process
traversing.Add(1)
in <- listArgs{remote: "", dirpath: f.root, level: out.Level() - 1}
for i := 0; i < fs.Config.Checkers; i++ {
wg.Add(1)
go func() { go func() {
err := filepath.Walk(f.root, func(path string, fi os.FileInfo, err error) error { defer wg.Done()
if err != nil { for job := range in {
fs.Stats.Error() if out.IsFinished() {
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err) continue
} else {
remote, err := filepath.Rel(f.root, path)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Failed to get relative path %s: %s", path, err)
return nil
} }
if remote == "." { newJobs := f.list(out, job.remote, job.dirpath, job.level)
return nil // Now we have traversed this directory, send
// remote = "" // these ones off for traversal
if len(newJobs) != 0 {
traversing.Add(len(newJobs))
go func() {
for _, newJob := range newJobs {
in <- newJob
} }
if fs := f.newFsObjectWithInfo(remote, fi); fs != nil {
if fs.Storable() {
out <- fs
}
}
}
return nil
})
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Failed to open directory: %s: %s", f.root, err)
}
close(out)
}() }()
return out }
traversing.Done()
}
}()
}
// Wait for traversal to finish
traversing.Wait()
close(in)
wg.Wait()
} }
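The List implementation above fans the per-directory work out to fs.Config.Checkers goroutines. The same pattern, stripped of the fs types (a generic sketch; walkDirs and visit are illustrative names, not part of the commit):

package example

import "sync"

// walkDirs traverses a directory tree with a fixed pool of workers.
// visit lists one directory and returns the subdirectories to descend
// into; pending counts jobs that have been queued but not yet
// processed, so the jobs channel can be closed at the right moment.
func walkDirs(root string, workers int, visit func(dir string) (subdirs []string)) {
	jobs := make(chan string, workers)
	var wg sync.WaitGroup      // running workers
	var pending sync.WaitGroup // queued but unfinished jobs

	pending.Add(1)
	jobs <- root

	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for dir := range jobs {
				subdirs := visit(dir)
				if len(subdirs) != 0 {
					// Re-queue from another goroutine so a worker
					// never blocks sending to its own input channel.
					pending.Add(len(subdirs))
					go func(subdirs []string) {
						for _, d := range subdirs {
							jobs <- d
						}
					}(subdirs)
				}
				pending.Done()
			}
		}()
	}

	pending.Wait() // every queued job and all of its children are done
	close(jobs)
	wg.Wait()
}

Two WaitGroups are needed because closing the jobs channel too early would strand re-queued subdirectories; pending only reaches zero once every directory and all of its descendants have been listed.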
// CleanUtf8 makes string a valid UTF-8 string // CleanUtf8 makes string a valid UTF-8 string
@ -194,16 +276,17 @@ func (f *Fs) cleanUtf8(name string) string {
return name return name
} }
/*
// ListDir walks the path returning a channel of FsObjects // ListDir walks the path returning a channel of FsObjects
func (f *Fs) ListDir() fs.DirChan { func (f *Fs) ListDir(out fs.ListDirOpts) {
out := make(fs.DirChan, fs.Config.Checkers) defer out.Finished()
go func() {
defer close(out)
items, err := ioutil.ReadDir(f.root) items, err := ioutil.ReadDir(f.root)
if err != nil { if err != nil {
fs.Stats.Error() fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find read directory: %s", err) fs.ErrorLog(f, "Couldn't find read directory: %s", err)
} else { out.SetError(err)
return
}
for _, item := range items { for _, item := range items {
if item.IsDir() { if item.IsDir() {
dir := &fs.Dir{ dir := &fs.Dir{
@ -218,6 +301,7 @@ func (f *Fs) ListDir() fs.DirChan {
if err != nil { if err != nil {
fs.Stats.Error() fs.Stats.Error()
fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err) fs.ErrorLog(f, "Failed to open directory: %s: %s", path, err)
out.SetError(err)
} else { } else {
dir.Count++ dir.Count++
dir.Bytes += fi.Size() dir.Bytes += fi.Size()
@ -225,17 +309,17 @@ func (f *Fs) ListDir() fs.DirChan {
return nil return nil
}) })
if err != nil { if err != nil {
out.SetError(err)
fs.Stats.Error() fs.Stats.Error()
fs.ErrorLog(f, "Failed to open directory: %s: %s", dirpath, err) fs.ErrorLog(f, "Failed to open directory: %s: %s", dirpath, err)
} }
out <- dir if out.Add(dir) {
return
} }
} }
} }
// err := f.findRoot(false)
}()
return out
} }
*/
// Put the FsObject to the local filesystem // Put the FsObject to the local filesystem
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) { func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {

@ -10,7 +10,6 @@ import (
"net/http" "net/http"
"regexp" "regexp"
"strings" "strings"
"sync"
"time" "time"
"github.com/ncw/rclone/dircache" "github.com/ncw/rclone/dircache"
@ -369,98 +368,45 @@ OUTER:
return return
} }
// Path should be directory path either "" or "path/" // ListDir reads the directory specified by the job into out, returning any more jobs
// func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
// List the directory using a recursive list from the root fs.Debug(f, "Reading %q", job.Path)
// _, err = f.listAll(job.DirID, false, false, func(info *api.Item) bool {
// This fetches the minimum amount of stuff but does more API calls remote := job.Path + info.Name
// which makes it slow
func (f *Fs) listDirRecursive(dirID string, path string, out fs.ObjectsChan) error {
var subError error
// Make the API request
var wg sync.WaitGroup
_, err := f.listAll(dirID, false, false, func(info *api.Item) bool {
// Recurse on directories
if info.Folder != nil { if info.Folder != nil {
wg.Add(1) if out.IncludeDirectory(remote) {
folder := path + info.Name + "/"
fs.Debug(f, "Reading %s", folder)
go func() {
defer wg.Done()
err := f.listDirRecursive(info.ID, folder, out)
if err != nil {
subError = err
fs.ErrorLog(f, "Error reading %s:%s", folder, err)
}
}()
} else {
if fs := f.newObjectWithInfo(path+info.Name, info); fs != nil {
out <- fs
}
}
return false
})
wg.Wait()
fs.Debug(f, "Finished reading %s", path)
if err != nil {
return err
}
if subError != nil {
return subError
}
return nil
}
// List walks the path returning a channel of Objects
func (f *Fs) List() fs.ObjectsChan {
out := make(fs.ObjectsChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
err = f.listDirRecursive(f.dirCache.RootID(), "", out)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "List failed: %s", err)
}
}
}()
return out
}
// ListDir lists the directories
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
err := f.dirCache.FindRoot(false)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't find root: %s", err)
} else {
_, err := f.listAll(f.dirCache.RootID(), true, false, func(item *api.Item) bool {
dir := &fs.Dir{ dir := &fs.Dir{
Name: item.Name, Name: remote,
Bytes: -1, Bytes: -1,
Count: -1, Count: -1,
When: time.Time(item.LastModifiedDateTime), When: time.Time(info.LastModifiedDateTime),
}
if info.Folder != nil {
dir.Count = info.Folder.ChildCount
}
if out.AddDir(dir) {
return true
}
if job.Depth > 0 {
jobs = append(jobs, dircache.ListDirJob{DirID: info.ID, Path: remote + "/", Depth: job.Depth - 1})
}
}
} else {
if o := f.newObjectWithInfo(remote, info); o != nil {
if out.Add(o) {
return true
}
} }
if item.Folder != nil {
dir.Count = item.Folder.ChildCount
} }
out <- dir
return false return false
}) })
if err != nil { fs.Debug(f, "Finished reading %q", job.Path)
fs.Stats.Error() return jobs, err
fs.ErrorLog(f, "ListDir failed: %s", err) }
}
} // List walks the path returning files and directories into out
}() func (f *Fs) List(out fs.ListOpts) {
return out f.dirCache.List(f, out)
} }
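Each ListDir call now lists exactly one directory and hands back the child directories as further jobs, with Depth counting the levels still to descend. A sequential simplification of how such jobs get drained (hypothetical local types; the real dircache.List presumably runs jobs concurrently):

package example

// listDirJob describes one directory to list: its backend ID, its
// path in the remote and how many more levels to descend.
type listDirJob struct {
	dirID string
	path  string
	depth int
}

// runJobs drains a queue of directory jobs, appending whatever child
// jobs each listing returns, until the tree has been walked.
func runJobs(start listDirJob, listDir func(listDirJob) ([]listDirJob, error)) error {
	queue := []listDirJob{start}
	for len(queue) > 0 {
		job := queue[0]
		queue = queue[1:]
		newJobs, err := listDir(job)
		if err != nil {
			return err
		}
		queue = append(queue, newJobs...)
	}
	return nil
}

The Depth - 1 on each child job in the ListDir above is what lets a limited-level listing short-circuit the recursion.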
// Creates from the parameters passed in a half finished Object which // Creates from the parameters passed in a half finished Object which

s3/s3.go
@ -364,14 +364,21 @@ func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil) return f.newFsObjectWithInfo(remote, nil)
} }
// listFn is called from list to handle an object.
type listFn func(remote string, object *s3.Object, isDirectory bool) error
// list the objects into the function supplied // list the objects into the function supplied
// //
// If directories is set it only sends directories // Level is the level of the recursion
func (f *Fs) list(directories bool, fn func(string, *s3.Object)) { func (f *Fs) list(level int, fn listFn) error {
maxKeys := int64(listChunkSize) maxKeys := int64(listChunkSize)
delimiter := "" delimiter := ""
if directories { switch level {
case 1:
delimiter = "/" delimiter = "/"
case fs.MaxLevel:
default:
return fs.ErrorLevelNotSupported
} }
var marker *string var marker *string
for { for {
@ -385,12 +392,10 @@ func (f *Fs) list(directories bool, fn func(string, *s3.Object)) {
} }
resp, err := f.c.ListObjects(&req) resp, err := f.c.ListObjects(&req)
if err != nil { if err != nil {
fs.Stats.Error() return err
fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err) }
break
} else {
rootLength := len(f.root) rootLength := len(f.root)
if directories { if level == 1 {
for _, commonPrefix := range resp.CommonPrefixes { for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil { if commonPrefix.Prefix == nil {
fs.Log(f, "Nil common prefix received") fs.Log(f, "Nil common prefix received")
@ -405,9 +410,12 @@ func (f *Fs) list(directories bool, fn func(string, *s3.Object)) {
if strings.HasSuffix(remote, "/") { if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1] remote = remote[:len(remote)-1]
} }
fn(remote, &s3.Object{Key: &remote}) err = fn(remote, &s3.Object{Key: &remote}, true)
if err != nil {
return err
}
}
} }
} else {
for _, object := range resp.Contents { for _, object := range resp.Contents {
key := aws.StringValue(object.Key) key := aws.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) { if !strings.HasPrefix(key, f.root) {
@ -415,7 +423,9 @@ func (f *Fs) list(directories bool, fn func(string, *s3.Object)) {
continue continue
} }
remote := key[rootLength:] remote := key[rootLength:]
fn(remote, object) err = fn(remote, object, false)
if err != nil {
return err
} }
} }
if !aws.BoolValue(resp.IsTruncated) { if !aws.BoolValue(resp.IsTruncated) {
@ -428,71 +438,81 @@ func (f *Fs) list(directories bool, fn func(string, *s3.Object)) {
marker = resp.NextMarker marker = resp.NextMarker
} }
} }
} return nil
} }
// List walks the path returning a channel of FsObjects // listFiles lists files and directories to out
func (f *Fs) List() fs.ObjectsChan { func (f *Fs) listFiles(out fs.ListOpts) {
out := make(fs.ObjectsChan, fs.Config.Checkers) defer out.Finished()
if f.bucket == "" { if f.bucket == "" {
// Return no objects at top level list // Return no objects at top level list
close(out) out.SetError(errors.New("Can't list objects at root - choose a bucket using lsd"))
fs.Stats.Error() return
fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd") }
// List the objects and directories
err := f.list(out.Level(), func(remote string, object *s3.Object, isDirectory bool) error {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
dir := &fs.Dir{
Name: remote,
Bytes: size,
Count: 0,
}
if out.AddDir(dir) {
return fs.ErrorListAborted
}
} else { } else {
go func() { if o := f.newFsObjectWithInfo(remote, object); o != nil {
defer close(out) if out.Add(o) {
f.list(false, func(remote string, object *s3.Object) { return fs.ErrorListAborted
if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
out <- fs
} }
}
}
return nil
}) })
}() if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
if awsErr.StatusCode() == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
out.SetError(err)
} }
return out
} }
// ListDir lists the buckets // listBuckets lists the buckets to out
func (f *Fs) ListDir() fs.DirChan { func (f *Fs) listBuckets(out fs.ListOpts) {
out := make(fs.DirChan, fs.Config.Checkers) defer out.Finished()
if f.bucket == "" {
// List the buckets
go func() {
defer close(out)
req := s3.ListBucketsInput{} req := s3.ListBucketsInput{}
resp, err := f.c.ListBuckets(&req) resp, err := f.c.ListBuckets(&req)
if err != nil { if err != nil {
fs.Stats.Error() out.SetError(err)
fs.ErrorLog(f, "Couldn't list buckets: %s", err) return
} else { }
for _, bucket := range resp.Buckets { for _, bucket := range resp.Buckets {
out <- &fs.Dir{ dir := &fs.Dir{
Name: aws.StringValue(bucket.Name), Name: aws.StringValue(bucket.Name),
When: aws.TimeValue(bucket.CreationDate), When: aws.TimeValue(bucket.CreationDate),
Bytes: -1, Bytes: -1,
Count: -1, Count: -1,
} }
if out.AddDir(dir) {
break
} }
} }
}() }
// List lists files and directories to out
func (f *Fs) List(out fs.ListOpts) {
if f.bucket == "" {
f.listBuckets(out)
} else { } else {
// List the directories in the path in the bucket f.listFiles(out)
go func() {
defer close(out)
f.list(true, func(remote string, object *s3.Object) {
size := int64(0)
if object.Size != nil {
size = *object.Size
} }
out <- &fs.Dir{ return
Name: remote,
Bytes: size,
Count: 0,
}
})
}()
}
return out
} }
// Put the FsObject into the bucket // Put the FsObject into the bucket

@ -253,21 +253,25 @@ func (f *Fs) NewFsObject(remote string) fs.Object {
return f.newFsObjectWithInfo(remote, nil) return f.newFsObjectWithInfo(remote, nil)
} }
// listFn is called from list and listContainerRoot to handle an object // listFn is called from list and listContainerRoot to handle an object.
type listFn func(string, *swift.Object) error type listFn func(remote string, object *swift.Object, isDirectory bool) error
// listContainerRoot lists the objects into the function supplied from // listContainerRoot lists the objects into the function supplied from
// the container and root supplied // the container and root supplied
// //
// If directories is set it only sends directories // Level is the level of the recursion
func (f *Fs) listContainerRoot(container, root string, directories bool, fn listFn) error { func (f *Fs) listContainerRoot(container, root string, level int, fn listFn) error {
// Options for ObjectsWalk // Options for ObjectsWalk
opts := swift.ObjectsOpts{ opts := swift.ObjectsOpts{
Prefix: root, Prefix: root,
Limit: 256, Limit: 256,
} }
if directories { switch level {
case 1:
opts.Delimiter = '/' opts.Delimiter = '/'
case fs.MaxLevel:
default:
return fs.ErrorLevelNotSupported
} }
rootLength := len(root) rootLength := len(root)
return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) { return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
@ -275,19 +279,19 @@ func (f *Fs) listContainerRoot(container, root string, directories bool, fn list
if err == nil { if err == nil {
for i := range objects { for i := range objects {
object := &objects[i] object := &objects[i]
// FIXME if there are no directories, swift gives back the files for some reason! isDirectory := false
if directories { if level == 1 {
if !strings.HasSuffix(object.Name, "/") { if strings.HasSuffix(object.Name, "/") {
continue isDirectory = true
}
object.Name = object.Name[:len(object.Name)-1] object.Name = object.Name[:len(object.Name)-1]
} }
}
if !strings.HasPrefix(object.Name, root) { if !strings.HasPrefix(object.Name, root) {
fs.Log(f, "Odd name received %q", object.Name) fs.Log(f, "Odd name received %q", object.Name)
continue continue
} }
remote := object.Name[rootLength:] remote := object.Name[rootLength:]
err = fn(remote, object) err = fn(remote, object, isDirectory)
if err != nil { if err != nil {
break break
} }
@ -298,86 +302,79 @@ func (f *Fs) listContainerRoot(container, root string, directories bool, fn list
} }
// list the objects into the function supplied // list the objects into the function supplied
// func (f *Fs) list(level int, fn listFn) error {
// If directories is set it only sends directories return f.listContainerRoot(f.container, f.root, level, fn)
func (f *Fs) list(directories bool, fn listFn) {
err := f.listContainerRoot(f.container, f.root, directories, fn)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't read container %q: %s", f.container, err)
}
} }
// listFiles walks the path returning a channel of FsObjects // listFiles walks the path returning a channel of FsObjects
// //
// if ignoreStorable is set then it outputs the file even if Storable() is false // if ignoreStorable is set then it outputs the file even if Storable() is false
func (f *Fs) listFiles(ignoreStorable bool) fs.ObjectsChan { func (f *Fs) listFiles(out fs.ListOpts, ignoreStorable bool) {
out := make(fs.ObjectsChan, fs.Config.Checkers) defer out.Finished()
if f.container == "" { if f.container == "" {
// Return no objects at top level list out.SetError(errors.New("Can't list objects at root - choose a container using lsd"))
close(out) return
fs.Stats.Error() }
fs.ErrorLog(f, "Can't list objects at root - choose a container using lsd")
} else {
// List the objects // List the objects
go func() { err := f.list(out.Level(), func(remote string, object *swift.Object, isDirectory bool) error {
defer close(out) if isDirectory {
f.list(false, func(remote string, object *swift.Object) error { dir := &fs.Dir{
if o := f.newFsObjectWithInfo(remote, object); o != nil {
// Storable does a full metadata read on 0 size objects which might be dynamic large objects
storable := o.Storable()
if storable || ignoreStorable {
out <- o
}
}
return nil
})
}()
}
return out
}
// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
return f.listFiles(false)
}
// ListDir lists the containers
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
if f.container == "" {
// List the containers
go func() {
defer close(out)
containers, err := f.c.ContainersAll(nil)
if err != nil {
fs.Stats.Error()
fs.ErrorLog(f, "Couldn't list containers: %v", err)
} else {
for _, container := range containers {
out <- &fs.Dir{
Name: container.Name,
Bytes: container.Bytes,
Count: container.Count,
}
}
}
}()
} else {
// List the directories in the path in the container
go func() {
defer close(out)
f.list(true, func(remote string, object *swift.Object) error {
out <- &fs.Dir{
Name: remote, Name: remote,
Bytes: object.Bytes, Bytes: object.Bytes,
Count: 0, Count: 0,
} }
if out.AddDir(dir) {
return fs.ErrorListAborted
}
} else {
if o := f.newFsObjectWithInfo(remote, object); o != nil {
// Storable does a full metadata read on 0 size objects which might be dynamic large objects
storable := o.Storable()
if storable || ignoreStorable {
if out.Add(o) {
return fs.ErrorListAborted
}
}
}
}
return nil return nil
}) })
}() if err != nil {
if err == swift.ContainerNotFound {
err = fs.ErrorDirNotFound
} }
return out out.SetError(err)
}
}
// listContainers lists the containers
func (f *Fs) listContainers(out fs.ListOpts) {
defer out.Finished()
containers, err := f.c.ContainersAll(nil)
if err != nil {
out.SetError(err)
return
}
for _, container := range containers {
dir := &fs.Dir{
Name: container.Name,
Bytes: container.Bytes,
Count: container.Count,
}
if out.AddDir(dir) {
break
}
}
}
// List walks the path returning files and directories to out
func (f *Fs) List(out fs.ListOpts) {
if f.container == "" {
f.listContainers(out)
} else {
f.listFiles(out, false)
}
return
} }
// Put the object into the container // Put the object into the container
@ -427,7 +424,24 @@ func (f *Fs) Precision() time.Duration {
// //
// Implemented here so we can make sure we delete directory markers // Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge() error { func (f *Fs) Purge() error {
fs.DeleteFiles(f.listFiles(true)) // Delete all the files including the directory markers
toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
var err error
go func() {
err = f.list(fs.MaxLevel, func(remote string, object *swift.Object, isDirectory bool) error {
if !isDirectory {
if o := f.newFsObjectWithInfo(remote, object); o != nil {
toBeDeleted <- o
}
}
return nil
})
close(toBeDeleted)
}()
fs.DeleteFiles(toBeDeleted)
if err != nil {
return err
}
return f.Rmdir() return f.Rmdir()
} }
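Purge above streams objects into a channel that fs.DeleteFiles drains; reading err afterwards is safe because the error is written before the channel is closed, and DeleteFiles only returns after observing the close. The shape on its own (a sketch; deleteAll and produce are hypothetical):

package example

import "github.com/ncw/rclone/fs"

// deleteAll runs produce in a goroutine to stream objects into a
// channel while fs.DeleteFiles consumes them, then reports any
// listing error once the channel has been drained.
func deleteAll(produce func(chan<- fs.Object) error) error {
	toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
	var err error
	go func() {
		err = produce(toBeDeleted)
		close(toBeDeleted)
	}()
	fs.DeleteFiles(toBeDeleted)
	return err
}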
@ -611,7 +625,10 @@ func min(x, y int64) int64 {
// if except is passed in then segments with that prefix won't be deleted // if except is passed in then segments with that prefix won't be deleted
func (o *Object) removeSegments(except string) error { func (o *Object) removeSegments(except string) error {
segmentsRoot := o.fs.root + o.remote + "/" segmentsRoot := o.fs.root + o.remote + "/"
err := o.fs.listContainerRoot(o.fs.segmentsContainer, segmentsRoot, false, func(remote string, object *swift.Object) error { err := o.fs.listContainerRoot(o.fs.segmentsContainer, segmentsRoot, fs.MaxLevel, func(remote string, object *swift.Object, isDirectory bool) error {
if isDirectory {
return nil
}
if except != "" && strings.HasPrefix(remote, except) { if except != "" && strings.HasPrefix(remote, except) {
// fs.Debug(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer) // fs.Debug(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
return nil return nil

@ -163,10 +163,44 @@ func (f *Fs) setRoot(root string) {
f.diskRoot = diskRoot f.diskRoot = diskRoot
} }
// listFn is called from list and listContainerRoot to handle an object.
type listFn func(remote string, item *yandex.ResourceInfoResponse, isDirectory bool) error
// listDir lists this directory only returning objects and directories
func (f *Fs) listDir(fn listFn) (err error) {
//request object meta info
var opt yandex.ResourceInfoRequestOptions
ResourceInfoResponse, err := f.yd.NewResourceInfoRequest(f.diskRoot, opt).Exec()
if err != nil {
return err
}
if ResourceInfoResponse.ResourceType == "dir" {
//list all subdirs
for _, element := range ResourceInfoResponse.Embedded.Items {
remote := element.Name
switch element.ResourceType {
case "dir":
err = fn(remote, &element, true)
if err != nil {
return err
}
case "file":
err = fn(remote, &element, false)
if err != nil {
return err
}
default:
fs.Debug(f, "Unknown resource type %q", element.ResourceType)
}
}
}
return nil
}
// list the objects into the function supplied // list the objects into the function supplied
// //
// If directories is set it only sends directories // This does a flat listing of all the files in the drive
func (f *Fs) list(directories bool, fn func(string, yandex.ResourceInfoResponse)) { func (f *Fs) list(fn listFn) error {
//request files list. list is divided into pages. We send request for each page //request files list. list is divided into pages. We send request for each page
//items per page is limited by limit //items per page is limited by limit
//TODO may be add config parameter for the items per page limit //TODO may be add config parameter for the items per page limit
@ -182,9 +216,7 @@ func (f *Fs) list(directories bool, fn func(string, yandex.ResourceInfoResponse)
//send request //send request
info, err := f.yd.NewFlatFileListRequest(opt).Exec() info, err := f.yd.NewFlatFileListRequest(opt).Exec()
if err != nil { if err != nil {
fs.Stats.Error() return err
fs.ErrorLog(f, "Couldn't list: %s", err)
return
} }
itemsCount = uint32(len(info.Items)) itemsCount = uint32(len(info.Items))
@ -194,7 +226,10 @@ func (f *Fs) list(directories bool, fn func(string, yandex.ResourceInfoResponse)
if strings.HasPrefix(item.Path, f.diskRoot) { if strings.HasPrefix(item.Path, f.diskRoot) {
//trim root folder from filename //trim root folder from filename
var name = strings.TrimPrefix(item.Path, f.diskRoot) var name = strings.TrimPrefix(item.Path, f.diskRoot)
fn(name, item) err = fn(name, &item, false)
if err != nil {
return err
}
} }
} }
@ -205,21 +240,55 @@ func (f *Fs) list(directories bool, fn func(string, yandex.ResourceInfoResponse)
break break
} }
} }
return nil
} }
// List walks the path returning a channel of FsObjects // List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan { func (f *Fs) List(out fs.ListOpts) {
out := make(fs.ObjectsChan, fs.Config.Checkers) defer out.Finished()
// List the objects
go func() { listItem := func(remote string, object *yandex.ResourceInfoResponse, isDirectory bool) error {
defer close(out) if isDirectory {
f.list(false, func(remote string, object yandex.ResourceInfoResponse) { t, err := time.Parse(time.RFC3339Nano, object.Modified)
if fs := f.newFsObjectWithInfo(remote, &object); fs != nil { if err != nil {
out <- fs return err
}
dir := &fs.Dir{
Name: remote,
When: t,
Bytes: int64(object.Size),
Count: -1,
}
if out.AddDir(dir) {
return fs.ErrorListAborted
}
} else {
if o := f.newFsObjectWithInfo(remote, object); o != nil {
if out.Add(o) {
return fs.ErrorListAborted
}
}
}
return nil
}
var err error
switch out.Level() {
case 1:
err = f.listDir(listItem)
case fs.MaxLevel:
err = f.list(listItem)
default:
out.SetError(fs.ErrorLevelNotSupported)
}
if err != nil {
// FIXME
// if err == swift.ContainerNotFound {
// err = fs.ErrorDirNotFound
// }
out.SetError(err)
} }
})
}()
return out
} }
// NewFsObject returns an Object from a path // NewFsObject returns an Object from a path
@ -242,7 +311,7 @@ func (f *Fs) newFsObjectWithInfo(remote string, info *yandex.ResourceInfoRespons
} else { } else {
err := o.readMetaData() err := o.readMetaData()
if err != nil { if err != nil {
fs.ErrorLog(f, "Couldn't get object '%s' metadata: %s", o.remotePath(), err) fs.Debug(f, "Couldn't get object '%s' metadata: %s", o.remotePath(), err)
return nil return nil
} }
} }
@ -288,40 +357,6 @@ func (o *Object) readMetaData() (err error) {
return nil return nil
} }
// ListDir walks the path returning a channel of FsObjects
func (f *Fs) ListDir() fs.DirChan {
out := make(fs.DirChan, fs.Config.Checkers)
go func() {
defer close(out)
//request object meta info
var opt yandex.ResourceInfoRequestOptions
ResourceInfoResponse, err := f.yd.NewResourceInfoRequest(f.diskRoot, opt).Exec()
if err != nil {
return
}
if ResourceInfoResponse.ResourceType == "dir" {
//list all subdirs
for _, element := range ResourceInfoResponse.Embedded.Items {
if element.ResourceType == "dir" {
t, err := time.Parse(time.RFC3339Nano, element.Modified)
if err != nil {
return
}
out <- &fs.Dir{
Name: element.Name,
When: t,
Bytes: int64(element.Size),
Count: -1,
}
}
}
}
}()
return out
}
// Put the object // Put the object
// //
// Copy the reader in to the new object which is returned // Copy the reader in to the new object which is returned