mirror of https://github.com/rclone/rclone.git
synced 2025-01-03 04:49:47 +01:00

Implement single file operations for all file systems

This commit is contained in:
parent ca3752f824
commit 99695d57ab

README.md | 20
@@ -143,7 +143,8 @@ Swift / Rackspace cloudfiles / Memset Memstore
 ----------------------------------------------
 
 Paths are specified as remote:container (or remote: for the `lsd`
-command.)
+command.) You may put subdirectories in too, eg
+`remote:container/path/to/dir`.
 
 So to copy a local directory to a swift container called backup:
 
@@ -159,7 +160,8 @@ os.Stat) for an object.
 Amazon S3
 ---------
 
-Paths are specified as remote:bucket
+Paths are specified as remote:bucket. You may put subdirectories in
+too, eg `remote:bucket/path/to/dir`.
 
 So to copy a local directory to a s3 container called backup
 
@@ -183,6 +185,19 @@ To copy a local directory to a drive directory called backup
 
 Google drive stores modification times accurate to 1 ms.
 
+Single file copies
+------------------
+
+Rclone can copy single files
+
+    rclone src:path/to/file dst:path/dir
+
+Or
+
+    rclone src:path/to/file dst:path/to/file
+
+Note that you can't rename the file if you are copying from one file to another.
+
 License
 -------
 
@@ -192,7 +207,6 @@ COPYING file included in this package).
 Bugs
 ----
 
-* Doesn't sync individual files yet, only directories.
* Drive: Sometimes get: Failed to copy: Upload failed: googleapi: Error 403: Rate Limit Exceeded
   * quota is 100.0 requests/second/user
 * Empty directories left behind with Local and Drive
drive/drive.go

@@ -135,14 +135,15 @@ func (name tokenCache) PutToken(token *oauth.Token) error {
 
 // FsDrive represents a remote drive server
 type FsDrive struct {
     svc    *drive.Service // the connection to the drive server
     root   string         // the path we are working on
     client *http.Client   // authorized client
     about  *drive.About   // information about the drive, including the root
     rootId string         // Id of the root directory
-    foundRoot   sync.Once  // Whether we need to find the root directory or not
-    dirCache    dirCache   // Map of directory path to directory id
-    findDirLock sync.Mutex // Protect findDir from concurrent use
+    foundRoot    bool       // Whether we have found the root or not
+    findRootLock sync.Mutex // Protect findRoot from concurrent use
+    dirCache     dirCache   // Map of directory path to directory id
+    findDirLock  sync.Mutex // Protect findDir from concurrent use
 }
 
 // FsObjectDrive describes a drive object
@@ -305,7 +306,10 @@ func NewFs(name, path string) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
-    f := &FsDrive{root: root, dirCache: newDirCache()}
+    f := &FsDrive{
+        root:     root,
+        dirCache: newDirCache(),
+    }
 
     // Try to pull the token from the cache; if this fails, we need to get one.
     token, err := t.Config.TokenCache.Token()
@@ -331,14 +335,33 @@ func NewFs(name, path string) (fs.Fs, error) {
     f.rootId = f.about.RootFolderId
     // Put the root directory in
     f.dirCache.Put("", f.rootId)
+    // Find the current root
+    err = f.findRoot(false)
+    if err != nil {
+        // Assume it is a file
+        newRoot, remote := splitPath(root)
+        newF := *f
+        newF.root = newRoot
+        // Make new Fs which is the parent
+        err = newF.findRoot(false)
+        if err != nil {
+            // No root so return old f
+            return f, nil
+        }
+        obj, err := newF.newFsObjectWithInfo(remote, nil)
+        if err != nil {
+            // File doesn't exist so return old f
+            return f, nil
+        }
+        // return a Fs Limited to this object
+        return fs.NewLimited(&newF, obj), nil
+    }
     // fmt.Printf("Root id %s", f.rootId)
     return f, nil
 }
 
 // Return an FsObject from a path
-//
-// May return nil if an error occurred
-func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object {
+func (f *FsDrive) newFsObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
     fs := &FsObjectDrive{
         drive:  f,
         remote: remote,
@@ -349,9 +372,18 @@ func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object
         err := fs.readMetaData() // reads info and meta, returning an error
         if err != nil {
             // logged already fs.Debug("Failed to read info: %s", err)
-            return nil
+            return nil, err
         }
     }
+    return fs, nil
+}
+
+// Return an FsObject from a path
+//
+// May return nil if an error occurred
+func (f *FsDrive) NewFsObjectWithInfo(remote string, info *drive.File) fs.Object {
+    fs, _ := f.newFsObjectWithInfo(remote, info)
+    // Errors have already been logged
     return fs
 }
 
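The NewFs hunk above (@@ -331,14 +335,33 @@) is the heart of the change for Drive: if the requested root cannot be found as a directory, NewFs assumes it names a file, builds a second Fs rooted at the parent, and wraps the single object in a limited Fs. A rough, self-contained sketch of that decision flow follows; the store type and its lookups are made-up stand-ins for findRoot and the object lookup, not the real Drive API.

```go
package main

import (
	"fmt"
	"path"
)

// store stands in for what the real backend would discover remotely.
type store struct {
	dirs  map[string]bool
	files map[string]bool
}

// findRoot mimics FsDrive.findRoot: succeed only if root is a known directory.
func (s store) findRoot(root string) error {
	if s.dirs[root] {
		return nil
	}
	return fmt.Errorf("couldn't find directory %q", root)
}

// newFs sketches the fallback: try the root as a directory first, then as a file.
func (s store) newFs(root string) string {
	if s.findRoot(root) == nil {
		return "directory fs rooted at " + root
	}
	// Assume it is a file: split into parent directory and leaf name.
	parent, leaf := path.Dir(root), path.Base(root)
	if s.findRoot(parent) != nil || !s.files[root] {
		// Neither interpretation works - keep the old behaviour.
		return "directory fs rooted at " + root
	}
	return fmt.Sprintf("limited fs on %s exposing only %s", parent, leaf)
}

func main() {
	s := store{
		dirs:  map[string]bool{"backup": true, "backup/photos": true},
		files: map[string]bool{"backup/photos/cat.jpg": true},
	}
	fmt.Println(s.newFs("backup/photos"))         // normal directory fs
	fmt.Println(s.newFs("backup/photos/cat.jpg")) // limited fs with one object
}
```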
@@ -585,14 +617,21 @@ func (f *FsDrive) _findDir(path string, create bool) (pathId string, err error)
 //
 // If create is set it will make the directory if not found
 func (f *FsDrive) findRoot(create bool) error {
-    var err error
-    f.foundRoot.Do(func() {
-        f.rootId, err = f.findDir(f.root, create)
-        f.dirCache.Flush()
-        // Put the root directory in
-        f.dirCache.Put("", f.rootId)
-    })
-    return err
+    f.findRootLock.Lock()
+    defer f.findRootLock.Unlock()
+    if f.foundRoot {
+        return nil
+    }
+    rootId, err := f.findDir(f.root, create)
+    if err != nil {
+        return err
+    }
+    f.rootId = rootId
+    f.dirCache.Flush()
+    // Put the root directory in
+    f.dirCache.Put("", f.rootId)
+    f.foundRoot = true
+    return nil
 }
 
 // Walk the path returning a channel of FsObjects
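findRoot moves from sync.Once to an explicit bool guarded by a dedicated mutex, so a failed root lookup (which NewFs now provokes deliberately when probing whether the root is a file) can be retried later instead of being latched forever. A minimal sketch of that pattern with simplified, made-up types:

```go
package main

import (
	"fmt"
	"sync"
)

// rootFinder shows the pattern the struct change enables: a mutex plus a bool
// instead of sync.Once, so an unsuccessful attempt does not block later retries.
type rootFinder struct {
	mu        sync.Mutex
	foundRoot bool
	rootId    string
	lookup    func() (string, error) // stand-in for findDir(f.root, create)
}

func (r *rootFinder) findRoot() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.foundRoot {
		return nil // already resolved
	}
	id, err := r.lookup()
	if err != nil {
		return err // with sync.Once this failure would have been permanent
	}
	r.rootId = id
	r.foundRoot = true
	return nil
}

func main() {
	calls := 0
	r := &rootFinder{lookup: func() (string, error) {
		calls++
		if calls == 1 {
			return "", fmt.Errorf("root not found yet")
		}
		return "root-id-123", nil
	}}
	fmt.Println(r.findRoot()) // first attempt fails
	fmt.Println(r.findRoot()) // second attempt succeeds and caches the id
	fmt.Println(r.rootId, "looked up", calls, "times")
}
```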
fs/fs.go | 12

@@ -18,9 +18,15 @@ var (
 
 // Filesystem info
 type FsInfo struct {
-    Name    string                           // name of this fs
-    NewFs   func(string, string) (Fs, error) // create a new file system
-    Config  func(string)                     // function to call to help with config
+    // Name of this fs
+    Name string
+    // Create a new file system. If root refers to an existing
+    // object, then it should return a Fs which only returns that
+    // object.
+    NewFs func(name string, root string) (Fs, error)
+    // Function to call to help with config
+    Config func(string)
+    // Options for the Fs configuration
     Options []Option
 }
 
fs/limited.go | 88 (new file)

@@ -0,0 +1,88 @@
+package fs
+
+import (
+    "fmt"
+    "io"
+    "time"
+)
+
+// This defines a Limited Fs which can only return the Objects passed in from the Fs passed in
+type Limited struct {
+    objects []Object
+    fs      Fs
+}
+
+// NewLimited maks a limited Fs limited to the objects passed in
+func NewLimited(fs Fs, objects ...Object) Fs {
+    f := &Limited{
+        objects: objects,
+        fs:      fs,
+    }
+    return f
+}
+
+// String returns a description of the FS
+func (f *Limited) String() string {
+    return fmt.Sprintf("%s limited to %d objects", f.fs.String(), len(f.objects))
+}
+
+// List the Fs into a channel
+func (f *Limited) List() ObjectsChan {
+    out := make(ObjectsChan, Config.Checkers)
+    go func() {
+        for _, obj := range f.objects {
+            out <- obj
+        }
+        close(out)
+    }()
+    return out
+}
+
+// List the Fs directories/buckets/containers into a channel
+func (f *Limited) ListDir() DirChan {
+    out := make(DirChan, Config.Checkers)
+    close(out)
+    return out
+}
+
+// Find the Object at remote. Returns nil if can't be found
+func (f *Limited) NewFsObject(remote string) Object {
+    for _, obj := range f.objects {
+        if obj.Remote() == remote {
+            return obj
+        }
+    }
+    return nil
+}
+
+// Put in to the remote path with the modTime given of the given size
+//
+// May create the object even if it returns an error - if so
+// will return the object and the error, otherwise will return
+// nil and the error
+func (f *Limited) Put(in io.Reader, remote string, modTime time.Time, size int64) (Object, error) {
+    obj := f.NewFsObject(remote)
+    if obj == nil {
+        return nil, fmt.Errorf("Can't create %q in limited fs", remote)
+    }
+    return obj, obj.Update(in, modTime, size)
+}
+
+// Make the directory (container, bucket)
+func (f *Limited) Mkdir() error {
+    // All directories are already made - just ignore
+    return nil
+}
+
+// Remove the directory (container, bucket) if empty
+func (f *Limited) Rmdir() error {
+    return fmt.Errorf("Can't rmdir in limited fs")
+}
+
+// Precision of the ModTimes in this Fs
+func (f *Limited) Precision() time.Duration {
+    return f.fs.Precision()
+}
+
+// Check the interfaces are satisfied
+var _ Fs = &Limited{}
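The new file above is the mechanism every backend now shares: wrap the single found object so the rest of rclone sees an Fs that lists exactly one thing and refuses to create anything else. A toy, self-contained illustration of that behaviour; the Object and Fs interfaces here are cut-down stand-ins, not rclone's real ones.

```go
package main

import "fmt"

// Object and Fs are simplified stand-ins - just enough to show how a
// "limited" fs answers every query from a fixed set of objects.
type Object interface{ Remote() string }

type Fs interface {
	List() []Object
	NewFsObject(remote string) Object
}

type limited struct {
	objects []Object
	fs      Fs // the full backend, kept around for pass-through queries
}

func NewLimited(fs Fs, objects ...Object) Fs {
	return &limited{objects: objects, fs: fs}
}

func (f *limited) List() []Object { return f.objects }

func (f *limited) NewFsObject(remote string) Object {
	for _, obj := range f.objects {
		if obj.Remote() == remote {
			return obj
		}
	}
	return nil // anything outside the wrapped set is invisible
}

// minimal Object implementation for the demo
type obj string

func (o obj) Remote() string { return string(o) }

// minimal parent Fs for the demo
type parent struct{}

func (parent) List() []Object            { return nil }
func (parent) NewFsObject(string) Object { return nil }

func main() {
	f := NewLimited(parent{}, obj("path/to/file.txt"))
	fmt.Println(len(f.List()))                     // 1 - only the wrapped object
	fmt.Println(f.NewFsObject("path/to/file.txt")) // found
	fmt.Println(f.NewFsObject("other.txt"))        // <nil> - not visible
}
```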
local/local.go

@@ -45,6 +45,16 @@ type FsObjectLocal struct {
 func NewFs(name, root string) (fs.Fs, error) {
     root = path.Clean(root)
     f := &FsLocal{root: root}
+    // Check to see if this points to a file
+    fi, err := os.Lstat(f.root)
+    if err == nil && fi.Mode().IsRegular() {
+        // It is a file, so use the parent as the root
+        remote := path.Base(root)
+        f.root = path.Dir(root)
+        obj := f.NewFsObject(remote)
+        // return a Fs Limited to this object
+        return fs.NewLimited(f, obj), nil
+    }
     return f, nil
 }
 
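The local backend's version of the check is the simplest of the lot: one Lstat, then the same parent/leaf split. A small runnable sketch of just that detection step; the temporary directory and file names are made up for the demo.

```go
package main

import (
	"fmt"
	"os"
	"path"
	"path/filepath"
)

func main() {
	// Create a throwaway file so the "root" below points at a regular file.
	dir, err := os.MkdirTemp("", "single-file-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	file := filepath.Join(dir, "backup.tar")
	if err := os.WriteFile(file, []byte("data"), 0o600); err != nil {
		panic(err)
	}

	root := path.Clean(file)
	if fi, err := os.Lstat(root); err == nil && fi.Mode().IsRegular() {
		// Same split as the hunk above: the parent directory becomes the Fs
		// root and the basename becomes the single remote it will expose.
		fmt.Println("root:", path.Dir(root), "remote:", path.Base(root))
	}
}
```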
@@ -1,5 +1,4 @@
 Todo
-  * FIXME: ls without an argument for buckets/containers?
   * FIXME: More -dry-run checks for object transfer
   * Might be quicker to check md5sums first? for swift <-> swift certainly, and maybe for small files
   * swift: Ignoring the pseudo directories
@@ -12,7 +11,6 @@ Todo
   * make Account do progress meter
   * Make logging controllable with flags (mostly done)
   * -timeout: Make all timeouts be settable with command line parameters
-  * Check the locking in swift module!
   * Windows paths? Do we need to translate / and \?
   * Make a fs.Errorf and count errors and log them at a different level
   * Add max object size to fs metadata - 5GB for swift, infinite for local, ? for s3
@@ -22,7 +20,6 @@ Ideas
   * could do encryption - put IV into metadata?
   * optimise remote copy container to another container using remote
     copy if local is same as remote - use an optional Copier interface
-  * Allow subpaths container:/sub/path
   * support
     * sftp
     * scp
@@ -35,6 +32,8 @@ Need to make directory objects otherwise can't upload an empty directory
   * Or could upload empty directories only?
   * Can't purge a local filesystem because it leaves the directories behind
 
+Copying a single file? Or maybe with a glob pattern? Could do with LimitedFs
+
 s3
   * Can maybe set last modified?
   * https://forums.aws.amazon.com/message.jspa?messageID=214062
@@ -43,6 +42,7 @@ s3
 
 Bugs
   * Non verbose - not sure number transferred got counted up? CHECK
+  * When doing copy it recurses the whole of the destination FS which isn't necessary
 
 Making a release
   * go build ./...
rclone.go

@@ -106,7 +106,7 @@ var Commands = []Command{
         Name:     "lsd",
         ArgsHelp: "[remote://path]",
         Help: `
-        List all directoryes/objects/buckets in the the path.`,
+        List all directories/containers/buckets in the the path.`,
         Run: func(fdst, fsrc fs.Fs) {
             err := fs.ListDir(fdst)
             if err != nil {
s3/s3.go | 27

@@ -188,8 +188,8 @@ func s3Connection(name string) (*s3.S3, error) {
 }
 
 // NewFsS3 contstructs an FsS3 from the path, bucket:path
-func NewFs(name, path string) (fs.Fs, error) {
-    bucket, directory, err := s3ParsePath(path)
+func NewFs(name, root string) (fs.Fs, error) {
+    bucket, directory, err := s3ParsePath(root)
     if err != nil {
         return nil, err
     }
@@ -197,10 +197,6 @@ func NewFs(name, path string) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
-    // FIXME - check if it is a file before doing this and make a limited fs
-    if directory != "" {
-        directory += "/"
-    }
     f := &FsS3{
         c:      c,
         bucket: bucket,
@@ -208,6 +204,23 @@ func NewFs(name, path string) (fs.Fs, error) {
         perm:   s3.Private, // FIXME need user to specify
         root:   directory,
     }
+    if f.root != "" {
+        f.root += "/"
+        // Check to see if the object exists
+        _, err = f.b.Head(directory, nil)
+        if err == nil {
+            remote := path.Base(directory)
+            f.root = path.Dir(directory)
+            if f.root == "." {
+                f.root = ""
+            } else {
+                f.root += "/"
+            }
+            obj := f.NewFsObject(remote)
+            // return a Fs Limited to this object
+            return fs.NewLimited(f, obj), nil
+        }
+    }
     return f, nil
 }
 
@@ -328,7 +341,7 @@ func (f *FsS3) ListDir() fs.DirChan {
             }
         }()
     } else {
-        // List the directories in the path in the container
+        // List the directories in the path in the bucket
         go func() {
             defer close(out)
             f.list(true, func(remote string, object *s3.Key) {
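Both the s3 hunk above and the swift hunks below split an object path into a parent root and a leaf name, with a special case when path.Dir returns ".". A small runnable sketch of just that splitting rule; the helper name and inputs are made up.

```go
package main

import (
	"fmt"
	"path"
)

// splitRoot reproduces the splitting rule used when bucket:path (or
// container:path) turns out to name an object rather than a directory.
func splitRoot(directory string) (root, remote string) {
	remote = path.Base(directory)
	root = path.Dir(directory)
	if root == "." {
		// The object sits at the top of the bucket, so the parent root is empty.
		root = ""
	} else {
		// Keep the trailing slash that the listing code expects on non-empty roots.
		root += "/"
	}
	return root, remote
}

func main() {
	fmt.Println(splitRoot("path/to/file.txt")) // "path/to/" "file.txt"
	fmt.Println(splitRoot("file.txt"))         // "" "file.txt"
}
```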
swift/swift.go

@@ -5,6 +5,7 @@ import (
     "errors"
     "fmt"
     "io"
+    "path"
     "regexp"
     "strings"
     "time"
@@ -118,8 +119,8 @@ func swiftConnection(name string) (*swift.Connection, error) {
 }
 
 // NewFs contstructs an FsSwift from the path, container:path
-func NewFs(name, path string) (fs.Fs, error) {
-    container, directory, err := parsePath(path)
+func NewFs(name, root string) (fs.Fs, error) {
+    container, directory, err := parsePath(root)
     if err != nil {
         return nil, err
     }
@@ -127,15 +128,28 @@ func NewFs(name, path string) (fs.Fs, error) {
     if err != nil {
         return nil, err
     }
-    // FIXME - check if it is a file before doing this and make a limited fs
-    if directory != "" {
-        directory += "/"
-    }
     f := &FsSwift{
         c:         *c,
         container: container,
         root:      directory,
     }
+    if f.root != "" {
+        f.root += "/"
+        // Check to see if the object exists
+        _, _, err = f.c.Object(container, directory)
+        if err == nil {
+            remote := path.Base(directory)
+            f.root = path.Dir(directory)
+            if f.root == "." {
+                f.root = ""
+            } else {
+                f.root += "/"
+            }
+            obj := f.NewFsObject(remote)
+            // return a Fs Limited to this object
+            return fs.NewLimited(f, obj), nil
+        }
+    }
     return f, nil
 }
 