// Package local provides a filesystem interface
package local

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"
	"sync"
	"time"
	"unicode/utf8"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/lib/readers"
)

// Constants
const devUnset = 0xdeadbeefcafebabe                                       // a device id meaning it is unset
const linkSuffix = ".rclonelink"                                          // The suffix added to a translated symbolic link
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly

// Register with Fs
func init() {
	fsi := &fs.RegInfo{
		Name:        "local",
		Description: "Local Disk",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "nounc",
			Help: "Disable UNC (long path names) conversion on Windows",
			Examples: []fs.OptionExample{{
				Value: "true",
				Help:  "Disables long file names",
			}},
		}, {
			Name:     "copy_links",
			Help:     "Follow symlinks and copy the pointed to item.",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "L",
			Advanced: true,
		}, {
			Name:     "links",
			Help:     "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "l",
			Advanced: true,
		}, {
			Name: "skip_links",
			Help: `Don't warn about skipped symlinks.
This flag disables warning messages on skipped symlinks or junction
points, as you explicitly acknowledge that they should be skipped.`,
			Default:  false,
			NoPrefix: true,
			Advanced: true,
		}, {
			Name: "no_unicode_normalization",
			Help: `Don't apply unicode normalization to paths and filenames (Deprecated)

This flag is deprecated now. Rclone no longer normalizes unicode file
names, but it compares them with unicode normalization in the sync
routine instead.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "no_check_updated",
			Help: `Don't check to see if the files change during upload

Normally rclone checks the size and modification time of files as they
are being uploaded and aborts with a message which starts "can't copy
- source file is being updated" if the file changes during upload.

However on some file systems this modification time check may fail (eg
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
check can be disabled with this flag.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:     "one_file_system",
			Help:     "Don't cross filesystem boundaries (unix/macOS only).",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "x",
			Advanced: true,
		}, {
			Name: "case_sensitive",
			Help: `Force the filesystem to report itself as case sensitive.

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "case_insensitive",
			Help: `Force the filesystem to report itself as case insensitive

Normally the local backend declares itself as case insensitive on
Windows/macOS and case sensitive for everything else. Use this flag
to override the default choice.`,
			Default:  false,
			Advanced: true,
		}},
	}
	fs.Register(fsi)
}
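
// The backend is normally constructed by rclone itself through the
// registration above, but NewFs can also be called directly. Illustrative
// sketch (assumes the configmap.Simple map type from fs/config/configmap):
//
//	f, err := NewFs("local", "/tmp/data", configmap.Simple{"copy_links": "true"})
//	if err != nil && err != fs.ErrorIsFile {
//		// handle error
//	}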

// Options defines the configuration for this backend
type Options struct {
	FollowSymlinks    bool `config:"copy_links"`
	TranslateSymlinks bool `config:"links"`
	SkipSymlinks      bool `config:"skip_links"`
	NoUTFNorm         bool `config:"no_unicode_normalization"`
	NoCheckUpdated    bool `config:"no_check_updated"`
	NoUNC             bool `config:"nounc"`
	OneFileSystem     bool `config:"one_file_system"`
	CaseSensitive     bool `config:"case_sensitive"`
	CaseInsensitive   bool `config:"case_insensitive"`
}

// Fs represents a local filesystem rooted at root
type Fs struct {
	name        string              // the name of the remote
	root        string              // The root directory (OS path)
	opt         Options             // parsed config options
	features    *fs.Features        // optional features
	dev         uint64              // device number of root node
	precisionOk sync.Once           // Whether we need to read the precision
	precision   time.Duration       // precision of local filesystem
	wmu         sync.Mutex          // used for locking access to 'warned'.
	warned      map[string]struct{} // whether we have warned about this string
	// do os.Lstat or os.Stat
	lstat          func(name string) (os.FileInfo, error)
	dirNames       *mapper    // directory name mapping
	objectHashesMu sync.Mutex // global lock for Object.hashes
}

// Object represents a local filesystem object
type Object struct {
	fs             *Fs                  // The Fs this object is part of
	remote         string               // The remote path - properly UTF-8 encoded - for rclone
	path           string               // The local path - may not be properly UTF-8 encoded - for OS
	size           int64                // file metadata - always present
	mode           os.FileMode
	modTime        time.Time
	hashes         map[hash.Type]string // Hashes
	translatedLink bool                 // Is this object a translated link
}

// ------------------------------------------------------------

var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")

// NewFs constructs an Fs from the path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.TranslateSymlinks && opt.FollowSymlinks {
		return nil, errLinksAndCopyLinks
	}

	if opt.NoUTFNorm {
		fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
	}

	f := &Fs{
		name:     name,
		opt:      *opt,
		warned:   make(map[string]struct{}),
		dev:      devUnset,
		lstat:    os.Lstat,
		dirNames: newMapper(),
	}
	f.root = f.cleanPath(root)
	f.features = (&fs.Features{
		CaseInsensitive:         f.caseInsensitive(),
		CanHaveEmptyDirectories: true,
	}).Fill(f)
	if opt.FollowSymlinks {
		f.lstat = os.Stat
	}

	// Check to see if this points to a file
	fi, err := f.lstat(f.root)
	if err == nil {
		f.dev = readDevice(fi, f.opt.OneFileSystem)
	}
	if err == nil && f.isRegular(fi.Mode()) {
		// It is a file, so use the parent as the root
		f.root = filepath.Dir(f.root)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// Determine whether a file is a 'regular' file.
// Symlinks count as regular files only when the TranslateSymlinks
// option is in effect.
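//
// For example, with --links set, a symlink (os.ModeSymlink set, all other
// type bits clear) is accepted, while a directory or named pipe is not.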
func (f *Fs) isRegular(mode os.FileMode) bool {
	if !f.opt.TranslateSymlinks {
		return mode.IsRegular()
	}

	// fi.Mode().IsRegular() tests that all mode type bits are zero
	// Since symlinks are accepted, test that all other bits are zero,
	// except the symlink bit
	return mode&os.ModeType&^os.ModeSymlink == 0
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Local file system at %s", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// caseInsensitive returns whether the remote is case insensitive or not
func (f *Fs) caseInsensitive() bool {
	if f.opt.CaseSensitive {
		return false
	}
	if f.opt.CaseInsensitive {
		return true
	}
	// FIXME not entirely accurate since you can have case
	// sensitive Fses on darwin and case insensitive Fses on linux.
	// Should probably check but that would involve creating a
	// file in the remote to be most accurate which probably isn't
	// desirable.
	return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
}

// translateLink checks whether the remote is a translated link
// and returns a new path with the suffix removed as needed.
// It also reports whether this is a translated link at all.
//
// For regular files, dstPath is returned unchanged.
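//
// For example, with linkSuffix ".rclonelink":
//
//	translateLink("dir/link.rclonelink", "/root/dir/link.rclonelink")
//
// returns ("/root/dir/link", true).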
func translateLink(remote, dstPath string) (newDstPath string, isTranslatedLink bool) {
	isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
	newDstPath = strings.TrimSuffix(dstPath, linkSuffix)
	return newDstPath, isTranslatedLink
}

// newObject makes a half completed Object
//
// if dstPath is empty then it is made from remote
func (f *Fs) newObject(remote, dstPath string) *Object {
	translatedLink := false

	if dstPath == "" {
		dstPath = f.cleanPath(filepath.Join(f.root, remote))
	}
	remote = f.cleanRemote(remote)

	if f.opt.TranslateSymlinks {
		// Possibly receive a new name for dstPath
		dstPath, translatedLink = translateLink(remote, dstPath)
	}

	return &Object{
		fs:             f,
		remote:         remote,
		path:           dstPath,
		translatedLink: translatedLink,
	}
}

// Return an Object from a path
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
	o := f.newObject(remote, dstPath)
	if info != nil {
		o.setMetadata(info)
	} else {
		err := o.lstat()
		if err != nil {
			if os.IsNotExist(err) {
				return nil, fs.ErrorObjectNotFound
			}
			if os.IsPermission(err) {
				return nil, fs.ErrorPermissionDenied
			}
			return nil, err
		}
		// Handle the odd case, that a symlink was specified by name without the link suffix
		if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
			return nil, fs.ErrorObjectNotFound
		}
	}
	if o.mode.IsDir() {
		return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, "", nil)
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	dir = f.dirNames.Load(dir)
	fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
	remote := f.cleanRemote(dir)
	_, err = os.Stat(fsDirPath)
	if err != nil {
		return nil, fs.ErrorDirNotFound
	}

	fd, err := os.Open(fsDirPath)
	if err != nil {
		isPerm := os.IsPermission(err)
		err = errors.Wrapf(err, "failed to open directory %q", dir)
		fs.Errorf(dir, "%v", err)
		if isPerm {
			accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
			err = nil // ignore error but fail sync
		}
		return nil, err
	}
	defer func() {
		cerr := fd.Close()
		if cerr != nil && err == nil {
			err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
		}
	}()

	for {
		var fis []os.FileInfo
		if useReadDir {
			// Windows and Plan9 read the directory entries with the stat
			// information included, which shouldn't fail because of unreadable entries.
			fis, err = fd.Readdir(1024)
			if err == io.EOF && len(fis) == 0 {
				break
			}
		} else {
			// For other OSes we read the names only (which shouldn't fail) then stat the
			// individual files ourselves so we can log errors but not fail the directory read.
			var names []string
			names, err = fd.Readdirnames(1024)
			if err == io.EOF && len(names) == 0 {
				break
			}
			if err == nil {
				for _, name := range names {
					namepath := filepath.Join(fsDirPath, name)
					fi, fierr := os.Lstat(namepath)
					if fierr != nil {
						err = errors.Wrapf(err, "failed to read directory %q", namepath)
						fs.Errorf(dir, "%v", fierr)
						accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
						continue
					}
					fis = append(fis, fi)
				}
			}
		}
		if err != nil {
			return nil, errors.Wrap(err, "failed to read directory entry")
		}

		for _, fi := range fis {
			name := fi.Name()
			mode := fi.Mode()
			newRemote := path.Join(remote, name)
			newPath := filepath.Join(fsDirPath, name)
			// Follow symlinks if required
			if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
				fi, err = os.Stat(newPath)
				if os.IsNotExist(err) {
					// Skip bad symlinks
					err = fserrors.NoRetryError(errors.Wrap(err, "symlink"))
					fs.Errorf(newRemote, "Listing error: %v", err)
					accounting.Stats(ctx).Error(err)
					continue
				}
				if err != nil {
					return nil, err
				}
				mode = fi.Mode()
			}
			if fi.IsDir() {
				// Ignore directories which are symlinks. These are junction points under windows which
				// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
				if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
					d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
					entries = append(entries, d)
				}
			} else {
				// Check whether this link should be translated
				if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
					newRemote += linkSuffix
				}
				fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
				if err != nil {
					return nil, err
				}
				if fso.Storable() {
					entries = append(entries, fso)
				}
			}
		}
	}
	return entries, nil
}

// cleanRemote makes a string valid UTF-8 for use as a remote path.
//
// Any invalid UTF-8 characters will be replaced with utf8.RuneError.
// It also converts the path separators to slashes.
func (f *Fs) cleanRemote(name string) string {
	if !utf8.ValidString(name) {
		f.wmu.Lock()
		if _, ok := f.warned[name]; !ok {
			fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
			f.warned[name] = struct{}{}
		}
		f.wmu.Unlock()
		name = string([]rune(name))
	}
	name = filepath.ToSlash(name)
	return name
}

// mapper maps raw to cleaned directory names
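//
// For example, Save("raw\xffname", "rawname") records the mapping and
// returns the cleaned name; a later Load("rawname") returns "raw\xffname"
// so the original on-disk name can be recovered.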
type mapper struct {
	mu sync.RWMutex      // mutex to protect the below
	m  map[string]string // map of un-normalised directory names
}

func newMapper() *mapper {
	return &mapper{
		m: make(map[string]string),
	}
}

// Load looks up a cleaned directory name and returns the raw (original)
// name recorded by Save, or the input unchanged if none was recorded.
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Load(in string) string {
	m.mu.RLock()
	out, ok := m.m[in]
	m.mu.RUnlock()
	if ok {
		return out
	}
	return in
}

// Save records a cleaned directory name, remembering the raw name if it
// needed to be altered.
//
// FIXME this is temporary before we make a proper Directory object
func (m *mapper) Save(in, out string) string {
	if in != out {
		m.mu.Lock()
		m.m[out] = in
		m.mu.Unlock()
	}
	return out
}

// Put the Object to the local filesystem
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	// Temporary Object under construction - info filled in by Update()
	o := f.newObject(remote, "")
	err := o.Update(ctx, in, src, options...)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
	root := f.cleanPath(filepath.Join(f.root, dir))
	err := os.MkdirAll(root, 0777)
	if err != nil {
		return err
	}
	if dir == "" {
		fi, err := f.lstat(root)
		if err != nil {
			return err
		}
		f.dev = readDevice(fi, f.opt.OneFileSystem)
	}
	return nil
}

// Rmdir removes the directory
//
// If it isn't empty it will return an error
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	root := f.cleanPath(filepath.Join(f.root, dir))
	return os.Remove(root)
}

// Precision of the file system
func (f *Fs) Precision() (precision time.Duration) {
	f.precisionOk.Do(func() {
		f.precision = f.readPrecision()
	})
	return f.precision
}

// Read the precision
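//
// This probes the filesystem by creating a temporary file and setting its
// modification time with offsets of 1ns, 10ns, ... in increasing powers of
// ten below one second, returning the first offset stored exactly (or 1s
// if none is).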
func (f *Fs) readPrecision() (precision time.Duration) {
	// Default precision of 1s
	precision = time.Second

	// Create temporary file and test it
	fd, err := ioutil.TempFile("", "rclone")
	if err != nil {
		// If failed return 1s
		// fmt.Println("Failed to create temp file", err)
		return time.Second
	}
	path := fd.Name()
	// fmt.Println("Created temp file", path)
	err = fd.Close()
	if err != nil {
		return time.Second
	}

	// Delete it on return
	defer func() {
		// fmt.Println("Remove temp file")
		_ = os.Remove(path) // ignore error
	}()

	// Find the minimum duration we can detect
	for duration := time.Duration(1); duration < time.Second; duration *= 10 {
		// Current time with delta
		t := time.Unix(time.Now().Unix(), int64(duration))
		err := os.Chtimes(path, t, t)
		if err != nil {
			// fmt.Println("Failed to Chtimes", err)
			break
		}

		// Read the actual time back
		fi, err := os.Stat(path)
		if err != nil {
			// fmt.Println("Failed to Stat", err)
			break
		}

		// If it matches - have found the precision
		// fmt.Println("compare", fi.ModTime(), t)
		if fi.ModTime().Equal(t) {
			// fmt.Println("Precision detected as", duration)
			return duration
		}
	}
	return
}

// Purge deletes all the files and directories
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context) error {
	fi, err := f.lstat(f.root)
	if err != nil {
		return err
	}
	if !fi.Mode().IsDir() {
		return errors.Errorf("can't purge non directory: %q", f.root)
	}
	return os.RemoveAll(f.root)
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Temporary Object under construction
	dstObj := f.newObject(remote, "")

	// Check it is a file if it exists
	err := dstObj.lstat()
	if os.IsNotExist(err) {
		// OK
	} else if err != nil {
		return nil, err
	} else if !dstObj.fs.isRegular(dstObj.mode) {
		// It isn't a file
		return nil, errors.New("can't move file onto non-file")
	}

	// Create destination
	err = dstObj.mkdirAll()
	if err != nil {
		return nil, err
	}

	// Do the move
	err = os.Rename(srcObj.path, dstObj.path)
	if os.IsNotExist(err) {
		// race condition, source was deleted in the meantime
		return nil, err
	} else if os.IsPermission(err) {
		// not enough rights to write to dst
		return nil, err
	} else if err != nil {
		// not quite clear, but probably trying to move a file across file system
		// boundaries. Copying might still work.
		fs.Debugf(src, "Can't move: %v: trying copy", err)
		return nil, fs.ErrorCantMove
	}

	// Update the info
	err = dstObj.lstat()
	if err != nil {
		return nil, err
	}

	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
	dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))

	// Check if destination exists
	_, err := os.Lstat(dstPath)
	if !os.IsNotExist(err) {
		return fs.ErrorDirExists
	}

	// Create parent of destination
	dstParentPath := filepath.Dir(dstPath)
	err = os.MkdirAll(dstParentPath, 0777)
	if err != nil {
		return err
	}

	// Do the move
	err = os.Rename(srcPath, dstPath)
	if os.IsNotExist(err) {
		// race condition, source was deleted in the meantime
		return err
	} else if os.IsPermission(err) {
		// not enough rights to write to dst
		return err
	} else if err != nil {
		// not quite clear, but probably trying to move directory across file system
		// boundaries. Copying might still work.
		fs.Debugf(src, "Can't move dir: %v: trying copy", err)
		return fs.ErrorCantDirMove
	}
	return nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Supported
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	// Check that the underlying file hasn't changed
	oldtime := o.modTime
	oldsize := o.size
	err := o.lstat()
	if err != nil {
		return "", errors.Wrap(err, "hash: failed to stat")
	}

	o.fs.objectHashesMu.Lock()
	hashes := o.hashes
	hashValue, hashFound := o.hashes[r]
	o.fs.objectHashesMu.Unlock()

	if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil || !hashFound {
		var in io.ReadCloser

		if !o.translatedLink {
			in, err = file.Open(o.path)
		} else {
			in, err = o.openTranslatedLink(0, -1)
		}
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to open")
		}
		hashes, err = hash.StreamTypes(in, hash.NewHashSet(r))
		closeErr := in.Close()
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to read")
		}
		if closeErr != nil {
			return "", errors.Wrap(closeErr, "hash: failed to close")
		}
		hashValue = hashes[r]
		o.fs.objectHashesMu.Lock()
		if o.hashes == nil {
			o.hashes = hashes
		} else {
			o.hashes[r] = hashValue
		}
		o.fs.objectHashesMu.Unlock()
	}
	return hashValue, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the object
func (o *Object) ModTime(ctx context.Context) time.Time {
	return o.modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	var err error
	if o.translatedLink {
		err = lChtimes(o.path, modTime, modTime)
	} else {
		err = os.Chtimes(o.path, modTime, modTime)
	}
	if err != nil {
		return err
	}
	// Re-read metadata
	return o.lstat()
}

// Storable returns a boolean showing if this object is storable
func (o *Object) Storable() bool {
	// Check for control characters in the remote name and treat the object
	// as non-storable if any are found
	for _, c := range o.Remote() {
		if c >= 0x00 && c < 0x20 || c == 0x7F {
			fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
			return false
		}
	}
	mode := o.mode
	if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
		if !o.fs.opt.SkipSymlinks {
			fs.Logf(o, "Can't follow symlink without -L/--copy-links")
		}
		return false
	} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
		fs.Logf(o, "Can't transfer non file/directory")
		return false
	} else if mode&os.ModeDir != 0 {
		// fs.Debugf(o, "Skipping directory")
		return false
	}
	return true
}

// localOpenFile wraps an io.ReadCloser and updates the hashes of the
// object that is read
type localOpenFile struct {
	o    *Object           // object that is open
	in   io.ReadCloser     // handle we are wrapping
	hash *hash.MultiHasher // currently accumulating hashes
	fd   *os.File          // file object reference
}

// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
	if !file.o.fs.opt.NoCheckUpdated {
		// Check if file has the same size and modTime
		fi, err := file.fd.Stat()
		if err != nil {
			return 0, errors.Wrap(err, "can't read status of source file while transferring")
		}
		if file.o.size != fi.Size() {
			return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
		}
		if !file.o.modTime.Equal(fi.ModTime()) {
			return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
		}
	}

	n, err = file.in.Read(p)
	if n > 0 {
		// Hash routines never return an error
		_, _ = file.hash.Write(p[:n])
	}
	return
}

// Close the object and update the hashes
func (file *localOpenFile) Close() (err error) {
	err = file.in.Close()
	if err == nil {
		if file.hash.Size() == file.o.Size() {
			file.o.fs.objectHashesMu.Lock()
			file.o.hashes = file.hash.Sums()
			file.o.fs.objectHashesMu.Unlock()
		}
	}
	return err
}

// openTranslatedLink returns a ReadCloser with the contents of a symbolic link
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
	// Read the link and return its destination as the contents of the object
	linkdst, err := os.Readlink(o.path)
	if err != nil {
		return nil, err
	}
	return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	hashes := hash.Supported
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.size)
		case *fs.HashesOption:
			hashes = x.Hashes
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	// Handle a translated link
	if o.translatedLink {
		return o.openTranslatedLink(offset, limit)
	}

	fd, err := file.Open(o.path)
	if err != nil {
		return
	}
	wrappedFd := readers.NewLimitedReadCloser(fd, limit)
	if offset != 0 {
		// seek the object
		_, err = fd.Seek(offset, io.SeekStart)
		// don't attempt to make checksums
		return wrappedFd, err
	}
	hash, err := hash.NewMultiHasherTypes(hashes)
	if err != nil {
		return nil, err
	}
	// Update the hashes as we go along
	in = &localOpenFile{
		o:    o,
		in:   wrappedFd,
		hash: hash,
		fd:   fd,
	}
	return in, nil
}

// mkdirAll makes all the directories needed to store the object
func (o *Object) mkdirAll() error {
	dir := filepath.Dir(o.path)
	return os.MkdirAll(dir, 0777)
}
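
// nopWriterCloser adds a no-op Close method to a *bytes.Buffer so it can be
// used as the io.WriteCloser destination in Update when the contents of a
// translated symlink are collected in memory.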
type nopWriterCloser struct {
	*bytes.Buffer
}

func (nwc nopWriterCloser) Close() error {
	// noop
	return nil
}

// Update the object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	var out io.WriteCloser

	hashes := hash.Supported
	for _, option := range options {
		switch x := option.(type) {
		case *fs.HashesOption:
			hashes = x.Hashes
		}
	}

	err := o.mkdirAll()
	if err != nil {
		return err
	}

	var symlinkData bytes.Buffer
	// If the object is a regular file, create it.
	// If it is a translated link, just read in the contents, and
	// then create a symlink
	if !o.translatedLink {
		f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
		if err != nil {
			return err
		}
		// Pre-allocate the file for performance reasons
		err = preAllocate(src.Size(), f)
		if err != nil {
			fs.Debugf(o, "Failed to pre-allocate: %v", err)
		}
		out = f
	} else {
		out = nopWriterCloser{&symlinkData}
	}

	// Calculate the hash of the object we are reading as we go along
	hash, err := hash.NewMultiHasherTypes(hashes)
	if err != nil {
		return err
	}
	in = io.TeeReader(in, hash)

	_, err = io.Copy(out, in)
	closeErr := out.Close()
	if err == nil {
		err = closeErr
	}

	if o.translatedLink {
		if err == nil {
			// Remove any current symlink or file, if one exists
			if _, err := os.Lstat(o.path); err == nil {
				if removeErr := os.Remove(o.path); removeErr != nil {
					fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
					return removeErr
				}
			}
			// Use the contents for the copied object to create a symlink
			err = os.Symlink(symlinkData.String(), o.path)
		}

		// only continue if symlink creation succeeded
		if err != nil {
			return err
		}
	}

	if err != nil {
		fs.Logf(o, "Removing partially written file on error: %v", err)
		if removeErr := os.Remove(o.path); removeErr != nil {
			fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
		}
		return err
	}

	// All successful so update the hashes
	o.fs.objectHashesMu.Lock()
	o.hashes = hash.Sums()
	o.fs.objectHashesMu.Unlock()

	// Set the mtime
	err = o.SetModTime(ctx, src.ModTime(ctx))
	if err != nil {
		return err
	}

	// ReRead info now that we have finished
	return o.lstat()
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	// Temporary Object under construction
	o := f.newObject(remote, "")

	err := o.mkdirAll()
	if err != nil {
		return nil, err
	}

	if o.translatedLink {
		return nil, errors.New("can't open a symlink for random writing")
	}

	out, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		return nil, err
	}
	// Pre-allocate the file for performance reasons
	err = preAllocate(size, out)
	if err != nil {
		fs.Debugf(o, "Failed to pre-allocate: %v", err)
	}
	return out, nil
}

// setMetadata sets the file info from the os.FileInfo passed in
func (o *Object) setMetadata(info os.FileInfo) {
	// Don't overwrite the info if we don't need to
	// this avoids upsetting the race detector
	if o.size != info.Size() {
		o.size = info.Size()
	}
	if !o.modTime.Equal(info.ModTime()) {
		o.modTime = info.ModTime()
	}
	if o.mode != info.Mode() {
		o.mode = info.Mode()
	}
}

// Stat an Object into info
func (o *Object) lstat() error {
	info, err := o.fs.lstat(o.path)
	if err == nil {
		o.setMetadata(info)
	}
	return err
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	return remove(o.path)
}

// cleanPathFragment cleans an OS path fragment which is part of a
// bigger path and not necessarily absolute
func cleanPathFragment(s string) string {
	if s == "" {
		return s
	}
	s = filepath.Clean(s)
	if runtime.GOOS == "windows" {
		s = strings.Replace(s, `/`, `\`, -1)
	}
	return s
}

// cleanPath cleans and makes absolute the path passed in and returns
// an OS path.
//
// The input might be in OS form or rclone form or a mixture, but the
// output is in OS form.
//
// On windows it also converts the path to UNC form and replaces any
// characters Windows can't deal with using their replacements.
func (f *Fs) cleanPath(s string) string {
	s = cleanPathFragment(s)
	if runtime.GOOS == "windows" {
		if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
			s2, err := filepath.Abs(s)
			if err == nil {
				s = s2
			}
		}
		if !f.opt.NoUNC {
			// Convert to UNC
			s = uncPath(s)
		}
		s = cleanWindowsName(f, s)
	} else {
		if !filepath.IsAbs(s) {
			s2, err := filepath.Abs(s)
			if err == nil {
				s = s2
			}
		}
	}
	return s
}

// Pattern to match a windows absolute path: "c:\" and similar
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)

// uncPath converts an absolute Windows path
// to a UNC long path.
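//
// For example:
//
//	C:\path\to\file -> \\?\C:\path\to\file
//	\\server\share  -> \\?\UNC\server\share
//
// Paths which already carry the \\?\ prefix are returned unchanged.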
func uncPath(s string) string {
	// UNC can NOT use "/", so convert all to "\"
	s = strings.Replace(s, `/`, `\`, -1)

	// If prefix is "\\", we already have a UNC path or server.
	if strings.HasPrefix(s, `\\`) {
		// If already long path, just keep it
		if strings.HasPrefix(s, `\\?\`) {
			return s
		}

		// Trim "\\" from path and add UNC prefix.
		return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
	}
	if isAbsWinDrive.MatchString(s) {
		return `\\?\` + s
	}
	return s
}

// cleanWindowsName will clean invalid Windows characters replacing them with _
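//
// For example, `\\?\C:\my<file>:name.txt` becomes `\\?\C:\my_file__name.txt`;
// the long-path prefix and the drive colon are preserved.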
func cleanWindowsName(f *Fs, name string) string {
	original := name
	var name2 string
	if strings.HasPrefix(name, `\\?\`) {
		name2 = `\\?\`
		name = strings.TrimPrefix(name, `\\?\`)
	}
	if strings.HasPrefix(name, `//?/`) {
		name2 = `//?/`
		name = strings.TrimPrefix(name, `//?/`)
	}
	// Colon is allowed as part of a drive name X:\
	colonAt := strings.Index(name, ":")
	if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
		// Copy to name2, which is unfiltered
		name2 += name[0 : colonAt+1]
		name = name[colonAt+1:]
	}

	name2 += strings.Map(func(r rune) rune {
		switch r {
		case '<', '>', '"', '|', '?', '*', ':':
			return '_'
		}
		return r
	}, name)

	if name2 != original && f != nil {
		f.wmu.Lock()
		if _, ok := f.warned[name]; !ok {
			fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
			f.warned[name] = struct{}{}
		}
		f.wmu.Unlock()
	}
	return name2
}

// Check the interfaces are satisfied
var (
	_ fs.Fs             = &Fs{}
	_ fs.Purger         = &Fs{}
	_ fs.PutStreamer    = &Fs{}
	_ fs.Mover          = &Fs{}
	_ fs.DirMover       = &Fs{}
	_ fs.OpenWriterAter = &Fs{}
	_ fs.Object         = &Object{}
)