2015-09-22 19:47:16 +02:00
|
|
|
|
// Package local provides a filesystem interface
|
2013-06-27 21:13:07 +02:00
|
|
|
|
package local
|
2012-12-26 13:23:58 +01:00
|
|
|
|
|
|
|
|
|
import (
|
2018-09-13 00:54:30 +02:00
|
|
|
|
"bytes"
|
2019-06-17 10:34:30 +02:00
|
|
|
|
"context"
|
2021-11-04 11:12:57 +01:00
|
|
|
|
"errors"
|
2012-12-26 13:23:58 +01:00
|
|
|
|
"fmt"
|
|
|
|
|
"io"
|
|
|
|
|
"os"
|
2016-04-21 21:06:21 +02:00
|
|
|
|
"path"
|
2012-12-26 13:23:58 +01:00
|
|
|
|
"path/filepath"
|
2015-09-22 19:47:16 +02:00
|
|
|
|
"runtime"
|
|
|
|
|
"strings"
|
2013-01-19 00:21:02 +01:00
|
|
|
|
"sync"
|
2023-08-18 16:10:13 +02:00
|
|
|
|
"sync/atomic"
|
2012-12-26 13:23:58 +01:00
|
|
|
|
"time"
|
2015-05-21 19:40:16 +02:00
|
|
|
|
"unicode/utf8"
|
2014-03-15 17:06:11 +01:00
|
|
|
|
|
2019-07-28 19:47:38 +02:00
|
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
|
"github.com/rclone/rclone/fs/accounting"
|
2020-01-14 18:33:35 +01:00
|
|
|
|
"github.com/rclone/rclone/fs/config"
|
2019-07-28 19:47:38 +02:00
|
|
|
|
"github.com/rclone/rclone/fs/config/configmap"
|
|
|
|
|
"github.com/rclone/rclone/fs/config/configstruct"
|
2022-08-11 13:20:34 +02:00
|
|
|
|
"github.com/rclone/rclone/fs/filter"
|
2019-07-28 19:47:38 +02:00
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
2020-01-14 18:33:35 +01:00
|
|
|
|
"github.com/rclone/rclone/lib/encoder"
|
2019-07-28 19:47:38 +02:00
|
|
|
|
"github.com/rclone/rclone/lib/file"
|
|
|
|
|
"github.com/rclone/rclone/lib/readers"
|
2020-09-30 17:24:50 +02:00
|
|
|
|
"golang.org/x/text/unicode/norm"
|
2012-12-26 13:23:58 +01:00
|
|
|
|
)
|
|
|
|
|
|
2016-11-03 12:51:36 +01:00
|
|
|
|
// Constants
|
2019-05-12 19:32:04 +02:00
|
|
|
|
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
|
|
|
|
const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
|
|
|
|
|
const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
|
2016-11-02 08:26:09 +01:00
|
|
|
|
|
2023-12-08 16:26:53 +01:00
|
|
|
|
// timeType allows the user to choose what exactly ModTime() returns
type timeType = fs.Enum[timeTypeChoices]

// The iota values index the Choices() slice below, so the order here
// must match the order of the strings returned by Choices().
const (
	mTime timeType = iota // modification time (the default)
	aTime                 // access time
	bTime                 // birth (creation) time
	cTime                 // status change time
)
|
|
|
|
|
|
|
|
|
|
// timeTypeChoices provides the string values for the timeType enum.
type timeTypeChoices struct{}

// Choices returns the valid string values for the time_type option.
// The composite literal is keyed by the timeType constants so the
// strings stay attached to the right constant even if reordered.
func (timeTypeChoices) Choices() []string {
	return []string{
		mTime: "mtime",
		aTime: "atime",
		bTime: "btime",
		cTime: "ctime",
	}
}
|
|
|
|
|
|
2013-06-27 21:13:07 +02:00
|
|
|
|
// Register with Fs
|
|
|
|
|
func init() {
|
2016-02-18 12:35:25 +01:00
|
|
|
|
fsi := &fs.RegInfo{
|
2016-02-15 19:11:53 +01:00
|
|
|
|
Name: "local",
|
|
|
|
|
Description: "Local Disk",
|
|
|
|
|
NewFs: NewFs,
|
2020-04-28 14:01:55 +02:00
|
|
|
|
CommandHelp: commandHelp,
|
2022-05-24 19:06:16 +02:00
|
|
|
|
MetadataInfo: &fs.MetadataInfo{
|
|
|
|
|
System: systemMetadataInfo,
|
|
|
|
|
Help: `Depending on which OS is in use the local backend may return only some
|
|
|
|
|
of the system metadata. Setting system metadata is supported on all
|
|
|
|
|
OSes but setting user metadata is only supported on linux, freebsd,
|
|
|
|
|
netbsd, macOS and Solaris. It is **not** supported on Windows yet
|
|
|
|
|
([see pkg/attrs#47](https://github.com/pkg/xattr/issues/47)).
|
|
|
|
|
|
|
|
|
|
User metadata is stored as extended attributes (which may not be
|
|
|
|
|
supported by all file systems) under the "user.*" prefix.
|
2024-02-06 17:02:03 +01:00
|
|
|
|
|
|
|
|
|
Metadata is supported on files and directories.
|
2022-05-24 19:06:16 +02:00
|
|
|
|
`,
|
|
|
|
|
},
|
2016-02-29 17:57:23 +01:00
|
|
|
|
Options: []fs.Option{{
|
2021-03-14 22:14:44 +01:00
|
|
|
|
Name: "nounc",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: "Disable UNC (long path names) conversion on Windows.",
|
2022-06-16 19:08:12 +02:00
|
|
|
|
Default: false,
|
2021-03-14 22:14:44 +01:00
|
|
|
|
Advanced: runtime.GOOS != "windows",
|
2016-01-04 12:28:47 +01:00
|
|
|
|
Examples: []fs.OptionExample{{
|
|
|
|
|
Value: "true",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: "Disables long file names.",
|
2016-01-04 12:28:47 +01:00
|
|
|
|
}},
|
2018-05-14 19:06:57 +02:00
|
|
|
|
}, {
|
|
|
|
|
Name: "copy_links",
|
|
|
|
|
Help: "Follow symlinks and copy the pointed to item.",
|
|
|
|
|
Default: false,
|
|
|
|
|
NoPrefix: true,
|
|
|
|
|
ShortOpt: "L",
|
|
|
|
|
Advanced: true,
|
2018-09-13 00:54:30 +02:00
|
|
|
|
}, {
|
|
|
|
|
Name: "links",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension.",
|
2018-09-13 00:54:30 +02:00
|
|
|
|
Default: false,
|
|
|
|
|
NoPrefix: true,
|
|
|
|
|
ShortOpt: "l",
|
|
|
|
|
Advanced: true,
|
2018-05-14 19:06:57 +02:00
|
|
|
|
}, {
|
2018-10-01 19:36:15 +02:00
|
|
|
|
Name: "skip_links",
|
|
|
|
|
Help: `Don't warn about skipped symlinks.
|
2021-08-16 11:30:01 +02:00
|
|
|
|
|
2018-10-01 19:36:15 +02:00
|
|
|
|
This flag disables warning messages on skipped symlinks or junction
|
|
|
|
|
points, as you explicitly acknowledge that they should be skipped.`,
|
2018-05-14 19:06:57 +02:00
|
|
|
|
Default: false,
|
|
|
|
|
NoPrefix: true,
|
|
|
|
|
Advanced: true,
|
2021-01-25 22:25:40 +01:00
|
|
|
|
}, {
|
|
|
|
|
Name: "zero_size_links",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: `Assume the Stat size of links is zero (and read them instead) (deprecated).
|
2021-01-25 22:25:40 +01:00
|
|
|
|
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Rclone used to use the Stat size of links as the link size, but this fails in quite a few places:
|
2021-01-25 22:25:40 +01:00
|
|
|
|
|
2021-05-01 13:39:29 +02:00
|
|
|
|
- Windows
|
|
|
|
|
- On some virtual filesystems (such ash LucidLink)
|
|
|
|
|
- Android
|
2021-01-25 22:25:40 +01:00
|
|
|
|
|
2021-08-16 11:30:01 +02:00
|
|
|
|
So rclone now always reads the link.
|
2021-05-01 13:39:29 +02:00
|
|
|
|
`,
|
2021-01-25 22:25:40 +01:00
|
|
|
|
Default: false,
|
|
|
|
|
Advanced: true,
|
2018-05-14 19:06:57 +02:00
|
|
|
|
}, {
|
2020-09-30 17:24:50 +02:00
|
|
|
|
Name: "unicode_normalization",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: `Apply unicode NFC normalization to paths and filenames.
|
2018-10-01 19:36:15 +02:00
|
|
|
|
|
2020-09-30 17:24:50 +02:00
|
|
|
|
This flag can be used to normalize file names into unicode NFC form
|
|
|
|
|
that are read from the local filesystem.
|
|
|
|
|
|
|
|
|
|
Rclone does not normally touch the encoding of file names it reads from
|
|
|
|
|
the file system.
|
|
|
|
|
|
|
|
|
|
This can be useful when using macOS as it normally provides decomposed (NFD)
|
|
|
|
|
unicode which in some language (eg Korean) doesn't display properly on
|
|
|
|
|
some OSes.
|
|
|
|
|
|
|
|
|
|
Note that rclone compares filenames with unicode normalization in the sync
|
|
|
|
|
routine so this flag shouldn't normally be used.`,
|
2018-05-14 19:06:57 +02:00
|
|
|
|
Default: false,
|
|
|
|
|
Advanced: true,
|
|
|
|
|
}, {
|
2018-10-01 19:36:15 +02:00
|
|
|
|
Name: "no_check_updated",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: `Don't check to see if the files change during upload.
|
2018-10-01 19:36:15 +02:00
|
|
|
|
|
|
|
|
|
Normally rclone checks the size and modification time of files as they
|
2022-11-07 08:59:40 +01:00
|
|
|
|
are being uploaded and aborts with a message which starts "can't copy -
|
|
|
|
|
source file is being updated" if the file changes during upload.
|
2018-10-01 19:36:15 +02:00
|
|
|
|
|
2020-10-13 23:49:58 +02:00
|
|
|
|
However on some file systems this modification time check may fail (e.g.
|
2019-07-28 19:47:38 +02:00
|
|
|
|
[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this
|
2020-05-20 10:19:54 +02:00
|
|
|
|
check can be disabled with this flag.
|
|
|
|
|
|
|
|
|
|
If this flag is set, rclone will use its best efforts to transfer a
|
|
|
|
|
file which is being updated. If the file is only having things
|
2020-10-13 23:49:58 +02:00
|
|
|
|
appended to it (e.g. a log) then rclone will transfer the log file with
|
2020-05-20 10:19:54 +02:00
|
|
|
|
the size it had the first time rclone saw it.
|
|
|
|
|
|
|
|
|
|
If the file is being modified throughout (not just appended to) then
|
|
|
|
|
the transfer may fail with a hash check failure.
|
|
|
|
|
|
|
|
|
|
In detail, once the file has had stat() called on it for the first
|
|
|
|
|
time we:
|
|
|
|
|
|
|
|
|
|
- Only transfer the size that stat gave
|
|
|
|
|
- Only checksum the size that stat gave
|
|
|
|
|
- Don't update the stat info for the file
|
|
|
|
|
|
2023-10-14 13:39:48 +02:00
|
|
|
|
**NB** do not use this flag on a Windows Volume Shadow (VSS). For some
|
|
|
|
|
unknown reason, files in a VSS sometimes show different sizes from the
|
|
|
|
|
directory listing (where the initial stat value comes from on Windows)
|
|
|
|
|
and when stat is called on them directly. Other copy tools always use
|
|
|
|
|
the direct stat value and setting this flag will disable that.
|
2020-05-20 10:19:54 +02:00
|
|
|
|
`,
|
2018-05-14 19:06:57 +02:00
|
|
|
|
Default: false,
|
|
|
|
|
Advanced: true,
|
|
|
|
|
}, {
|
|
|
|
|
Name: "one_file_system",
|
|
|
|
|
Help: "Don't cross filesystem boundaries (unix/macOS only).",
|
|
|
|
|
Default: false,
|
|
|
|
|
NoPrefix: true,
|
|
|
|
|
ShortOpt: "x",
|
|
|
|
|
Advanced: true,
|
2019-06-14 15:41:54 +02:00
|
|
|
|
}, {
|
|
|
|
|
Name: "case_sensitive",
|
|
|
|
|
Help: `Force the filesystem to report itself as case sensitive.
|
|
|
|
|
|
|
|
|
|
Normally the local backend declares itself as case insensitive on
|
|
|
|
|
Windows/macOS and case sensitive for everything else. Use this flag
|
|
|
|
|
to override the default choice.`,
|
|
|
|
|
Default: false,
|
|
|
|
|
Advanced: true,
|
|
|
|
|
}, {
|
|
|
|
|
Name: "case_insensitive",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: `Force the filesystem to report itself as case insensitive.
|
2019-06-14 15:41:54 +02:00
|
|
|
|
|
|
|
|
|
Normally the local backend declares itself as case insensitive on
|
|
|
|
|
Windows/macOS and case sensitive for everything else. Use this flag
|
|
|
|
|
to override the default choice.`,
|
|
|
|
|
Default: false,
|
|
|
|
|
Advanced: true,
|
2021-01-31 21:25:24 +01:00
|
|
|
|
}, {
|
|
|
|
|
Name: "no_preallocate",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: `Disable preallocation of disk space for transferred files.
|
2021-01-31 21:25:24 +01:00
|
|
|
|
|
|
|
|
|
Preallocation of disk space helps prevent filesystem fragmentation.
|
|
|
|
|
However, some virtual filesystem layers (such as Google Drive File
|
|
|
|
|
Stream) may incorrectly set the actual file size equal to the
|
|
|
|
|
preallocated space, causing checksum and file size checks to fail.
|
|
|
|
|
Use this flag to disable preallocation.`,
|
|
|
|
|
Default: false,
|
|
|
|
|
Advanced: true,
|
2020-05-19 11:16:43 +02:00
|
|
|
|
}, {
|
|
|
|
|
Name: "no_sparse",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: `Disable sparse files for multi-thread downloads.
|
2020-05-19 11:16:43 +02:00
|
|
|
|
|
|
|
|
|
On Windows platforms rclone will make sparse files when doing
|
|
|
|
|
multi-thread downloads. This avoids long pauses on large files where
|
|
|
|
|
the OS zeros the file. However sparse files may be undesirable as they
|
|
|
|
|
cause disk fragmentation and can be slow to work with.`,
|
|
|
|
|
Default: false,
|
|
|
|
|
Advanced: true,
|
2020-07-30 17:43:17 +02:00
|
|
|
|
}, {
|
|
|
|
|
Name: "no_set_modtime",
|
2021-08-16 11:30:01 +02:00
|
|
|
|
Help: `Disable setting modtime.
|
2020-07-30 17:43:17 +02:00
|
|
|
|
|
|
|
|
|
Normally rclone updates modification time of files after they are done
|
|
|
|
|
uploading. This can cause permissions issues on Linux platforms when
|
|
|
|
|
the user rclone is running as does not own the file uploaded, such as
|
|
|
|
|
when copying to a CIFS mount owned by another user. If this option is
|
|
|
|
|
enabled, rclone will no longer update the modtime after copying a file.`,
|
|
|
|
|
Default: false,
|
|
|
|
|
Advanced: true,
|
2023-12-08 16:26:53 +01:00
|
|
|
|
}, {
|
|
|
|
|
Name: "time_type",
|
|
|
|
|
Help: `Set what kind of time is returned.
|
|
|
|
|
|
|
|
|
|
Normally rclone does all operations on the mtime or Modification time.
|
|
|
|
|
|
|
|
|
|
If you set this flag then rclone will return the Modified time as whatever
|
|
|
|
|
you set here. So if you use "rclone lsl --local-time-type ctime" then
|
|
|
|
|
you will see ctimes in the listing.
|
|
|
|
|
|
|
|
|
|
If the OS doesn't support returning the time_type specified then rclone
|
|
|
|
|
will silently replace it with the modification time which all OSes support.
|
|
|
|
|
|
|
|
|
|
- mtime is supported by all OSes
|
|
|
|
|
- atime is supported on all OSes except: plan9, js
|
|
|
|
|
- btime is only supported on: Windows, macOS, freebsd, netbsd
|
|
|
|
|
- ctime is supported on all Oses except: Windows, plan9, js
|
|
|
|
|
|
|
|
|
|
Note that setting the time will still set the modified time so this is
|
|
|
|
|
only useful for reading.
|
|
|
|
|
`,
|
|
|
|
|
Default: mTime,
|
|
|
|
|
Advanced: true,
|
|
|
|
|
Examples: []fs.OptionExample{{
|
|
|
|
|
Value: mTime.String(),
|
|
|
|
|
Help: "The last modification time.",
|
|
|
|
|
}, {
|
|
|
|
|
Value: aTime.String(),
|
|
|
|
|
Help: "The last access time.",
|
|
|
|
|
}, {
|
|
|
|
|
Value: bTime.String(),
|
|
|
|
|
Help: "The creation time.",
|
|
|
|
|
}, {
|
|
|
|
|
Value: cTime.String(),
|
|
|
|
|
Help: "The last status change time.",
|
|
|
|
|
}},
|
2020-01-14 18:33:35 +01:00
|
|
|
|
}, {
|
|
|
|
|
Name: config.ConfigEncoding,
|
|
|
|
|
Help: config.ConfigEncodingHelp,
|
|
|
|
|
Advanced: true,
|
2021-05-28 13:34:29 +02:00
|
|
|
|
Default: encoder.OS,
|
2016-01-04 12:28:47 +01:00
|
|
|
|
}},
|
|
|
|
|
}
|
|
|
|
|
fs.Register(fsi)
|
2013-06-27 21:13:07 +02:00
|
|
|
|
}
|
|
|
|
|
|
2018-05-14 19:06:57 +02:00
|
|
|
|
// Options defines the configuration for this backend
type Options struct {
	FollowSymlinks    bool                 `config:"copy_links"`            // -L: follow symlinks and copy the pointed to item
	TranslateSymlinks bool                 `config:"links"`                 // -l: translate symlinks to/from linkSuffix files
	SkipSymlinks      bool                 `config:"skip_links"`            // don't warn about skipped symlinks
	UTFNorm           bool                 `config:"unicode_normalization"` // apply unicode NFC normalization to names
	NoCheckUpdated    bool                 `config:"no_check_updated"`      // don't check files change during upload
	NoUNC             bool                 `config:"nounc"`                 // disable UNC (long path) conversion on Windows
	OneFileSystem     bool                 `config:"one_file_system"`       // -x: don't cross filesystem boundaries
	CaseSensitive     bool                 `config:"case_sensitive"`        // force reporting as case sensitive
	CaseInsensitive   bool                 `config:"case_insensitive"`      // force reporting as case insensitive
	NoPreAllocate     bool                 `config:"no_preallocate"`        // disable preallocation of disk space
	NoSparse          bool                 `config:"no_sparse"`             // disable sparse files for multi-thread downloads
	NoSetModTime      bool                 `config:"no_set_modtime"`        // don't set modtime after upload
	TimeType          timeType             `config:"time_type"`             // which time ModTime() should return
	Enc               encoder.MultiEncoder `config:"encoding"`              // filename encoding
}
|
|
|
|
|
|
2015-11-07 12:14:46 +01:00
|
|
|
|
// Fs represents a local filesystem rooted at root
type Fs struct {
	name        string              // the name of the remote
	root        string              // The root directory (OS path)
	opt         Options             // parsed config options
	features    *fs.Features        // optional features
	dev         uint64              // device number of root node
	precisionOk sync.Once           // Whether we need to read the precision
	precision   time.Duration       // precision of local filesystem
	warnedMu    sync.Mutex          // used for locking access to 'warned'.
	warned      map[string]struct{} // whether we have warned about this string
	xattrSupported atomic.Int32     // whether xattrs are supported

	// do os.Lstat or os.Stat
	// (set to os.Stat by NewFs when FollowSymlinks/-L is in effect)
	lstat        func(name string) (os.FileInfo, error)
	objectMetaMu sync.RWMutex // global lock for Object metadata
}
|
|
|
|
|
|
2015-11-07 12:14:46 +01:00
|
|
|
|
// Object represents a local filesystem object
type Object struct {
	fs     *Fs    // The Fs this object is part of
	remote string // The remote path (encoded path)
	path   string // The local path (OS path)
	// When using these items the fs.objectMetaMu must be held
	size    int64                // file metadata - always present
	mode    os.FileMode          // file type and permission bits
	modTime time.Time            // cached time of the object
	hashes  map[hash.Type]string // Hashes
	// these are read only and don't need the mutex held
	translatedLink bool // Is this object a translated link
}
|
|
|
|
|
|
2024-02-06 17:02:03 +01:00
|
|
|
|
// Directory represents a local filesystem directory
//
// It embeds Object so directories share the object metadata handling
// (path, times, metadata locking) with files.
type Directory struct {
	Object
}
|
|
|
|
|
|
2012-12-26 13:23:58 +01:00
|
|
|
|
// ------------------------------------------------------------
|
2012-12-29 12:35:41 +01:00
|
|
|
|
|
2023-04-03 18:14:04 +02:00
|
|
|
|
// Errors returned by NewFs for invalid symlink option combinations.
var (
	errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links")
	errLinksNeedsSuffix  = errors.New("need \"" + linkSuffix + "\" suffix to refer to symlink when using -l/--links")
)
|
2019-01-27 20:28:57 +01:00
|
|
|
|
|
2015-11-07 12:14:46 +01:00
|
|
|
|
// NewFs constructs an Fs from the path
//
// If root points at a file (rather than a directory) the returned Fs
// is rooted at the parent directory and fs.ErrorIsFile is returned
// alongside it.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	// -l and -L are mutually exclusive
	if opt.TranslateSymlinks && opt.FollowSymlinks {
		return nil, errLinksAndCopyLinks
	}

	f := &Fs{
		name:   name,
		opt:    *opt,
		warned: make(map[string]struct{}),
		dev:    devUnset,
		lstat:  os.Lstat,
	}
	if xattrSupported {
		f.xattrSupported.Store(1)
	}
	f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
	f.features = (&fs.Features{
		CaseInsensitive:          f.caseInsensitive(),
		CanHaveEmptyDirectories:  true,
		IsLocal:                  true,
		SlowHash:                 true,
		ReadMetadata:             true,
		WriteMetadata:            true,
		ReadDirMetadata:          true,
		WriteDirMetadata:         true,
		WriteDirSetModTime:       true,
		UserDirMetadata:          xattrSupported, // can only R/W general purpose metadata if xattrs are supported
		DirModTimeUpdatesOnWrite: true,
		UserMetadata:             xattrSupported, // can only R/W general purpose metadata if xattrs are supported
		FilterAware:              true,
		PartialUploads:           true,
	}).Fill(ctx, f)
	// With -L we stat the link target rather than the link itself
	if opt.FollowSymlinks {
		f.lstat = os.Stat
	}

	// Check to see if this points to a file
	fi, err := f.lstat(f.root)
	if err == nil {
		f.dev = readDevice(fi, f.opt.OneFileSystem)
	}
	// Check to see if this is a .rclonelink if not found
	hasLinkSuffix := strings.HasSuffix(f.root, linkSuffix)
	if hasLinkSuffix && opt.TranslateSymlinks && os.IsNotExist(err) {
		fi, err = f.lstat(strings.TrimSuffix(f.root, linkSuffix))
	}
	if err == nil && f.isRegular(fi.Mode()) {
		// Handle the odd case, that a symlink was specified by name without the link suffix
		if !hasLinkSuffix && opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
			return nil, errLinksNeedsSuffix
		}
		// It is a file, so use the parent as the root
		f.root = filepath.Dir(f.root)
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
|
2012-12-26 13:23:58 +01:00
|
|
|
|
|
2018-09-13 00:54:30 +02:00
|
|
|
|
// Determine whether a file is a 'regular' file,
|
|
|
|
|
// Symlinks are regular files, only if the TranslateSymlink
|
|
|
|
|
// option is in-effect
|
|
|
|
|
func (f *Fs) isRegular(mode os.FileMode) bool {
|
|
|
|
|
if !f.opt.TranslateSymlinks {
|
|
|
|
|
return mode.IsRegular()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// fi.Mode().IsRegular() tests that all mode bits are zero
|
|
|
|
|
// Since symlinks are accepted, test that all other bits are zero,
|
|
|
|
|
// except the symlink bit
|
|
|
|
|
return mode&os.ModeType&^os.ModeSymlink == 0
|
|
|
|
|
}
|
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
|
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
|
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
|
// Root of the remote (as passed into NewFs)
//
// The OS path is converted back to a slash separated path in the
// standard encoding.
func (f *Fs) Root() string {
	return f.opt.Enc.ToStandardPath(filepath.ToSlash(f.root))
}
|
|
|
|
|
|
2015-11-07 12:14:46 +01:00
|
|
|
|
// String converts this Fs to a string
|
|
|
|
|
func (f *Fs) String() string {
|
2018-11-02 13:12:51 +01:00
|
|
|
|
return fmt.Sprintf("Local file system at %s", f.Root())
|
2012-12-31 17:40:34 +01:00
|
|
|
|
}
|
|
|
|
|
|
2017-01-13 18:21:47 +01:00
|
|
|
|
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
|
|
|
|
|
2019-02-07 18:41:17 +01:00
|
|
|
|
// caseInsensitive returns whether the remote is case insensitive or not
|
2017-01-13 18:21:47 +01:00
|
|
|
|
func (f *Fs) caseInsensitive() bool {
|
2019-06-14 15:41:54 +02:00
|
|
|
|
if f.opt.CaseSensitive {
|
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
if f.opt.CaseInsensitive {
|
|
|
|
|
return true
|
|
|
|
|
}
|
2017-01-13 18:21:47 +01:00
|
|
|
|
// FIXME not entirely accurate since you can have case
|
2019-02-07 18:41:17 +01:00
|
|
|
|
// sensitive Fses on darwin and case insensitive Fses on linux.
|
2017-01-13 18:21:47 +01:00
|
|
|
|
// Should probably check but that would involve creating a
|
|
|
|
|
// file in the remote to be most accurate which probably isn't
|
|
|
|
|
// desirable.
|
|
|
|
|
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-13 00:54:30 +02:00
|
|
|
|
// translateLink checks whether the remote is a translated link
|
|
|
|
|
// and returns a new path, removing the suffix as needed,
|
|
|
|
|
// It also returns whether this is a translated link at all
|
|
|
|
|
//
|
2018-11-02 13:12:51 +01:00
|
|
|
|
// for regular files, localPath is returned unchanged
|
|
|
|
|
func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) {
|
2018-09-13 00:54:30 +02:00
|
|
|
|
isTranslatedLink = strings.HasSuffix(remote, linkSuffix)
|
2018-11-02 13:12:51 +01:00
|
|
|
|
newLocalPath = strings.TrimSuffix(localPath, linkSuffix)
|
|
|
|
|
return newLocalPath, isTranslatedLink
|
2018-09-13 00:54:30 +02:00
|
|
|
|
}
|
|
|
|
|
|
2016-06-25 22:58:34 +02:00
|
|
|
|
// newObject makes a half completed Object
//
// The file is not stat-ed here - the caller completes the Object via
// setMetadata or lstat.
func (f *Fs) newObject(remote string) *Object {
	translatedLink := false
	localPath := f.localPath(remote)

	if f.opt.TranslateSymlinks {
		// Possibly receive a new name for localPath
		// (the linkSuffix is stripped when remote names a translated link)
		localPath, translatedLink = translateLink(remote, localPath)
	}

	return &Object{
		fs:             f,
		remote:         remote,
		path:           localPath,
		translatedLink: translatedLink,
	}
}
|
|
|
|
|
|
2016-06-25 22:58:34 +02:00
|
|
|
|
// Return an Object from a path
//
// If info is non-nil it is used to fill in the metadata, otherwise
// the object is stat-ed with lstat. os.IsNotExist and os.IsPermission
// errors are mapped to the corresponding fs errors.
//
// May return nil if an error occurred
func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, error) {
	o := f.newObject(remote)
	if info != nil {
		o.setMetadata(info)
	} else {
		err := o.lstat()
		if err != nil {
			if os.IsNotExist(err) {
				return nil, fs.ErrorObjectNotFound
			}
			if os.IsPermission(err) {
				return nil, fs.ErrorPermissionDenied
			}
			return nil, err
		}
		// Handle the odd case, that a symlink was specified by name without the link suffix
		if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
			return nil, fs.ErrorObjectNotFound
		}

	}
	// Directories are not Objects
	if o.mode.IsDir() {
		return nil, fs.ErrorIsDir
	}
	return o, nil
}
|
|
|
|
|
|
2016-06-25 22:23:20 +02:00
|
|
|
|
// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
//
// Passing nil info makes newObjectWithInfo stat the file itself.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}
|
|
|
|
|
|
2024-02-06 17:02:03 +01:00
|
|
|
|
// Create new directory object from the info passed in
|
|
|
|
|
func (f *Fs) newDirectory(dir string, fi os.FileInfo) *Directory {
|
|
|
|
|
o := f.newObject(dir)
|
|
|
|
|
o.setMetadata(fi)
|
|
|
|
|
return &Directory{
|
|
|
|
|
Object: *o,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-11 23:43:31 +02:00
|
|
|
|
// List the objects and directories in dir into entries. The
|
|
|
|
|
// entries can be returned in any order but should be for a
|
|
|
|
|
// complete directory.
|
|
|
|
|
//
|
|
|
|
|
// dir should be "" to list the root, and should not have
|
|
|
|
|
// trailing slashes.
|
|
|
|
|
//
|
|
|
|
|
// This should return ErrDirNotFound if the directory isn't
|
|
|
|
|
// found.
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
2022-08-11 13:20:34 +02:00
|
|
|
|
filter, useFilter := filter.GetConfig(ctx), filter.GetUseFilter(ctx)
|
|
|
|
|
|
2018-11-02 13:12:51 +01:00
|
|
|
|
fsDirPath := f.localPath(dir)
|
2017-06-11 23:43:31 +02:00
|
|
|
|
_, err = os.Stat(fsDirPath)
|
2016-04-21 21:06:21 +02:00
|
|
|
|
if err != nil {
|
2017-06-11 23:43:31 +02:00
|
|
|
|
return nil, fs.ErrorDirNotFound
|
2016-04-21 21:06:21 +02:00
|
|
|
|
}
|
2016-11-02 08:26:09 +01:00
|
|
|
|
|
2017-06-11 23:43:31 +02:00
|
|
|
|
fd, err := os.Open(fsDirPath)
|
|
|
|
|
if err != nil {
|
2019-05-12 19:32:04 +02:00
|
|
|
|
isPerm := os.IsPermission(err)
|
2021-11-04 11:12:57 +01:00
|
|
|
|
err = fmt.Errorf("failed to open directory %q: %w", dir, err)
|
2019-05-12 19:32:04 +02:00
|
|
|
|
fs.Errorf(dir, "%v", err)
|
|
|
|
|
if isPerm {
|
2019-11-18 15:13:02 +01:00
|
|
|
|
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err))
|
2019-05-12 19:32:04 +02:00
|
|
|
|
err = nil // ignore error but fail sync
|
|
|
|
|
}
|
|
|
|
|
return nil, err
|
2017-06-11 23:43:31 +02:00
|
|
|
|
}
|
2016-04-21 21:06:21 +02:00
|
|
|
|
defer func() {
|
2017-06-11 23:43:31 +02:00
|
|
|
|
cerr := fd.Close()
|
|
|
|
|
if cerr != nil && err == nil {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
err = fmt.Errorf("failed to close directory %q:: %w", dir, cerr)
|
2016-04-21 21:06:21 +02:00
|
|
|
|
}
|
|
|
|
|
}()
|
|
|
|
|
|
|
|
|
|
for {
|
2019-05-12 19:32:04 +02:00
|
|
|
|
var fis []os.FileInfo
|
|
|
|
|
if useReadDir {
|
|
|
|
|
// Windows and Plan9 read the directory entries with the stat information in which
|
|
|
|
|
// shouldn't fail because of unreadable entries.
|
|
|
|
|
fis, err = fd.Readdir(1024)
|
|
|
|
|
if err == io.EOF && len(fis) == 0 {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
// For other OSes we read the names only (which shouldn't fail) then stat the
|
|
|
|
|
// individual ourselves so we can log errors but not fail the directory read.
|
|
|
|
|
var names []string
|
|
|
|
|
names, err = fd.Readdirnames(1024)
|
|
|
|
|
if err == io.EOF && len(names) == 0 {
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
if err == nil {
|
|
|
|
|
for _, name := range names {
|
|
|
|
|
namepath := filepath.Join(fsDirPath, name)
|
|
|
|
|
fi, fierr := os.Lstat(namepath)
|
2021-07-07 15:50:19 +02:00
|
|
|
|
if os.IsNotExist(fierr) {
|
|
|
|
|
// skip entry removed by a concurrent goroutine
|
|
|
|
|
continue
|
|
|
|
|
}
|
2019-05-12 19:32:04 +02:00
|
|
|
|
if fierr != nil {
|
2022-08-11 13:20:34 +02:00
|
|
|
|
// Don't report errors on any file names that are excluded
|
|
|
|
|
if useFilter {
|
|
|
|
|
newRemote := f.cleanRemote(dir, name)
|
|
|
|
|
if !filter.IncludeRemote(newRemote) {
|
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
}
|
2023-07-07 17:06:31 +02:00
|
|
|
|
fierr = fmt.Errorf("failed to get info about directory entry %q: %w", namepath, fierr)
|
2019-05-12 19:32:04 +02:00
|
|
|
|
fs.Errorf(dir, "%v", fierr)
|
2019-11-18 15:13:02 +01:00
|
|
|
|
_ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync
|
2019-05-12 19:32:04 +02:00
|
|
|
|
continue
|
|
|
|
|
}
|
|
|
|
|
fis = append(fis, fi)
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-04-21 21:06:21 +02:00
|
|
|
|
}
|
|
|
|
|
if err != nil {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return nil, fmt.Errorf("failed to read directory entry: %w", err)
|
2016-04-21 21:06:21 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for _, fi := range fis {
|
|
|
|
|
name := fi.Name()
|
2017-01-29 14:43:20 +01:00
|
|
|
|
mode := fi.Mode()
|
2018-11-02 13:12:51 +01:00
|
|
|
|
newRemote := f.cleanRemote(dir, name)
|
2017-01-29 14:43:20 +01:00
|
|
|
|
// Follow symlinks if required
|
2018-05-14 19:06:57 +02:00
|
|
|
|
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
2018-11-02 13:12:51 +01:00
|
|
|
|
localPath := filepath.Join(fsDirPath, name)
|
|
|
|
|
fi, err = os.Stat(localPath)
|
2023-02-27 13:16:49 +01:00
|
|
|
|
// Quietly skip errors on excluded files and directories
|
|
|
|
|
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2020-11-12 12:32:55 +01:00
|
|
|
|
if os.IsNotExist(err) || isCircularSymlinkError(err) {
|
|
|
|
|
// Skip bad symlinks and circular symlinks
|
2021-11-04 11:12:57 +01:00
|
|
|
|
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
|
2018-09-02 15:47:54 +02:00
|
|
|
|
fs.Errorf(newRemote, "Listing error: %v", err)
|
2019-11-18 15:13:02 +01:00
|
|
|
|
err = accounting.Stats(ctx).Error(err)
|
2018-09-02 15:47:54 +02:00
|
|
|
|
continue
|
|
|
|
|
}
|
2017-01-29 14:43:20 +01:00
|
|
|
|
if err != nil {
|
2017-06-11 23:43:31 +02:00
|
|
|
|
return nil, err
|
2017-01-29 14:43:20 +01:00
|
|
|
|
}
|
|
|
|
|
mode = fi.Mode()
|
|
|
|
|
}
|
2016-04-21 21:06:21 +02:00
|
|
|
|
if fi.IsDir() {
|
2016-09-07 20:49:42 +02:00
|
|
|
|
// Ignore directories which are symlinks. These are junction points under windows which
|
|
|
|
|
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
|
2018-05-14 19:06:57 +02:00
|
|
|
|
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
|
2024-02-06 17:02:03 +01:00
|
|
|
|
d := f.newDirectory(newRemote, fi)
|
2017-06-11 23:43:31 +02:00
|
|
|
|
entries = append(entries, d)
|
2012-12-26 13:23:58 +01:00
|
|
|
|
}
|
2016-04-21 21:06:21 +02:00
|
|
|
|
} else {
|
2018-09-13 00:54:30 +02:00
|
|
|
|
// Check whether this link should be translated
|
|
|
|
|
if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 {
|
|
|
|
|
newRemote += linkSuffix
|
|
|
|
|
}
|
2023-04-03 17:28:12 +02:00
|
|
|
|
// Don't include non directory if not included
|
|
|
|
|
// we leave directory filtering to the layer above
|
|
|
|
|
if useFilter && !filter.IncludeRemote(newRemote) {
|
|
|
|
|
continue
|
|
|
|
|
}
|
2018-11-02 13:12:51 +01:00
|
|
|
|
fso, err := f.newObjectWithInfo(newRemote, fi)
|
2016-06-25 22:23:20 +02:00
|
|
|
|
if err != nil {
|
2017-06-11 23:43:31 +02:00
|
|
|
|
return nil, err
|
2016-06-25 22:23:20 +02:00
|
|
|
|
}
|
2017-06-11 23:43:31 +02:00
|
|
|
|
if fso.Storable() {
|
|
|
|
|
entries = append(entries, fso)
|
2012-12-26 13:23:58 +01:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2016-04-21 21:06:21 +02:00
|
|
|
|
}
|
2017-06-11 23:43:31 +02:00
|
|
|
|
return entries, nil
|
2012-12-26 13:23:58 +01:00
|
|
|
|
}
|
|
|
|
|
|
2018-11-02 13:12:51 +01:00
|
|
|
|
func (f *Fs) cleanRemote(dir, filename string) (remote string) {
|
2020-09-30 17:24:50 +02:00
|
|
|
|
if f.opt.UTFNorm {
|
|
|
|
|
filename = norm.NFC.String(filename)
|
|
|
|
|
}
|
2020-01-14 18:33:35 +01:00
|
|
|
|
remote = path.Join(dir, f.opt.Enc.ToStandardName(filename))
|
2017-03-16 23:37:56 +01:00
|
|
|
|
|
2018-11-02 13:12:51 +01:00
|
|
|
|
if !utf8.ValidString(filename) {
|
|
|
|
|
f.warnedMu.Lock()
|
|
|
|
|
if _, ok := f.warned[remote]; !ok {
|
|
|
|
|
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", remote)
|
|
|
|
|
f.warned[remote] = struct{}{}
|
|
|
|
|
}
|
|
|
|
|
f.warnedMu.Unlock()
|
2017-03-16 23:37:56 +01:00
|
|
|
|
}
|
2018-11-02 13:12:51 +01:00
|
|
|
|
return
|
2017-03-16 23:37:56 +01:00
|
|
|
|
}
|
|
|
|
|
|
2018-11-02 13:12:51 +01:00
|
|
|
|
func (f *Fs) localPath(name string) string {
|
2020-01-14 18:33:35 +01:00
|
|
|
|
return filepath.Join(f.root, filepath.FromSlash(f.opt.Enc.FromStandardPath(name)))
|
2017-03-16 23:37:56 +01:00
|
|
|
|
}
|
|
|
|
|
|
2016-06-25 22:58:34 +02:00
|
|
|
|
// Put the Object to the local filesystem
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
2016-06-25 22:58:34 +02:00
|
|
|
|
// Temporary Object under construction - info filled in by Update()
|
2018-11-02 13:12:51 +01:00
|
|
|
|
o := f.newObject(src.Remote())
|
2019-06-17 10:34:30 +02:00
|
|
|
|
err := o.Update(ctx, in, src, options...)
|
2014-07-19 12:34:44 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
return o, nil
|
2012-12-26 13:23:58 +01:00
|
|
|
|
}
|
|
|
|
|
|
2017-08-03 21:42:35 +02:00
|
|
|
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
//
// The local backend doesn't need to know the size up front, so this
// simply delegates to Put.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}
|
|
|
|
|
|
2012-12-26 13:23:58 +01:00
|
|
|
|
// Mkdir creates the directory if it doesn't exist
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
2018-11-02 13:12:51 +01:00
|
|
|
|
localPath := f.localPath(dir)
|
2021-06-11 00:46:36 +02:00
|
|
|
|
err := file.MkdirAll(localPath, 0777)
|
2016-11-03 12:51:36 +01:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
2016-11-25 22:52:43 +01:00
|
|
|
|
if dir == "" {
|
2018-11-02 13:12:51 +01:00
|
|
|
|
fi, err := f.lstat(localPath)
|
2016-11-25 22:52:43 +01:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
2018-05-14 19:06:57 +02:00
|
|
|
|
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
2016-11-03 12:51:36 +01:00
|
|
|
|
}
|
|
|
|
|
return nil
|
2012-12-26 13:23:58 +01:00
|
|
|
|
}
|
|
|
|
|
|
2024-01-13 14:19:37 +01:00
|
|
|
|
// DirSetModTime sets the directory modtime for dir
|
|
|
|
|
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
|
|
|
|
o := Object{
|
|
|
|
|
fs: f,
|
|
|
|
|
remote: dir,
|
|
|
|
|
path: f.localPath(dir),
|
|
|
|
|
}
|
|
|
|
|
return o.SetModTime(ctx, modTime)
|
|
|
|
|
}
|
|
|
|
|
|
2024-02-06 17:02:03 +01:00
|
|
|
|
// MkdirMetadata makes the directory passed in as dir.
//
// It shouldn't return an error if it already exists.
//
// If the metadata is not nil it is set.
//
// It returns the directory that was created.
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
	// Find and or create the directory
	localPath := f.localPath(dir)
	fi, err := f.lstat(localPath)
	if errors.Is(err, os.ErrNotExist) {
		err := f.Mkdir(ctx, dir)
		if err != nil {
			return nil, fmt.Errorf("mkdir metadata: failed make directory: %w", err)
		}
		// Stat again so the returned directory carries real file info
		fi, err = f.lstat(localPath)
		if err != nil {
			return nil, fmt.Errorf("mkdir metadata: failed to read info: %w", err)
		}
	} else if err != nil {
		return nil, err
	}

	// Create directory object
	d := f.newDirectory(dir, fi)

	// Set metadata on the directory object if provided
	if metadata != nil {
		err = d.writeMetadata(metadata)
		if err != nil {
			return nil, fmt.Errorf("failed to set metadata on directory: %w", err)
		}
		// Re-read info now we have finished setting stuff
		err = d.lstat()
		if err != nil {
			return nil, fmt.Errorf("mkdir metadata: failed to re-read info: %w", err)
		}
	}
	return d, nil
}
|
|
|
|
|
|
2012-12-26 13:23:58 +01:00
|
|
|
|
// Rmdir removes the directory
|
|
|
|
|
//
|
|
|
|
|
// If it isn't empty it will return an error
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
2023-09-06 13:07:40 +02:00
|
|
|
|
localPath := f.localPath(dir)
|
|
|
|
|
if fi, err := os.Stat(localPath); err != nil {
|
|
|
|
|
return err
|
|
|
|
|
} else if !fi.IsDir() {
|
|
|
|
|
return fs.ErrorIsFile
|
|
|
|
|
}
|
|
|
|
|
return os.Remove(localPath)
|
2012-12-26 13:23:58 +01:00
|
|
|
|
}
|
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
|
// Precision of the file system
|
2015-11-07 12:14:46 +01:00
|
|
|
|
func (f *Fs) Precision() (precision time.Duration) {
|
2020-07-30 17:43:17 +02:00
|
|
|
|
if f.opt.NoSetModTime {
|
|
|
|
|
return fs.ModTimeNotSupported
|
|
|
|
|
}
|
|
|
|
|
|
2013-01-19 00:21:02 +01:00
|
|
|
|
f.precisionOk.Do(func() {
|
|
|
|
|
f.precision = f.readPrecision()
|
|
|
|
|
})
|
|
|
|
|
return f.precision
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Read the precision
|
2015-11-07 12:14:46 +01:00
|
|
|
|
func (f *Fs) readPrecision() (precision time.Duration) {
|
2013-01-19 00:21:02 +01:00
|
|
|
|
// Default precision of 1s
|
|
|
|
|
precision = time.Second
|
|
|
|
|
|
|
|
|
|
// Create temporary file and test it
|
2022-08-20 16:38:02 +02:00
|
|
|
|
fd, err := os.CreateTemp("", "rclone")
|
2013-01-19 00:21:02 +01:00
|
|
|
|
if err != nil {
|
|
|
|
|
// If failed return 1s
|
|
|
|
|
// fmt.Println("Failed to create temp file", err)
|
|
|
|
|
return time.Second
|
|
|
|
|
}
|
|
|
|
|
path := fd.Name()
|
|
|
|
|
// fmt.Println("Created temp file", path)
|
2014-07-25 19:19:49 +02:00
|
|
|
|
err = fd.Close()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return time.Second
|
|
|
|
|
}
|
2013-01-19 00:21:02 +01:00
|
|
|
|
|
|
|
|
|
// Delete it on return
|
|
|
|
|
defer func() {
|
|
|
|
|
// fmt.Println("Remove temp file")
|
2014-07-25 19:19:49 +02:00
|
|
|
|
_ = os.Remove(path) // ignore error
|
2013-01-19 00:21:02 +01:00
|
|
|
|
}()
|
|
|
|
|
|
|
|
|
|
// Find the minimum duration we can detect
|
|
|
|
|
for duration := time.Duration(1); duration < time.Second; duration *= 10 {
|
|
|
|
|
// Current time with delta
|
|
|
|
|
t := time.Unix(time.Now().Unix(), int64(duration))
|
2013-06-27 21:13:07 +02:00
|
|
|
|
err := os.Chtimes(path, t, t)
|
2013-01-19 00:21:02 +01:00
|
|
|
|
if err != nil {
|
|
|
|
|
// fmt.Println("Failed to Chtimes", err)
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Read the actual time back
|
|
|
|
|
fi, err := os.Stat(path)
|
|
|
|
|
if err != nil {
|
|
|
|
|
// fmt.Println("Failed to Stat", err)
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// If it matches - have found the precision
|
2019-06-17 10:34:30 +02:00
|
|
|
|
// fmt.Println("compare", fi.ModTime(ctx), t)
|
2018-04-04 18:12:30 +02:00
|
|
|
|
if fi.ModTime().Equal(t) {
|
2013-01-19 00:21:02 +01:00
|
|
|
|
// fmt.Println("Precision detected as", duration)
|
|
|
|
|
return duration
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-13 23:43:40 +02:00
|
|
|
|
// Move src to this remote using server-side move operations.
|
2015-08-31 22:05:51 +02:00
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
|
// This is stored with the remote path given.
|
2015-08-31 22:05:51 +02:00
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
|
// It returns the destination Object and a possible error.
|
2015-08-31 22:05:51 +02:00
|
|
|
|
//
|
|
|
|
|
// Will only be called if src.Fs().Name() == f.Name()
|
|
|
|
|
//
|
|
|
|
|
// If it isn't possible then return fs.ErrorCantMove
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
2015-11-07 12:14:46 +01:00
|
|
|
|
srcObj, ok := src.(*Object)
|
2015-08-31 22:05:51 +02:00
|
|
|
|
if !ok {
|
2017-02-09 12:01:20 +01:00
|
|
|
|
fs.Debugf(src, "Can't move - not same remote type")
|
2015-08-31 22:05:51 +02:00
|
|
|
|
return nil, fs.ErrorCantMove
|
|
|
|
|
}
|
|
|
|
|
|
2016-06-25 22:58:34 +02:00
|
|
|
|
// Temporary Object under construction
|
2018-11-02 13:12:51 +01:00
|
|
|
|
dstObj := f.newObject(remote)
|
2020-06-21 14:02:46 +02:00
|
|
|
|
dstObj.fs.objectMetaMu.RLock()
|
|
|
|
|
dstObjMode := dstObj.mode
|
|
|
|
|
dstObj.fs.objectMetaMu.RUnlock()
|
2015-08-31 22:05:51 +02:00
|
|
|
|
|
|
|
|
|
// Check it is a file if it exists
|
|
|
|
|
err := dstObj.lstat()
|
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
|
// OK
|
|
|
|
|
} else if err != nil {
|
|
|
|
|
return nil, err
|
2020-06-21 14:02:46 +02:00
|
|
|
|
} else if !dstObj.fs.isRegular(dstObjMode) {
|
2015-08-31 22:05:51 +02:00
|
|
|
|
// It isn't a file
|
2016-06-12 16:06:02 +02:00
|
|
|
|
return nil, errors.New("can't move file onto non-file")
|
2015-08-31 22:05:51 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Create destination
|
|
|
|
|
err = dstObj.mkdirAll()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
2024-03-05 18:21:06 +01:00
|
|
|
|
// Fetch metadata if --metadata is in use
|
|
|
|
|
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, fmt.Errorf("move: failed to read metadata: %w", err)
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-31 22:05:51 +02:00
|
|
|
|
// Do the move
|
|
|
|
|
err = os.Rename(srcObj.path, dstObj.path)
|
2017-08-04 23:16:29 +02:00
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
|
// race condition, source was deleted in the meantime
|
|
|
|
|
return nil, err
|
|
|
|
|
} else if os.IsPermission(err) {
|
|
|
|
|
// not enough rights to write to dst
|
2015-08-31 22:05:51 +02:00
|
|
|
|
return nil, err
|
2017-08-04 23:16:29 +02:00
|
|
|
|
} else if err != nil {
|
|
|
|
|
// not quite clear, but probably trying to move a file across file system
|
|
|
|
|
// boundaries. Copying might still work.
|
2018-02-28 22:27:34 +01:00
|
|
|
|
fs.Debugf(src, "Can't move: %v: trying copy", err)
|
2017-08-04 23:16:29 +02:00
|
|
|
|
return nil, fs.ErrorCantMove
|
2015-08-31 22:05:51 +02:00
|
|
|
|
}
|
|
|
|
|
|
2024-03-05 18:21:06 +01:00
|
|
|
|
// Set metadata if --metadata is in use
|
|
|
|
|
err = dstObj.writeMetadata(meta)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, fmt.Errorf("move: failed to set metadata: %w", err)
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-31 22:05:51 +02:00
|
|
|
|
// Update the info
|
|
|
|
|
err = dstObj.lstat()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return dstObj, nil
|
|
|
|
|
}
|
|
|
|
|
|
2017-02-05 22:20:56 +01:00
|
|
|
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
2020-10-13 23:43:40 +02:00
|
|
|
|
// using server-side move operations.
|
2015-08-31 22:05:51 +02:00
|
|
|
|
//
|
|
|
|
|
// Will only be called if src.Fs().Name() == f.Name()
|
|
|
|
|
//
|
|
|
|
|
// If it isn't possible then return fs.ErrorCantDirMove
|
|
|
|
|
//
|
|
|
|
|
// If destination exists then return fs.ErrorDirExists
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
2015-11-07 12:14:46 +01:00
|
|
|
|
srcFs, ok := src.(*Fs)
|
2015-08-31 22:05:51 +02:00
|
|
|
|
if !ok {
|
2017-02-09 12:01:20 +01:00
|
|
|
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
2015-08-31 22:05:51 +02:00
|
|
|
|
return fs.ErrorCantDirMove
|
|
|
|
|
}
|
2018-11-02 13:12:51 +01:00
|
|
|
|
srcPath := srcFs.localPath(srcRemote)
|
|
|
|
|
dstPath := f.localPath(dstRemote)
|
2015-09-11 11:37:12 +02:00
|
|
|
|
|
2015-08-31 22:05:51 +02:00
|
|
|
|
// Check if destination exists
|
2017-02-05 22:20:56 +01:00
|
|
|
|
_, err := os.Lstat(dstPath)
|
2015-08-31 22:05:51 +02:00
|
|
|
|
if !os.IsNotExist(err) {
|
|
|
|
|
return fs.ErrorDirExists
|
|
|
|
|
}
|
2015-09-11 11:37:12 +02:00
|
|
|
|
|
2017-02-05 22:20:56 +01:00
|
|
|
|
// Create parent of destination
|
2018-08-31 22:10:36 +02:00
|
|
|
|
dstParentPath := filepath.Dir(dstPath)
|
2021-06-11 00:46:36 +02:00
|
|
|
|
err = file.MkdirAll(dstParentPath, 0777)
|
2017-02-05 22:20:56 +01:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-31 22:05:51 +02:00
|
|
|
|
// Do the move
|
2018-02-26 13:55:05 +01:00
|
|
|
|
err = os.Rename(srcPath, dstPath)
|
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
|
// race condition, source was deleted in the meantime
|
|
|
|
|
return err
|
|
|
|
|
} else if os.IsPermission(err) {
|
|
|
|
|
// not enough rights to write to dst
|
|
|
|
|
return err
|
|
|
|
|
} else if err != nil {
|
|
|
|
|
// not quite clear, but probably trying to move directory across file system
|
|
|
|
|
// boundaries. Copying might still work.
|
2018-02-28 22:27:34 +01:00
|
|
|
|
fs.Debugf(src, "Can't move dir: %v: trying copy", err)
|
2018-02-26 13:55:05 +01:00
|
|
|
|
return fs.ErrorCantDirMove
|
|
|
|
|
}
|
|
|
|
|
return nil
|
2015-08-31 22:05:51 +02:00
|
|
|
|
}
|
|
|
|
|
|
2016-01-11 13:39:33 +01:00
|
|
|
|
// Hashes returns the supported hash sets.
//
// Delegates to hash.Supported() - the local backend can compute any
// hash rclone's hash package supports.
func (f *Fs) Hashes() hash.Set {
	return hash.Supported()
}
|
|
|
|
|
|
2020-04-28 14:01:55 +02:00
|
|
|
|
// commandHelp describes the backend-specific commands served by
// (*Fs).Command (invoked via "rclone backend <name> local:").
var commandHelp = []fs.CommandHelp{
	{
		Name:  "noop",
		Short: "A null operation for testing backend commands",
		Long: `This is a test command which has some options
you can try to change the output.`,
		Opts: map[string]string{
			"echo":  "echo the input arguments",
			"error": "return an error based on option value",
		},
	},
}
|
|
|
|
|
|
|
|
|
|
// Command the backend to run a named command
|
|
|
|
|
//
|
|
|
|
|
// The command run is name
|
|
|
|
|
// args may be used to read arguments from
|
|
|
|
|
// opts may be used to read optional arguments from
|
|
|
|
|
//
|
|
|
|
|
// The result should be capable of being JSON encoded
|
|
|
|
|
// If it is a string or a []string it will be shown to the user
|
|
|
|
|
// otherwise it will be JSON encoded and shown to the user like that
|
|
|
|
|
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
|
|
|
|
|
switch name {
|
|
|
|
|
case "noop":
|
|
|
|
|
if txt, ok := opt["error"]; ok {
|
|
|
|
|
if txt == "" {
|
|
|
|
|
txt = "unspecified error"
|
|
|
|
|
}
|
|
|
|
|
return nil, errors.New(txt)
|
|
|
|
|
}
|
|
|
|
|
if _, ok := opt["echo"]; ok {
|
|
|
|
|
out := map[string]interface{}{}
|
|
|
|
|
out["name"] = name
|
|
|
|
|
out["arg"] = arg
|
|
|
|
|
out["opt"] = opt
|
|
|
|
|
return out, nil
|
|
|
|
|
}
|
|
|
|
|
return nil, nil
|
|
|
|
|
default:
|
|
|
|
|
return nil, fs.ErrorCommandNotFound
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2012-12-26 13:23:58 +01:00
|
|
|
|
// ------------------------------------------------------------
|
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
|
// Fs returns the parent Fs that this object belongs to.
func (o *Object) Fs() fs.Info {
	return o.fs
}
|
|
|
|
|
|
|
|
|
|
// String returns a string version of the object for logging - the
// remote path, or "<nil>" when called on a nil Object.
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}
|
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
|
// Remote returns the remote path of the object.
func (o *Object) Remote() string {
	return o.remote
}
|
|
|
|
|
|
2016-01-11 13:39:33 +01:00
|
|
|
|
// Hash returns the requested hash of a file as a lowercase hex string
//
// Hashes are cached on the Object; the cache is bypassed and the file
// re-read when lstat shows a different size or modtime from the
// recorded values, or when the requested hash type isn't cached yet.
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
	// Check that the underlying file hasn't changed
	o.fs.objectMetaMu.RLock()
	oldtime := o.modTime
	oldsize := o.size
	o.fs.objectMetaMu.RUnlock()
	err := o.lstat()
	var changed bool
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// If file not found then we assume any accumulated
			// hashes are OK - this will error on Open
			changed = true
		} else {
			return "", fmt.Errorf("hash: failed to stat: %w", err)
		}
	} else {
		// lstat refreshed o.modTime/o.size - compare with the snapshot
		o.fs.objectMetaMu.RLock()
		changed = !o.modTime.Equal(oldtime) || oldsize != o.size
		o.fs.objectMetaMu.RUnlock()
	}

	o.fs.objectMetaMu.RLock()
	hashValue, hashFound := o.hashes[r]
	o.fs.objectMetaMu.RUnlock()

	if changed || !hashFound {
		// (Re-)read the file and compute the requested hash
		var in io.ReadCloser

		if !o.translatedLink {
			var fd *os.File
			fd, err = file.Open(o.path)
			if fd != nil {
				// fadvise wrapper keeps linux page cache usage under
				// control while streaming the file
				in = newFadviseReadCloser(o, fd, 0, 0)
			}
		} else {
			// Hash the link target rather than the file it points at
			in, err = o.openTranslatedLink(0, -1)
		}
		// If not checking for updates, only read size given
		// NOTE(review): this wraps in before err is checked - if the
		// open failed, in is nil here; presumably the err check below
		// returns first, but confirm NewLimitedReadCloser tolerates nil.
		if o.fs.opt.NoCheckUpdated {
			in = readers.NewLimitedReadCloser(in, o.size)
		}
		if err != nil {
			return "", fmt.Errorf("hash: failed to open: %w", err)
		}
		var hashes map[hash.Type]string
		hashes, err = hash.StreamTypes(readers.NewContextReader(ctx, in), hash.NewHashSet(r))
		closeErr := in.Close()
		if err != nil {
			return "", fmt.Errorf("hash: failed to read: %w", err)
		}
		if closeErr != nil {
			return "", fmt.Errorf("hash: failed to close: %w", closeErr)
		}
		hashValue = hashes[r]
		// Cache the freshly computed hash(es) on the object
		o.fs.objectMetaMu.Lock()
		if o.hashes == nil {
			o.hashes = hashes
		} else {
			o.hashes[r] = hashValue
		}
		o.fs.objectMetaMu.Unlock()
	}
	return hashValue, nil
}
|
|
|
|
|
|
|
|
|
|
// Size returns the size of an object in bytes
|
2015-11-07 12:14:46 +01:00
|
|
|
|
func (o *Object) Size() int64 {
|
2020-06-21 14:02:46 +02:00
|
|
|
|
o.fs.objectMetaMu.RLock()
|
|
|
|
|
defer o.fs.objectMetaMu.RUnlock()
|
2017-06-30 11:24:06 +02:00
|
|
|
|
return o.size
|
2012-12-26 13:23:58 +01:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ModTime returns the modification time of the object
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
2020-06-21 14:02:46 +02:00
|
|
|
|
o.fs.objectMetaMu.RLock()
|
|
|
|
|
defer o.fs.objectMetaMu.RUnlock()
|
2017-06-30 11:24:06 +02:00
|
|
|
|
return o.modTime
|
2012-12-26 13:23:58 +01:00
|
|
|
|
}
|
|
|
|
|
|
2022-05-24 19:06:16 +02:00
|
|
|
|
// Set the atime and ltime of the object
|
|
|
|
|
func (o *Object) setTimes(atime, mtime time.Time) (err error) {
|
|
|
|
|
if o.translatedLink {
|
|
|
|
|
err = lChtimes(o.path, atime, mtime)
|
|
|
|
|
} else {
|
|
|
|
|
err = os.Chtimes(o.path, atime, mtime)
|
|
|
|
|
}
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
|
// SetModTime sets the modification time of the local fs object
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
2020-07-30 17:43:17 +02:00
|
|
|
|
if o.fs.opt.NoSetModTime {
|
|
|
|
|
return nil
|
|
|
|
|
}
|
2022-05-24 19:06:16 +02:00
|
|
|
|
err := o.setTimes(modTime, modTime)
|
2012-12-26 13:23:58 +01:00
|
|
|
|
if err != nil {
|
2016-03-22 16:07:10 +01:00
|
|
|
|
return err
|
2014-07-24 23:51:34 +02:00
|
|
|
|
}
|
|
|
|
|
// Re-read metadata
|
2016-03-22 16:07:10 +01:00
|
|
|
|
return o.lstat()
|
2012-12-26 13:23:58 +01:00
|
|
|
|
}
|
|
|
|
|
|
2015-09-22 19:47:16 +02:00
|
|
|
|
// Storable returns a boolean showing if this object is storable
|
2015-11-07 12:14:46 +01:00
|
|
|
|
func (o *Object) Storable() bool {
|
2020-06-21 14:02:46 +02:00
|
|
|
|
o.fs.objectMetaMu.RLock()
|
2017-06-30 11:24:06 +02:00
|
|
|
|
mode := o.mode
|
2020-06-21 14:02:46 +02:00
|
|
|
|
o.fs.objectMetaMu.RUnlock()
|
2018-09-13 00:54:30 +02:00
|
|
|
|
if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks {
|
2018-05-14 19:06:57 +02:00
|
|
|
|
if !o.fs.opt.SkipSymlinks {
|
2017-07-21 12:15:58 +02:00
|
|
|
|
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
|
|
|
|
|
}
|
2017-01-29 14:43:20 +01:00
|
|
|
|
return false
|
|
|
|
|
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
|
2017-02-11 21:19:44 +01:00
|
|
|
|
fs.Logf(o, "Can't transfer non file/directory")
|
2012-12-26 13:23:58 +01:00
|
|
|
|
return false
|
|
|
|
|
} else if mode&os.ModeDir != 0 {
|
2017-02-09 12:01:20 +01:00
|
|
|
|
// fs.Debugf(o, "Skipping directory")
|
2012-12-26 13:23:58 +01:00
|
|
|
|
return false
|
|
|
|
|
}
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
|
2014-07-19 12:06:25 +02:00
|
|
|
|
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
//
// Read also uses fd to detect concurrent modification of the source
// file while it is being transferred.
type localOpenFile struct {
	o    *Object           // object that is open
	in   io.ReadCloser     // handle we are wrapping
	hash *hash.MultiHasher // currently accumulating hashes
	fd   *os.File          // file object reference
}
|
|
|
|
|
|
|
|
|
|
// Read bytes from the object - see io.Reader
|
|
|
|
|
func (file *localOpenFile) Read(p []byte) (n int, err error) {
|
2018-05-14 19:06:57 +02:00
|
|
|
|
if !file.o.fs.opt.NoCheckUpdated {
|
2018-04-09 16:27:58 +02:00
|
|
|
|
// Check if file has the same size and modTime
|
|
|
|
|
fi, err := file.fd.Stat()
|
|
|
|
|
if err != nil {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return 0, fmt.Errorf("can't read status of source file while transferring: %w", err)
|
2018-04-09 16:27:58 +02:00
|
|
|
|
}
|
2020-06-21 14:02:46 +02:00
|
|
|
|
file.o.fs.objectMetaMu.RLock()
|
|
|
|
|
oldtime := file.o.modTime
|
|
|
|
|
oldsize := file.o.size
|
|
|
|
|
file.o.fs.objectMetaMu.RUnlock()
|
|
|
|
|
if oldsize != fi.Size() {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size()))
|
2018-04-09 16:27:58 +02:00
|
|
|
|
}
|
2023-12-08 16:26:53 +01:00
|
|
|
|
if !oldtime.Equal(readTime(file.o.fs.opt.TimeType, fi)) {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return 0, fserrors.NoLowLevelRetryError(fmt.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime()))
|
2018-04-09 16:27:58 +02:00
|
|
|
|
}
|
2018-01-31 21:18:31 +01:00
|
|
|
|
}
|
|
|
|
|
|
2014-07-19 12:06:25 +02:00
|
|
|
|
n, err = file.in.Read(p)
|
|
|
|
|
if n > 0 {
|
|
|
|
|
// Hash routines never return an error
|
|
|
|
|
_, _ = file.hash.Write(p[:n])
|
|
|
|
|
}
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
2016-06-19 14:49:34 +02:00
|
|
|
|
// Close the object and update the hashes
|
2014-07-19 12:06:25 +02:00
|
|
|
|
func (file *localOpenFile) Close() (err error) {
|
|
|
|
|
err = file.in.Close()
|
|
|
|
|
if err == nil {
|
2016-06-19 14:49:34 +02:00
|
|
|
|
if file.hash.Size() == file.o.Size() {
|
2020-06-21 14:02:46 +02:00
|
|
|
|
file.o.fs.objectMetaMu.Lock()
|
2016-06-19 14:49:34 +02:00
|
|
|
|
file.o.hashes = file.hash.Sums()
|
2020-06-21 14:02:46 +02:00
|
|
|
|
file.o.fs.objectMetaMu.Unlock()
|
2016-06-19 14:49:34 +02:00
|
|
|
|
}
|
2014-07-19 12:06:25 +02:00
|
|
|
|
}
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
2018-09-13 00:54:30 +02:00
|
|
|
|
// Returns a ReadCloser() object that contains the contents of a symbolic link
|
|
|
|
|
func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) {
|
|
|
|
|
// Read the link and return the destination it as the contents of the object
|
|
|
|
|
linkdst, err := os.Readlink(o.path)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
2022-08-20 16:38:02 +02:00
|
|
|
|
return readers.NewLimitedReadCloser(io.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil
|
2018-09-13 00:54:30 +02:00
|
|
|
|
}
|
|
|
|
|
|
2012-12-26 13:23:58 +01:00
|
|
|
|
// Open an object for read
//
// Supported options are fs.SeekOption, fs.RangeOption and
// fs.HashesOption; any other mandatory option is logged as
// unsupported.
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1 // limit < 0 means read to the end
	var hasher *hash.MultiHasher
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		case *fs.HashesOption:
			if x.Hashes.Count() > 0 {
				hasher, err = hash.NewMultiHasherTypes(x.Hashes)
				if err != nil {
					return nil, err
				}
			}
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}

	// Update the file info before we start reading
	err = o.lstat()
	if err != nil {
		return nil, err
	}

	// If not checking updated then limit to current size. This means if
	// file is being extended, readers will read a o.Size() bytes rather
	// than the new size making for a consistent upload.
	if limit < 0 && o.fs.opt.NoCheckUpdated {
		limit = o.size
	}

	// Handle a translated link
	if o.translatedLink {
		return o.openTranslatedLink(offset, limit)
	}

	fd, err := file.Open(o.path)
	if err != nil {
		return
	}
	// Wrap the handle with fadvise (kernel page cache hints, no-op on
	// some platforms) and limit the number of bytes that can be read
	wrappedFd := readers.NewLimitedReadCloser(newFadviseReadCloser(o, fd, offset, limit), limit)
	if offset != 0 {
		// seek the object
		_, err = fd.Seek(offset, io.SeekStart)
		// don't attempt to make checksums - they are only valid when
		// reading the whole object from the start
		return wrappedFd, err
	}
	if hasher == nil {
		// no need to wrap since we don't need checksums
		return wrappedFd, nil
	}
	// Update the hashes as we go along
	in = &localOpenFile{
		o:    o,
		in:   wrappedFd,
		hash: hasher,
		fd:   fd,
	}
	return in, nil
}
|
|
|
|
|
|
2015-08-31 22:05:51 +02:00
|
|
|
|
// mkdirAll makes all the directories needed to store the object
|
2015-11-07 12:14:46 +01:00
|
|
|
|
func (o *Object) mkdirAll() error {
|
2018-08-31 22:10:36 +02:00
|
|
|
|
dir := filepath.Dir(o.path)
|
2021-06-11 00:46:36 +02:00
|
|
|
|
return file.MkdirAll(dir, 0777)
|
2015-08-31 22:05:51 +02:00
|
|
|
|
}
|
|
|
|
|
|
2018-09-13 00:54:30 +02:00
|
|
|
|
type nopWriterCloser struct {
|
|
|
|
|
*bytes.Buffer
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (nwc nopWriterCloser) Close() error {
|
|
|
|
|
// noop
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-18 18:04:21 +02:00
|
|
|
|
// Update the object from in with modTime and size
//
// Regular files are written directly (with optional pre-allocation);
// translated symlinks are buffered in memory and then created with
// os.Symlink. On any write error the partially written file is
// removed. Finally the mod time and (if requested) metadata are set
// and the object info is re-read.
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	var out io.WriteCloser
	var hasher *hash.MultiHasher

	// A HashesOption asks us to compute these hashes while writing so
	// they can be cached on the object afterwards
	for _, option := range options {
		switch x := option.(type) {
		case *fs.HashesOption:
			if x.Hashes.Count() > 0 {
				hasher, err = hash.NewMultiHasherTypes(x.Hashes)
				if err != nil {
					return err
				}
			}
		}
	}

	err = o.mkdirAll()
	if err != nil {
		return err
	}

	// Wipe hashes before update - cached hashes are invalid once the
	// content changes
	o.clearHashCache()

	var symlinkData bytes.Buffer
	// If the object is a regular file, create it.
	// If it is a translated link, just read in the contents, and
	// then create a symlink
	if !o.translatedLink {
		f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
		if err != nil {
			if runtime.GOOS == "windows" && os.IsPermission(err) {
				// If permission denied on Windows might be trying to update a
				// hidden file, in which case try opening without CREATE
				// See: https://stackoverflow.com/questions/13215716/ioerror-errno-13-permission-denied-when-trying-to-open-hidden-file-in-w-mod
				f, err = file.OpenFile(o.path, os.O_WRONLY|os.O_TRUNC, 0666)
				if err != nil {
					return err
				}
			} else {
				return err
			}
		}
		if !o.fs.opt.NoPreAllocate {
			// Pre-allocate the file for performance reasons
			err = file.PreAllocate(src.Size(), f)
			if err != nil {
				fs.Debugf(o, "Failed to pre-allocate: %v", err)
				// A full disk can't hold the upload, so give up now
				if err == file.ErrDiskFull {
					_ = f.Close()
					return err
				}
			}
		}
		out = f
	} else {
		// Collect the link target in memory; it is turned into a
		// symlink further down rather than written as file contents
		out = nopWriterCloser{&symlinkData}
	}

	// Calculate the hash of the object we are reading as we go along
	if hasher != nil {
		in = io.TeeReader(in, hasher)
	}

	_, err = io.Copy(out, in)
	// Always close, but a copy error takes precedence over a close error
	closeErr := out.Close()
	if err == nil {
		err = closeErr
	}

	if o.translatedLink {
		if err == nil {
			// Remove any current symlink or file, if one exists
			if _, err := os.Lstat(o.path); err == nil {
				if removeErr := os.Remove(o.path); removeErr != nil {
					fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
					return removeErr
				}
			}
			// Use the contents for the copied object to create a symlink
			err = os.Symlink(symlinkData.String(), o.path)
		}

		// only continue if symlink creation succeeded
		if err != nil {
			return err
		}
	}

	if err != nil {
		// Don't leave a half-written file behind
		fs.Logf(o, "Removing partially written file on error: %v", err)
		if removeErr := os.Remove(o.path); removeErr != nil {
			fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
		}
		return err
	}

	// All successful so update the hashes
	if hasher != nil {
		o.fs.objectMetaMu.Lock()
		o.hashes = hasher.Sums()
		o.fs.objectMetaMu.Unlock()
	}

	// Set the mtime
	err = o.SetModTime(ctx, src.ModTime(ctx))
	if err != nil {
		return err
	}

	// Fetch and set metadata if --metadata is in use
	meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
	if err != nil {
		return fmt.Errorf("failed to read metadata from source object: %w", err)
	}
	err = o.writeMetadata(meta)
	if err != nil {
		return fmt.Errorf("failed to set metadata: %w", err)
	}

	// ReRead info now that we have finished
	return o.lstat()
}
|
|
|
|
|
|
2020-05-19 11:16:43 +02:00
|
|
|
|
var sparseWarning sync.Once
|
|
|
|
|
|
2019-04-22 20:22:42 +02:00
|
|
|
|
// OpenWriterAt opens with a handle for random access writes
|
|
|
|
|
//
|
|
|
|
|
// Pass in the remote desired and the size if known.
|
|
|
|
|
//
|
|
|
|
|
// It truncates any existing object
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
|
2019-04-22 20:22:42 +02:00
|
|
|
|
// Temporary Object under construction
|
2018-11-02 13:12:51 +01:00
|
|
|
|
o := f.newObject(remote)
|
2019-04-22 20:22:42 +02:00
|
|
|
|
|
|
|
|
|
err := o.mkdirAll()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if o.translatedLink {
|
|
|
|
|
return nil, errors.New("can't open a symlink for random writing")
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
out, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
// Pre-allocate the file for performance reasons
|
2021-01-31 21:25:24 +01:00
|
|
|
|
if !f.opt.NoPreAllocate {
|
|
|
|
|
err = file.PreAllocate(size, out)
|
|
|
|
|
if err != nil {
|
|
|
|
|
fs.Debugf(o, "Failed to pre-allocate: %v", err)
|
|
|
|
|
}
|
2019-04-22 20:22:42 +02:00
|
|
|
|
}
|
2020-05-19 11:16:43 +02:00
|
|
|
|
if !f.opt.NoSparse && file.SetSparseImplemented {
|
|
|
|
|
sparseWarning.Do(func() {
|
|
|
|
|
fs.Infof(nil, "Writing sparse files: use --local-no-sparse or --multi-thread-streams 0 to disable")
|
|
|
|
|
})
|
|
|
|
|
// Set the file to be a sparse file (important on Windows)
|
|
|
|
|
err = file.SetSparse(out)
|
|
|
|
|
if err != nil {
|
2020-09-11 16:34:58 +02:00
|
|
|
|
fs.Errorf(o, "Failed to set sparse: %v", err)
|
2020-05-19 11:16:43 +02:00
|
|
|
|
}
|
2020-03-06 13:41:48 +01:00
|
|
|
|
}
|
|
|
|
|
|
2019-04-22 20:22:42 +02:00
|
|
|
|
return out, nil
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-30 11:24:06 +02:00
|
|
|
|
// setMetadata sets the file info from the os.FileInfo passed in
|
|
|
|
|
func (o *Object) setMetadata(info os.FileInfo) {
|
2020-05-20 10:19:54 +02:00
|
|
|
|
// if not checking updated then don't update the stat
|
|
|
|
|
if o.fs.opt.NoCheckUpdated && !o.modTime.IsZero() {
|
|
|
|
|
return
|
|
|
|
|
}
|
2020-06-21 14:02:46 +02:00
|
|
|
|
o.fs.objectMetaMu.Lock()
|
|
|
|
|
o.size = info.Size()
|
2023-12-08 16:26:53 +01:00
|
|
|
|
o.modTime = readTime(o.fs.opt.TimeType, info)
|
2020-06-21 14:02:46 +02:00
|
|
|
|
o.mode = info.Mode()
|
|
|
|
|
o.fs.objectMetaMu.Unlock()
|
2021-05-01 13:39:29 +02:00
|
|
|
|
// Read the size of the link.
|
|
|
|
|
//
|
|
|
|
|
// The value in info.Size() is not always correct
|
|
|
|
|
// - Windows links read as 0 size
|
|
|
|
|
// - Some virtual filesystems (such ash LucidLink) links read as 0 size
|
|
|
|
|
// - Android - some versions the links are larger than readlink suggests
|
|
|
|
|
if o.translatedLink {
|
2020-09-25 19:32:31 +02:00
|
|
|
|
linkdst, err := os.Readlink(o.path)
|
|
|
|
|
if err != nil {
|
|
|
|
|
fs.Errorf(o, "Failed to read link size: %v", err)
|
|
|
|
|
} else {
|
|
|
|
|
o.size = int64(len(linkdst))
|
|
|
|
|
}
|
|
|
|
|
}
|
2017-06-30 11:24:06 +02:00
|
|
|
|
}
|
|
|
|
|
|
2021-11-24 13:01:02 +01:00
|
|
|
|
// clearHashCache wipes any cached hashes for the object
|
|
|
|
|
func (o *Object) clearHashCache() {
|
|
|
|
|
o.fs.objectMetaMu.Lock()
|
|
|
|
|
o.hashes = nil
|
|
|
|
|
o.fs.objectMetaMu.Unlock()
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-25 08:05:53 +02:00
|
|
|
|
// Stat an Object into info
|
2015-11-07 12:14:46 +01:00
|
|
|
|
func (o *Object) lstat() error {
|
2017-01-29 14:43:20 +01:00
|
|
|
|
info, err := o.fs.lstat(o.path)
|
2017-06-30 11:24:06 +02:00
|
|
|
|
if err == nil {
|
|
|
|
|
o.setMetadata(info)
|
|
|
|
|
}
|
2012-12-26 13:23:58 +01:00
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	// Hashes are no longer valid once the file is gone
	o.clearHashCache()
	return remove(o.path)
}
|
|
|
|
|
|
2022-05-24 19:06:16 +02:00
|
|
|
|
// Metadata returns metadata for an object
|
|
|
|
|
//
|
|
|
|
|
// It should return nil if there is no Metadata
|
|
|
|
|
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
|
|
|
|
|
metadata, err = o.getXattr()
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
err = o.readMetadataFromFile(&metadata)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
return metadata, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Write the metadata on the object
|
|
|
|
|
func (o *Object) writeMetadata(metadata fs.Metadata) (err error) {
|
|
|
|
|
err = o.setXattr(metadata)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
err = o.writeMetadataToFile(metadata)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
2024-05-08 18:21:57 +02:00
|
|
|
|
// SetMetadata sets metadata for an Object
|
|
|
|
|
//
|
|
|
|
|
// It should return fs.ErrorNotImplemented if it can't set metadata
|
|
|
|
|
func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
|
|
|
|
err := o.writeMetadata(metadata)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return fmt.Errorf("SetMetadata failed on Object: %w", err)
|
|
|
|
|
}
|
|
|
|
|
// Re-read info now we have finished setting stuff
|
|
|
|
|
return o.lstat()
|
|
|
|
|
}
|
|
|
|
|
|
2020-01-14 18:33:35 +01:00
|
|
|
|
// cleanRootPath cleans the root path supplied by the user: the path
// parts are run through the configured encoder, the result is made
// absolute, and (unless noUNC is set) converted to UNC form on
// Windows.
func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
	var vol string
	if runtime.GOOS == "windows" {
		// Strip the volume name (e.g. `C:` or `\\server\share`) so it
		// is not run through the name encoder; re-attached below
		vol = filepath.VolumeName(s)
		if vol == `\\?` && len(s) >= 6 {
			// `\\?\C:`
			vol = s[:6]
		}
		s = s[len(vol):]
	}
	// Don't use FromStandardPath. Make sure Dot (`.`, `..`) as name will not be reencoded
	// Take care of the case Standard: ././‛. (the first dot means current directory)
	if enc != encoder.Standard {
		s = filepath.ToSlash(s)
		parts := strings.Split(s, "/")
		encoded := make([]string, len(parts))
		changed := false
		for i, p := range parts {
			// `.` and `..` are path navigation, not names - leave them
			if (p == ".") || (p == "..") {
				encoded[i] = p
				continue
			}
			part := enc.FromStandardName(p)
			changed = changed || part != p
			encoded[i] = part
		}
		// Only rebuild the string if encoding changed something
		if changed {
			s = strings.Join(encoded, "/")
		}
		s = filepath.FromSlash(s)
	}
	if runtime.GOOS == "windows" {
		s = vol + s
	}
	// Best effort: if Abs fails keep the path as-is
	s2, err := filepath.Abs(s)
	if err == nil {
		s = s2
	}
	if !noUNC {
		// Convert to UNC. It does nothing on non windows platforms.
		s = file.UNCPath(s)
	}
	return s
}
|
|
|
|
|
|
2024-02-06 17:02:03 +01:00
|
|
|
|
// Items returns the count of items in this directory or this
// directory and subdirectories if known, -1 for unknown
//
// The local backend keeps no such count, so this is always -1.
func (d *Directory) Items() int64 {
	return -1
}
|
|
|
|
|
|
|
|
|
|
// ID returns the internal ID of this directory if known, or
// "" otherwise
//
// Local directories have no backend ID, so this is always "".
func (d *Directory) ID() string {
	return ""
}
|
|
|
|
|
|
|
|
|
|
// SetMetadata sets metadata for a Directory
|
|
|
|
|
//
|
|
|
|
|
// It should return fs.ErrorNotImplemented if it can't set metadata
|
|
|
|
|
func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
|
|
|
|
err := d.writeMetadata(metadata)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return fmt.Errorf("SetMetadata failed on Directory: %w", err)
|
|
|
|
|
}
|
|
|
|
|
// Re-read info now we have finished setting stuff
|
|
|
|
|
return d.lstat()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Hash does nothing on a directory
//
// This method is implemented with the incorrect type signature to
// stop the Directory type asserting to fs.Object or fs.ObjectInfo
func (d *Directory) Hash() {
	// Does nothing - deliberately takes no hash type and returns nothing
}
|
|
|
|
|
|
2012-12-26 13:23:58 +01:00
|
|
|
|
// Check the interfaces are satisfied at compile time
var (
	_ fs.Fs              = &Fs{}
	_ fs.PutStreamer     = &Fs{}
	_ fs.Mover           = &Fs{}
	_ fs.DirMover        = &Fs{}
	_ fs.Commander       = &Fs{}
	_ fs.OpenWriterAter  = &Fs{}
	_ fs.DirSetModTimer  = &Fs{}
	_ fs.MkdirMetadataer = &Fs{}
	_ fs.Object          = &Object{}
	_ fs.Metadataer      = &Object{}
	_ fs.SetMetadataer   = &Object{}
	_ fs.Directory       = &Directory{}
	_ fs.SetModTimer     = &Directory{}
	_ fs.SetMetadataer   = &Directory{}
)
|