diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index f2b3a0bd2..be34b2a7f 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -27,12 +27,12 @@ jobs:
strategy:
fail-fast: false
matrix:
- job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
+ job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']
include:
- job_name: linux
os: ubuntu-latest
- go: '1.21'
+ go: '>=1.22.0-rc.1'
gotags: cmount
build_flags: '-include "^linux/"'
check: true
@@ -43,14 +43,14 @@ jobs:
- job_name: linux_386
os: ubuntu-latest
- go: '1.21'
+ go: '>=1.22.0-rc.1'
goarch: 386
gotags: cmount
quicktest: true
- job_name: mac_amd64
os: macos-11
- go: '1.21'
+ go: '>=1.22.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/amd64" -cgo'
quicktest: true
@@ -59,14 +59,14 @@ jobs:
- job_name: mac_arm64
os: macos-11
- go: '1.21'
+ go: '>=1.22.0-rc.1'
gotags: 'cmount'
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
deploy: true
- job_name: windows
os: windows-latest
- go: '1.21'
+ go: '>=1.22.0-rc.1'
gotags: cmount
cgo: '0'
build_flags: '-include "^windows/"'
@@ -76,23 +76,23 @@ jobs:
- job_name: other_os
os: ubuntu-latest
- go: '1.21'
+ go: '>=1.22.0-rc.1'
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
compile_all: true
deploy: true
- - job_name: go1.19
- os: ubuntu-latest
- go: '1.19'
- quicktest: true
- racequicktest: true
-
- job_name: go1.20
os: ubuntu-latest
go: '1.20'
quicktest: true
racequicktest: true
+ - job_name: go1.21
+ os: ubuntu-latest
+ go: '1.21'
+ quicktest: true
+ racequicktest: true
+
name: ${{ matrix.job_name }}
runs-on: ${{ matrix.os }}
@@ -168,7 +168,7 @@ jobs:
env
- name: Go module cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -234,7 +234,7 @@ jobs:
uses: actions/checkout@v4
- name: Code quality test
- uses: golangci/golangci-lint-action@v3
+ uses: golangci/golangci-lint-action@v4
with:
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
version: latest
@@ -243,7 +243,7 @@ jobs:
- name: Install Go
uses: actions/setup-go@v5
with:
- go-version: '1.21'
+ go-version: '>=1.22.0-rc.1'
check-latest: true
- name: Install govulncheck
@@ -268,10 +268,10 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
- go-version: '1.21'
+ go-version: '>=1.22.0-rc.1'
- name: Go module cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
diff --git a/Dockerfile b/Dockerfile
index fa6cc94c5..82f16aad7 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,8 +1,9 @@
-FROM golang AS builder
+FROM golang:alpine AS builder
COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/
+RUN apk add --no-cache make bash gawk git
RUN \
CGO_ENABLED=0 \
make
diff --git a/MANUAL.html b/MANUAL.html
index d16296fa0..c53b0862a 100644
--- a/MANUAL.html
+++ b/MANUAL.html
@@ -81,7 +81,7 @@
Rclone syncs your files to cloud storage
@@ -127,6 +127,7 @@
Copy new or changed files to cloud storage
Sync (one way) to make a directory identical
+Bisync (two way) to keep two directories in sync bidirectionally
Move files to cloud storage deleting the local after verification
Check hashes and for missing/extra files
Mount your cloud storage as a network disk
@@ -139,7 +140,6 @@
1Fichier
Akamai Netstorage
Alibaba Cloud (Aliyun) Object Storage System (OSS)
-Amazon Drive
Amazon S3
Backblaze B2
Box
@@ -162,6 +162,7 @@
Hetzner Storage Box
HiDrive
HTTP
+ImageKit
Internet Archive
Jottacloud
IBM COS S3
@@ -495,7 +496,6 @@ go build
1Fichier
Akamai Netstorage
Alias
-Amazon Drive
Amazon S3
Backblaze B2
Box
@@ -610,6 +610,8 @@ destpath/sourcepath/two.txt
See the --no-traverse option for controlling whether rclone lists the destination directory or not. Supplying this option when copying a small number of files into a large destination can speed transfers up greatly.
For example, if you have many files in /path/to/src but only a few of them change every day, you can copy all the files which have changed recently very efficiently like this:
rclone copy --max-age 24h --no-traverse /path/to/src remote:
+Rclone will sync the modification times of files and directories if the backend supports it. If metadata syncing is required then use the --metadata flag.
+Note that the modification time and metadata for the root directory will not be synced. See https://github.com/rclone/rclone/issues/7652 for more info.
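+As a minimal sketch (the source path and remote name here are placeholders), a copy that also preserves metadata might look like:
+rclone copy --metadata /path/to/src remote:backup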
Note: Use the -P/--progress flag to view real-time transfer statistics.
Note: Use the --dry-run or the --interactive/-i flag to test without copying anything.
rclone copy source:path dest:path [flags]
@@ -627,7 +629,7 @@ destpath/sourcepath/two.txt
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -641,6 +643,7 @@ destpath/sourcepath/two.txt
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -697,12 +700,50 @@ destpath/sourcepath/two.txt
It is always the contents of the directory that is synced, not the directory itself. So when source:path is a directory, it's the contents of source:path that are copied, not the directory name and contents. See extended explanation in the copy command if unsure.
If dest:path doesn't exist, it is created and the source:path contents go there.
It is not possible to sync overlapping remotes. However, you may exclude the destination from the sync with a filter rule or by putting an exclude-if-present file inside the destination directory and sync to a destination that is inside the source directory.
+Rclone will sync the modification times of files and directories if the backend supports it. If metadata syncing is required then use the --metadata flag.
+Note that the modification time and metadata for the root directory will not be synced. See https://github.com/rclone/rclone/issues/7652 for more info.
Note: Use the -P/--progress flag to view real-time transfer statistics
Note: Use the rclone dedupe command to deal with "Duplicate object/directory found in source/destination - ignoring" errors. See this forum post for more info.
+Logger Flags
+The --differ, --missing-on-dst, --missing-on-src, --match and --error flags write paths, one per line, to the file name (or stdout if it is -) supplied. What they write is described in the help below. For example --differ will write all paths which are present on both the source and destination but different.
+The --combined flag will write a file (or stdout) which contains all file paths with a symbol and then a space and then the path to tell you what happened to it. These are reminiscent of diff files.
+
+- `= path` means path was found in source and destination and was identical
+- `- path` means path was missing on the source, so only in the destination
+- `+ path` means path was missing on the destination, so only in the source
+- `* path` means path was present in source and destination but different.
+- `! path` means there was an error reading or hashing the source or dest.
+
+The --dest-after flag writes a list file using the same format flags as lsf (including customizable options for hash, modtime, etc.) Conceptually it is similar to rsync's --itemize-changes, but not identical -- it should output an accurate list of what will be on the destination after the sync.
+Note that these logger flags have a few limitations, and certain scenarios are not currently supported:
+
+- --max-duration / CutoffModeHard
+- --compare-dest / --copy-dest
+- server-side moves of an entire dir at once
+- High-level retries, because there would be duplicates (use --retries 1 to disable)
+- Possibly some unusual error scenarios
+
+Note also that each file is logged during the sync, as opposed to after, so it is most useful as a predictor of what SHOULD happen to each file (which may or may not match what actually DID.)
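+As an illustrative sketch (the log file names are arbitrary), several logger flags can be combined on a normal sync, with --retries 1 to avoid the duplicate entries noted above:
+rclone sync source:path dest:path --combined changes.log --error errors.log --retries 1
+Each line of changes.log will then start with one of the symbols listed above.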
rclone sync source:path dest:path [flags]
Options
- --create-empty-src-dirs Create empty source dirs on destination after sync
- -h, --help help for sync
+ --absolute Put a leading / in front of path names
+ --combined string Make a combined report of changes to this file
+ --create-empty-src-dirs Create empty source dirs on destination after sync
+ --csv Output in CSV format
+ --dest-after string Report all files that exist on the dest post-sync
+ --differ string Report all non-matching files to this file
+ -d, --dir-slash Append a slash to directory names (default true)
+ --dirs-only Only list directories
+ --error string Report all files with errors (hashing or reading) to this file
+ --files-only Only list files (default true)
+ -F, --format string Output format - see lsf help for details (default "p")
+ --hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
+ -h, --help help for sync
+ --match string Report all matching files to this file
+ --missing-on-dst string Report all files missing from the destination to this file
+ --missing-on-src string Report all files missing from the source to this file
+ -s, --separator string Separator for the items in the format (default ";")
+ -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
Copy Options
Flags for anything which can Copy a file.
--check-first Do all the checks before starting transfers
@@ -714,7 +755,7 @@ destpath/sourcepath/two.txt
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -728,6 +769,7 @@ destpath/sourcepath/two.txt
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -742,6 +784,7 @@ destpath/sourcepath/two.txt
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
+ --fix-case Force rename of case insensitive dest to match source
--ignore-errors Delete even if there are I/O errors
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
@@ -796,6 +839,8 @@ destpath/sourcepath/two.txt
Otherwise for each file in source:path selected by the filters (if any) this will move it into dest:path. If possible a server-side move will be used, otherwise it will copy it (server-side if possible) into dest:path then delete the original (if no errors on copy) in source:path.
If you want to delete empty source directories after move, use the --delete-empty-src-dirs flag.
See the --no-traverse option for controlling whether rclone lists the destination directory or not. Supplying this option when moving a small number of files into a large destination can speed transfers up greatly.
+Rclone will sync the modification times of files and directories if the backend supports it. If metadata syncing is required then use the --metadata flag.
+Note that the modification time and metadata for the root directory will not be synced. See https://github.com/rclone/rclone/issues/7652 for more info.
Important: Since this can cause data loss, test first with the --dry-run or the --interactive/-i flag.
Note: Use the -P/--progress flag to view real-time transfer statistics.
rclone move source:path dest:path [flags]
@@ -814,7 +859,7 @@ destpath/sourcepath/two.txt
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -828,6 +873,7 @@ destpath/sourcepath/two.txt
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -1601,23 +1647,35 @@ rclone backend help <backendname>
Synopsis
Perform bidirectional synchronization between two paths.
Bisync provides a bidirectional cloud sync solution in rclone. It retains the Path1 and Path2 filesystem listings from the prior run. On each successive run it will:
- list files on Path1 and Path2, and check for changes on each side. Changes include New, Newer, Older, and Deleted files.
- Propagate changes on Path1 to Path2, and vice-versa.
+Bisync is in beta and is considered an advanced command, so use with care. Make sure you have read and understood the entire manual (especially the Limitations section) before using, or data loss can result. Questions can be asked in the Rclone Forum.
See full bisync description for details.
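As a hedged sketch (remote names and paths are placeholders), a first run needs --resync to establish the baseline listings; later runs can then use the comparison and conflict flags listed below:
rclone bisync remote1:path1 remote2:path2 --resync --dry-run
rclone bisync remote1:path1 remote2:path2 --compare size,modtime,checksum --conflict-resolve newer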
rclone bisync remote1:path1 remote2:path2 [flags]
Options
- --check-access Ensure expected RCLONE_TEST files are found on both Path1 and Path2 filesystems, else abort.
- --check-filename string Filename for --check-access (default: RCLONE_TEST)
- --check-sync string Controls comparison of final listings: true|false|only (default: true) (default "true")
- --create-empty-src-dirs Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
- --filters-file string Read filtering patterns from a file
- --force Bypass --max-delete safety check and run the sync. Consider using with --verbose
- -h, --help help for bisync
- --ignore-listing-checksum Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)
- --localtime Use local time in listings (default: UTC)
- --no-cleanup Retain working files (useful for troubleshooting and testing).
- --remove-empty-dirs Remove ALL empty directories at the final cleanup step.
- --resilient Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!
- -1, --resync Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.
- --workdir string Use custom working dir - useful for testing. (default: $HOME/.cache/rclone/bisync)
+ --backup-dir1 string --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+ --backup-dir2 string --backup-dir for Path2. Must be a non-overlapping path on the same remote.
+ --check-access Ensure expected RCLONE_TEST files are found on both Path1 and Path2 filesystems, else abort.
+ --check-filename string Filename for --check-access (default: RCLONE_TEST)
+ --check-sync string Controls comparison of final listings: true|false|only (default: true) (default "true")
+ --compare string Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')
+ --conflict-loser ConflictLoserAction Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): , num, pathname, delete (default: num)
+ --conflict-resolve string Automatically resolve conflicts by preferring the version that is: none, path1, path2, newer, older, larger, smaller (default: none) (default "none")
+ --conflict-suffix string Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')
+ --create-empty-src-dirs Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
+ --download-hash Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)
+ --filters-file string Read filtering patterns from a file
+ --force Bypass --max-delete safety check and run the sync. Consider using with --verbose
+ -h, --help help for bisync
+ --ignore-listing-checksum Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)
+ --max-lock Duration Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m) (default 0s)
+ --no-cleanup Retain working files (useful for troubleshooting and testing).
+ --no-slow-hash Ignore listing checksums only on backends where they are slow
+ --recover Automatically recover from interruptions without requiring --resync.
+ --remove-empty-dirs Remove ALL empty directories at the final cleanup step.
+ --resilient Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!
+ -1, --resync Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.
+ --resync-mode string During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.) (default "none")
+ --slow-hash-sync-only Ignore slow checksums for listings and deltas, but still consider them during sync calls.
+ --workdir string Use custom working dir - useful for testing. (default: {WORKDIR})
Copy Options
Flags for anything which can Copy a file.
--check-first Do all the checks before starting transfers
@@ -1629,7 +1687,7 @@ rclone backend help <backendname>
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -1643,6 +1701,7 @@ rclone backend help <backendname>
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -2218,7 +2277,7 @@ if src is directory
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -2232,6 +2291,7 @@ if src is directory
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -2279,12 +2339,22 @@ if src is directory
rclone - Show help for rclone commands, flags and backends.
rclone copyurl
-Copy url content to dest.
+Copy the contents of the URL supplied to dest:path.
Synopsis
Download a URL's content and copy it to the destination without saving it in temporary storage.
-Setting --auto-filename will attempt to automatically determine the filename from the URL (after any redirections) and used in the destination path. With --auto-filename-header in addition, if a specific filename is set in HTTP headers, it will be used instead of the name from the URL. With --print-filename in addition, the resulting file name will be printed.
+Setting --auto-filename will attempt to automatically determine the filename from the URL (after any redirections) and use it in the destination path.
+With --auto-filename-header in addition, if a specific filename is set in HTTP headers, it will be used instead of the name from the URL. With --print-filename in addition, the resulting file name will be printed.
Setting --no-clobber will prevent overwriting a file on the destination if there is one with the same name.
Setting --stdout or making the output file name - will cause the output to be written to standard output.
+Troubleshooting
+If you can't get rclone copyurl to work then here are some things you can try:
+
+- --disable-http2 rclone will use HTTP2 if available - try disabling it
+- --bind 0.0.0.0 rclone will use IPv6 if available - try disabling it
+- --bind ::0 to disable IPv4
+- --user-agent curl - some sites have whitelists for curl's user-agent - try that
+- Make sure the site works with curl directly
+
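+As an illustrative sketch (the URL and remote are placeholders), a typical invocation and a troubleshooting variant using the flags above might look like:
+rclone copyurl -a --print-filename https://example.com/file.zip remote:downloads
+rclone copyurl -a --disable-http2 --user-agent curl https://example.com/file.zip remote:downloads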
rclone copyurl https://example.com dest:path [flags]
Options
-a, --auto-filename Get the file name from the URL and use it for destination file path
@@ -2570,11 +2640,11 @@ rclone link --expire 1d remote:path/to/file
List all the remotes in the config file and defined in environment variables.
Synopsis
rclone listremotes lists all the available remotes from the config file.
-When used with the --long flag it lists the types too.
+When used with the --long flag it lists the types and the descriptions too.
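+For example, rclone listremotes --long will print each remote name together with its type and description.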
rclone listremotes [flags]
Options
-h, --help help for listremotes
- --long Show the type as well as names
+ --long Show the type and the description as well as names
See the global flags page for global options not listed here.
SEE ALSO
@@ -2639,6 +2709,14 @@ test.sh,449
For example, to find all the files modified within one day and copy those only (without traversing the whole directory structure):
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
rclone copy --files-from-raw new_files /path/to/local remote:path
+The default time format is '2006-01-02 15:04:05'. Other formats can be specified with the --time-format flag. Examples:
+rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
+rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
+rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
+rclone lsf remote:path --format pt --time-format RFC3339
+rclone lsf remote:path --format pt --time-format DateOnly
+rclone lsf remote:path --format pt --time-format max
+--time-format max will automatically truncate '2006-01-02 15:04:05.000000000' to the maximum precision supported by the remote.
Any of the filtering options can be applied to this command.
There are several related list commands
@@ -2654,16 +2732,17 @@ rclone copy --files-from-raw new_files /path/to/local remote:path
Listing a nonexistent directory will produce an error except for remotes which can't have empty directories (e.g. s3, swift, or gcs - the bucket-based remotes).
rclone lsf remote:path [flags]
Options
- --absolute Put a leading / in front of path names
- --csv Output in CSV format
- -d, --dir-slash Append a slash to directory names (default true)
- --dirs-only Only list directories
- --files-only Only list files
- -F, --format string Output format - see help for details (default "p")
- --hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
- -h, --help help for lsf
- -R, --recursive Recurse into the listing
- -s, --separator string Separator for the items in the format (default ";")
+ --absolute Put a leading / in front of path names
+ --csv Output in CSV format
+ -d, --dir-slash Append a slash to directory names (default true)
+ --dirs-only Only list directories
+ --files-only Only list files
+ -F, --format string Output format - see help for details (default "p")
+ --hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
+ -h, --help help for lsf
+ -R, --recursive Recurse into the listing
+ -s, --separator string Separator for the items in the format (default ";")
+ -t, --time-format string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
Filter Options
Flags for filtering directory listings.
--delete-excluded Delete files on dest excluded from sync
@@ -2857,8 +2936,11 @@ rclone mount remote:path/to/files * --volname \\cloud\remote
Note that mapping to a directory path, instead of a drive letter, does not suffer from the same limitations.
Mounting on macOS
Mounting on macOS can be done either via built-in NFS server, macFUSE (also known as osxfuse) or FUSE-T. macFUSE is a traditional FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system which "mounts" via an NFSv4 local server.
-NFS mount
+Unicode Normalization
+It is highly recommended to keep the default of --no-unicode-normalization=false for all mount and serve commands on macOS. For details, see vfs-case-sensitivity.
+NFS mount
This method spins up an NFS server using the serve nfs command and mounts it to the specified mountpoint. If you run this in background mode using --daemon, you will need to send a SIGTERM signal to the rclone process using the kill command to stop the mount.
+Note that --nfs-cache-handle-limit controls the maximum number of cached file handles stored by the nfsmount caching handler. This should not be set too low or you may experience errors when trying to access files. The default is 1000000, but consider lowering this limit if the server's system resource usage causes problems.
macFUSE Notes
If installing macFUSE using dmg packages from the website, rclone will locate the macFUSE libraries without any further intervention. If however, macFUSE is installed using the macports package manager, the following additional steps are required.
sudo mkdir /usr/local/lib
@@ -2872,9 +2954,6 @@ sudo ln -s /opt/local/lib/libfuse.2.dylib
File access and modification times cannot be set separately as it seems to be an issue with the NFS client which always modifies both. Can be reproduced with 'touch -m' and 'touch -a' commands
This means that viewing files with various tools, notably macOS Finder, will cause rclone to update the modification time of the file. This may make rclone upload a full new copy of the file.
-Unicode Normalization
-Rclone includes flags for unicode normalization with macFUSE that should be updated for FUSE-T. See this forum post and FUSE-T issue #16. The following flag should be added to the rclone mount command.
--o modules=iconv,from_code=UTF-8,to_code=UTF-8
Read Only mounts
When mounting with --read-only, attempts to write to files will fail silently as opposed to with a clear warning as in macFUSE.
Limitations
@@ -3042,6 +3121,8 @@ WantedBy=multi-user.target
The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable, however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block-norm-dupes prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync.
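+For illustration (the remote and mountpoint are placeholders), the flag can be added to any mount command, e.g.:
+rclone mount remote:path /path/to/mountpoint --vfs-block-norm-dupes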
VFS Disk Options
This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
@@ -3080,6 +3161,7 @@ WantedBy=multi-user.target
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -3092,7 +3174,7 @@ WantedBy=multi-user.target
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -3158,7 +3240,7 @@ if src is directory
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -3172,6 +3254,7 @@ if src is directory
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -3293,9 +3376,349 @@ if src is directory
- rclone - Show help for rclone commands, flags and backends.
+rclone nfsmount
+Mount the remote as file system on a mountpoint.
+Synopsis
+rclone nfsmount allows Linux, FreeBSD, macOS and Windows to mount any of Rclone's cloud storage systems as a file system with FUSE.
+First set up your remote using rclone config. Check it works with rclone ls etc.
+On Linux and macOS, you can run mount in either foreground or background (aka daemon) mode. Mount runs in foreground mode by default. Use the --daemon flag to force background mode. On Windows you can run mount in foreground only, the flag is ignored.
+In background mode rclone acts as a generic Unix mount program: the main program starts, spawns background rclone process to setup and maintain the mount, waits until success or timeout and exits with appropriate code (killing the child process if it fails).
+On Linux/macOS/FreeBSD start the mount like this, where /path/to/local/mount is an empty existing directory:
+rclone nfsmount remote:path/to/files /path/to/local/mount
+On Windows you can start a mount in different ways. See below for details. If foreground mount is used interactively from a console window, rclone will serve the mount and occupy the console so another window should be used to work with the mount until rclone is interrupted e.g. by pressing Ctrl-C.
+The following examples will mount to an automatically assigned drive, to specific drive letter X:, to path C:\path\parent\mount (where parent directory or drive must exist, and mount must not exist, and is not supported when mounting as a network drive), and the last example will mount as network share \\cloud\remote and map it to an automatically assigned drive:
+rclone nfsmount remote:path/to/files *
+rclone nfsmount remote:path/to/files X:
+rclone nfsmount remote:path/to/files C:\path\parent\mount
+rclone nfsmount remote:path/to/files \\cloud\remote
+When the program ends while in foreground mode, either via Ctrl+C or receiving a SIGINT or SIGTERM signal, the mount should be automatically stopped.
+When running in background mode the user will have to stop the mount manually:
+# Linux
+fusermount -u /path/to/local/mount
+# OS X
+umount /path/to/local/mount
+The umount operation can fail, for example when the mountpoint is busy. When that happens, it is the user's responsibility to stop the mount manually.
+The size of the mounted file system will be set according to information retrieved from the remote, the same as returned by the rclone about command. Remotes with unlimited storage may report the used size only, then an additional 1 PiB of free space is assumed. If the remote does not support the about feature at all, then 1 PiB is set as both the total and the free size.
+Installing on Windows
+To run rclone nfsmount on Windows, you will need to download and install WinFsp.
+WinFsp is an open-source Windows File System Proxy which makes it easy to write user space file systems for Windows. It provides a FUSE emulation layer which rclone uses in combination with cgofuse. Both of these packages are by Bill Zissimopoulos who was very helpful during the implementation of rclone nfsmount for Windows.
+Mounting modes on windows
+Unlike other operating systems, Microsoft Windows provides a different filesystem type for network and fixed drives. It optimises access on the assumption fixed disk drives are fast and reliable, while network drives have relatively high latency and less reliability. Some settings can also be differentiated between the two types, for example that Windows Explorer should just display icons and not create preview thumbnails for image and video files on network drives.
+In most cases, rclone will mount the remote as a normal, fixed disk drive by default. However, you can also choose to mount it as a remote network drive, often described as a network share. If you mount an rclone remote using the default, fixed drive mode and experience unexpected program errors, freezes or other issues, consider mounting as a network drive instead.
+When mounting as a fixed disk drive you can either mount to an unused drive letter, or to a path representing a nonexistent subdirectory of an existing parent directory or drive. Using the special value * will tell rclone to automatically assign the next available drive letter, starting with Z: and moving backward. Examples:
+rclone nfsmount remote:path/to/files *
+rclone nfsmount remote:path/to/files X:
+rclone nfsmount remote:path/to/files C:\path\parent\mount
+rclone nfsmount remote:path/to/files X:
+Option --volname can be used to set a custom volume name for the mounted file system. The default is to use the remote name and path.
+To mount as network drive, you can add option --network-mode to your nfsmount command. Mounting to a directory path is not supported in this mode, it is a limitation Windows imposes on junctions, so the remote must always be mounted to a drive letter.
+rclone nfsmount remote:path/to/files X: --network-mode
+A volume name specified with --volname will be used to create the network share path. A complete UNC path, such as \\cloud\remote, optionally with path \\cloud\remote\madeup\path, will be used as is. Any other string will be used as the share part, after a default prefix \\server\. If no volume name is specified then \\server\share will be used. You must make sure the volume name is unique when you are mounting more than one drive, or else the mount command will fail. The share name will be treated as the volume label for the mapped drive, shown in Windows Explorer etc, while the complete \\server\share will be reported as the remote UNC path by net use etc, just like a normal network drive mapping.
+If you specify a full network share UNC path with --volname, this will implicitly set the --network-mode option, so the following two examples have the same result:
+rclone nfsmount remote:path/to/files X: --network-mode
+rclone nfsmount remote:path/to/files X: --volname \\server\share
+You may also specify the network share UNC path as the mountpoint itself. Then rclone will automatically assign a drive letter, same as with * and use that as mountpoint, and instead use the UNC path specified as the volume name, as if it were specified with the --volname option. This will also implicitly set the --network-mode option. This means the following two examples have the same result:
+rclone nfsmount remote:path/to/files \\cloud\remote
+rclone nfsmount remote:path/to/files * --volname \\cloud\remote
+There is yet another way to enable network mode, and to set the share path, and that is to pass the "native" libfuse/WinFsp option directly: --fuse-flag --VolumePrefix=\server\share. Note that the path must be with just a single backslash prefix in this case.
+Note: In previous versions of rclone this was the only supported method.
+Read more about drive mapping
+See also Limitations section below.
+Windows filesystem permissions
+The FUSE emulation layer on Windows must convert between the POSIX-based permission model used in FUSE, and the permission model used in Windows, based on access-control lists (ACL).
+The mounted filesystem will normally get three entries in its access-control list (ACL), representing permissions for the POSIX permission scopes: Owner, group and others. By default, the owner and group will be taken from the current user, and the built-in group "Everyone" will be used to represent others. The user/group can be customized with FUSE options "UserName" and "GroupName", e.g. -o UserName=user123 -o GroupName="Authenticated Users". The permissions on each entry will be set according to options --dir-perms and --file-perms, which take a value in traditional Unix numeric notation.
+The default permissions correspond to --file-perms 0666 --dir-perms 0777, i.e. read and write permissions to everyone. This means you will not be able to start any programs from the mount. To be able to do that you must add execute permissions, e.g. --file-perms 0777 --dir-perms 0777 to add it to everyone. If the program needs to write files, chances are you will have to enable VFS File Caching as well (see also limitations). Note that the default write permission has some restrictions for accounts other than the owner, specifically it lacks the "write extended attributes", as explained next.
+The mapping of permissions is not always trivial, and the result you see in Windows Explorer may not be exactly like you expected. For example, when setting a value that includes write access for the group or others scope, this will be mapped to individual permissions "write attributes", "write data" and "append data", but not "write extended attributes". Windows will then show this as basic permission "Special" instead of "Write", because "Write" also covers the "write extended attributes" permission. When setting digit 0 for group or others, to indicate no permissions, they will still get individual permissions "read attributes", "read extended attributes" and "read permissions". This is done for compatibility reasons, e.g. to allow users without additional permissions to be able to read basic metadata about files like in Unix.
+WinFsp 2021 (version 1.9) introduced a new FUSE option "FileSecurity", that allows the complete specification of file security descriptors using SDDL. With this you get detailed control of the resulting permissions, compared to use of the POSIX permissions described above, and no additional permissions will be added automatically for compatibility with Unix. Some example use cases follow.
+If you set POSIX permissions for only allowing access to the owner, using --file-perms 0600 --dir-perms 0700, the user group and the built-in "Everyone" group will still be given some special permissions, as described above. Some programs may then (incorrectly) interpret this as the file being accessible by everyone, for example an SSH client may warn about "unprotected private key file". You can work around this by specifying -o FileSecurity="D:P(A;;FA;;;OW)", which sets file all access (FA) to the owner (OW), and nothing else.
+When setting write permissions then, except for the owner, this does not include the "write extended attributes" permission, as mentioned above. This may prevent applications from writing to files, giving permission denied error instead. To set working write permissions for the built-in "Everyone" group, similar to what it gets by default but with the addition of the "write extended attributes", you can specify -o FileSecurity="D:P(A;;FRFW;;;WD)", which sets file read (FR) and file write (FW) to everyone (WD). If file execute (FX) is also needed, then change to -o FileSecurity="D:P(A;;FRFWFX;;;WD)", or set file all access (FA) to get full access permissions, including delete, with -o FileSecurity="D:P(A;;FA;;;WD)".
+Windows caveats
+Drives created as Administrator are not visible to other accounts, not even an account that was elevated to Administrator with the User Account Control (UAC) feature. A result of this is that if you mount to a drive letter from a Command Prompt run as Administrator, and then try to access the same drive from Windows Explorer (which does not run as Administrator), you will not be able to see the mounted drive.
+If you don't need to access the drive from applications running with administrative privileges, the easiest way around this is to always create the mount from a non-elevated command prompt.
+To make mapped drives available to the user account that created them regardless if elevated or not, there is a special Windows setting called linked connections that can be enabled.
+It is also possible to make a drive mount available to everyone on the system, by running the process creating it as the built-in SYSTEM account. There are several ways to do this: One is to use the command-line utility PsExec, from Microsoft's Sysinternals suite, which has option -s to start processes as the SYSTEM account. Another alternative is to run the mount command from a Windows Scheduled Task, or a Windows Service, configured to run as the SYSTEM account. A third alternative is to use the WinFsp.Launcher infrastructure. Read more in the install documentation. Note that when running rclone as another user, it will not use the configuration file from your profile unless you tell it to with the --config option. Note also that it is now the SYSTEM account that will have the owner permissions, and other accounts will have permissions according to the group or others scopes. As mentioned above, these will then not get the "write extended attributes" permission, and this may prevent writing to files. You can work around this with the FileSecurity option, see example above.
+Note that mapping to a directory path, instead of a drive letter, does not suffer from the same limitations.
+Mounting on macOS
+Mounting on macOS can be done either via built-in NFS server, macFUSE (also known as osxfuse) or FUSE-T. macFUSE is a traditional FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system which "mounts" via an NFSv4 local server.
+Unicode Normalization
+It is highly recommended to keep the default of --no-unicode-normalization=false for all mount and serve commands on macOS. For details, see vfs-case-sensitivity.
+NFS mount
+This method spins up an NFS server using the serve nfs command and mounts it to the specified mountpoint. If you run this in background mode using --daemon, you will need to send a SIGTERM signal to the rclone process using the kill command to stop the mount.
+Note that --nfs-cache-handle-limit controls the maximum number of cached file handles stored by the nfsmount caching handler. This should not be set too low or you may experience errors when trying to access files. The default is 1000000, but consider lowering this limit if the server's system resource usage causes problems.
+macFUSE Notes
+If installing macFUSE using dmg packages from the website, rclone will locate the macFUSE libraries without any further intervention. If however, macFUSE is installed using the macports package manager, the following additional steps are required.
+sudo mkdir /usr/local/lib
+cd /usr/local/lib
+sudo ln -s /opt/local/lib/libfuse.2.dylib
+FUSE-T Limitations, Caveats, and Notes
+There are some limitations, caveats, and notes about how it works. These are current as of FUSE-T version 1.0.14.
+ModTime update on read
+As per the FUSE-T wiki:
+
+File access and modification times cannot be set separately as it seems to be an issue with the NFS client which always modifies both. Can be reproduced with 'touch -m' and 'touch -a' commands
+
+This means that viewing files with various tools, notably macOS Finder, will cause rclone to update the modification time of the file. This may make rclone upload a full new copy of the file.
+Read Only mounts
+When mounting with --read-only, attempts to write to files will fail silently as opposed to with a clear warning as in macFUSE.
+Limitations
+Without the use of --vfs-cache-mode this can only write files sequentially, it can only seek when reading. This means that many applications won't work with their files on an rclone mount without --vfs-cache-mode writes or --vfs-cache-mode full. See the VFS File Caching section for more info. When using NFS mount on macOS, if you don't specify --vfs-cache-mode the mount point will be read-only.
+The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2) do not support the concept of empty directories, so empty directories will have a tendency to disappear once they fall out of the directory cache.
+When rclone mount is invoked on Unix with --daemon flag, the main rclone program will wait for the background mount to become ready or until the timeout specified by the --daemon-wait flag. On Linux it can check mount status using ProcFS so the flag in fact sets maximum time to wait, while the real wait can be less. On macOS / BSD the time to wait is constant and the check is performed only at the end. We advise you to set wait time on macOS reasonably.
+Only supported on Linux, FreeBSD, OS X and Windows at the moment.
+rclone nfsmount vs rclone sync/copy
+File systems expect things to be 100% reliable, whereas cloud storage systems are a long way from 100% reliable. The rclone sync/copy commands cope with this with lots of retries. However rclone nfsmount can't use retries in the same way without making local copies of the uploads. Look at the VFS File Caching for solutions to make nfsmount more reliable.
+Attribute caching
+You can use the flag --attr-timeout to set the time the kernel caches the attributes (size, modification time, etc.) for directory entries.
+The default is 1s which caches files just long enough to avoid too many callbacks to rclone from the kernel.
+In theory 0s should be the correct value for filesystems which can change outside the control of the kernel. However this causes quite a few problems such as rclone using too much memory, rclone not serving files to samba and excessive time listing directories.
+The kernel can cache the info about a file for the time given by --attr-timeout. You may see corruption if the remote file changes length during this window. It will show up as either a truncated file or a file with garbage on the end. With --attr-timeout 1s this is very unlikely but not impossible. The higher you set --attr-timeout the more likely it is. The default setting of "1s" is the lowest setting which mitigates the problems above.
+If you set it higher (10s or 1m say) then the kernel will call back to rclone less often making it more efficient, however there is more chance of the corruption issue above.
+If files don't change on the remote outside of the control of rclone then there is no chance of corruption.
+This is the same as setting the attr_timeout option in mount.fuse.
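+For illustration (the value and paths are arbitrary), a mount that accepts a longer caching window might be started like this:
+rclone nfsmount remote:path/to/files /path/to/local/mount --attr-timeout 10s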
+Filters
+Note that all the rclone filters can be used to select a subset of the files to be visible in the mount.
+systemd
+When running rclone nfsmount as a systemd service, it is possible to use Type=notify. In this case the service will enter the started state after the mountpoint has been successfully set up. Units having the rclone nfsmount service specified as a requirement will see all files and folders immediately in this mode.
+Note that systemd runs mount units without any environment variables including PATH or HOME. This means that tilde (~) expansion will not work and you should provide --config and --cache-dir explicitly as absolute paths via rclone arguments. Since mounting requires the fusermount program, rclone will use the fallback PATH of /bin:/usr/bin in this scenario. Please ensure that fusermount is present on this PATH.
+Rclone as Unix mount helper
+The core Unix program /bin/mount normally takes the -t FSTYPE argument then runs the /sbin/mount.FSTYPE helper program passing it mount options as -o key=val,... or --opt=.... Automount (classic or systemd) behaves in a similar way.
+rclone by default expects GNU-style flags --key val. To run it as a mount helper you should symlink rclone binary to /sbin/mount.rclone and optionally /usr/bin/rclonefs, e.g. ln -s /usr/bin/rclone /sbin/mount.rclone. rclone will detect it and translate command-line arguments appropriately.
+Now you can run classic mounts like this:
+mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
+or create systemd mount units:
+# /etc/systemd/system/mnt-data.mount
+[Unit]
+Description=Mount for /mnt/data
+[Mount]
+Type=rclone
+What=sftp1:subdir
+Where=/mnt/data
+Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
+optionally accompanied by systemd automount unit
+# /etc/systemd/system/mnt-data.automount
+[Unit]
+Description=AutoMount for /mnt/data
+[Automount]
+Where=/mnt/data
+TimeoutIdleSec=600
+[Install]
+WantedBy=multi-user.target
+or add in /etc/fstab a line like
+sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
+or use classic Automountd. Remember to provide explicit config=...,cache-dir=... as a workaround for mount units being run without HOME.
+Rclone in the mount helper mode will split -o argument(s) by comma, replace _ by - and prepend -- to get the command-line flags. Options containing commas or spaces can be wrapped in single or double quotes. Any inner quotes inside outer quotes of the same type should be doubled.
+Mount option syntax includes a few extra options treated specially:
+
+- env.NAME=VALUE will set an environment variable for the mount process. This helps with Automountd and Systemd.mount which don't allow setting custom environment for mount helpers. Typically you will use env.HTTPS_PROXY=proxy.host:3128 or env.HOME=/root
+- command=cmount can be used to run cmount or any other rclone command rather than the default mount.
+- args2env will pass mount options to the mount helper running in background via environment variables instead of command line arguments. This allows to hide secrets from such commands as ps or pgrep.
+- vv... will be transformed into appropriate --verbose=N
+- standard mount options like x-systemd.automount, _netdev, nosuid and alike are intended only for Automountd and ignored by rclone.
+VFS - Virtual File System
+
+This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
+Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
+The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
+VFS Directory Cache
+Using the --dir-cache-time flag, you can control how long a directory should be considered up to date and not refreshed from the backend. Changes made through the VFS will appear immediately or invalidate the cache.
+--dir-cache-time duration Time to cache directory entries for (default 5m0s)
+--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
+However, changes made directly on the cloud storage by the web interface or a different copy of rclone will only be picked up once the directory cache expires if the backend configured does not support polling for changes. If the backend supports polling, changes will be picked up within the polling interval.
+You can send a SIGHUP signal to rclone for it to flush all directory caches, regardless of how old they are. Assuming only one rclone instance is running, you can reset the cache like this:
+kill -SIGHUP $(pidof rclone)
+If you configure rclone with a remote control then you can use rclone rc to flush the whole directory cache:
+rclone rc vfs/forget
+Or individual files or directories:
+rclone rc vfs/forget file=path/to/file dir=path/to/dir
+VFS File Buffering
+The --buffer-size flag determines the amount of memory that will be used to buffer data in advance.
+Each open file will try to keep the specified amount of data in memory at all times. The buffered data is bound to one open file and won't be shared.
+This flag is an upper limit for the used memory per open file. The buffer will only use memory for data that is downloaded but not yet read. If the buffer is empty, only a small amount of memory will be used.
+The maximum memory used by rclone for buffering can be up to --buffer-size * open files.
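+For example (illustrative figures), with --buffer-size 16M and 10 files open at once, read buffering alone may use up to 16M * 10 = 160M of memory.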
+VFS File Caching
+These flags control the VFS file caching options. File caching is necessary to make the VFS layer appear compatible with a normal file system. It can be disabled at the cost of some compatibility.
+For example you'll need to enable VFS caching if you want to read and write simultaneously to a file. See below for more details.
+Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
+--cache-dir string Directory rclone will use for caching.
+--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
+--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
+--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
+If run with -vv rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir or setting the appropriate environment variable.
+The cache has 4 different modes selected by --vfs-cache-mode. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
+Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
+If using --vfs-cache-max-size or --vfs-cache-min-free-space note that the cache may exceed these quotas for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size or --vfs-cache-min-free-space is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
+The --vfs-cache-max-age will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w.
+You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir. You don't need to worry about this if the remotes in use don't overlap.
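+A minimal sketch of that workaround, assuming two mounts over overlapping remotes (remote names and paths are illustrative):
+rclone nfsmount remote:path /mnt/one --vfs-cache-mode writes --cache-dir /var/cache/rclone-one
+rclone nfsmount remote:path/sub /mnt/two --vfs-cache-mode writes --cache-dir /var/cache/rclone-two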
+--vfs-cache-mode off
+In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
+This will mean some operations are not possible
+
+- Files can't be opened for both read AND write
+- Files opened for write can't be seeked
+- Existing files opened for write must have O_TRUNC set
+- Files open for read with O_TRUNC will be opened write only
+- Files open for write only will behave as if O_TRUNC was supplied
+- Open modes O_APPEND, O_TRUNC are ignored
+- If an upload fails it can't be retried
+
+--vfs-cache-mode minimal
+This is very similar to "off" except that files opened for read AND write will be buffered to disk. This means that files opened for write will be a lot more compatible, but uses the minimal disk space.
+These operations are not possible
+
+- Files opened for write only can't be seeked
+- Existing files opened for write must have O_TRUNC set
+- Files opened for write only will ignore O_APPEND, O_TRUNC
+- If an upload fails it can't be retried
+
+--vfs-cache-mode writes
+In this mode files opened for read only are still read directly from the remote, write only and read/write files are buffered to disk first.
+This mode should support all normal file system operations.
+If an upload fails it will be retried at exponentially increasing intervals up to 1 minute.
+--vfs-cache-mode full
+In this mode all reads and writes are buffered to and from disk. When data is read from the remote this is buffered to disk as well.
+In this mode the files in the cache will be sparse files and rclone will keep track of which bits of the files it has downloaded.
+So if an application only reads the start of each file, then rclone will only buffer the start of the file. These files will appear to be their full size in the cache, but they will be sparse files with only the data that has been downloaded present in them.
+This mode should support all normal file system operations and is otherwise identical to --vfs-cache-mode writes.
+When reading a file rclone will read --buffer-size plus --vfs-read-ahead bytes ahead. The --buffer-size is buffered in memory whereas the --vfs-read-ahead is buffered on disk.
+When using this mode it is recommended that --buffer-size is not set too large and --vfs-read-ahead is set large if required.
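+For example (sizes are illustrative, not recommendations from the rclone docs):
+rclone nfsmount remote:path /mnt/data --vfs-cache-mode full --buffer-size 16M --vfs-read-ahead 256M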
+IMPORTANT not all file systems support sparse files. In particular FAT/exFAT do not. Rclone will perform very badly if the cache directory is on a filesystem which doesn't support sparse files and it will log an ERROR message if one is detected.
+Fingerprinting
+Various parts of the VFS use fingerprinting to see if a local file copy has changed relative to a remote file. Fingerprints are made from:
+
+- size
+- modification time
+- hash
+
+where available on an object.
+On some backends some of these attributes are slow to read (they take an extra API call per object, or extra work per object).
+For example hash is slow with the local and sftp backends as they have to read the entire file and hash it, and modtime is slow with the s3, swift, ftp and qingstor backends because they need to do an extra API call to fetch it.
+If you use the --vfs-fast-fingerprint flag then rclone will not include the slow operations in the fingerprint. This makes the fingerprinting less accurate but much faster and will improve the opening time of cached files.
+If you are running a vfs cache over local, s3 or swift backends then using this flag is recommended.
+Note that if you change the value of this flag, the fingerprints of the files in the cache may be invalidated and the files will need to be downloaded again.
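+For example, a cached mount over an S3 remote (the remote name and path are illustrative) could enable it with:
+rclone nfsmount s3remote:bucket /mnt/data --vfs-cache-mode full --vfs-fast-fingerprint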
+VFS Chunked Reading
+When rclone reads files from a remote it reads them in chunks. This means that rather than requesting the whole file rclone reads the chunk specified. This can reduce the used download quota for some remotes by requesting only chunks from the remote that are actually read, at the cost of an increased number of requests.
+These flags control the chunking:
+--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
+--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+Rclone will start reading a chunk of size --vfs-read-chunk-size, and then double the size for each read. When --vfs-read-chunk-size-limit is specified, and greater than --vfs-read-chunk-size, the chunk size for each open file will get doubled only until the specified value is reached. If the value is "off", which is the default, the limit is disabled and the chunk size will grow indefinitely.
+With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on. When --vfs-read-chunk-size-limit 500M is specified, the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
+Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.
+
+These flags may be used to enable/disable features of the VFS for performance or other reasons. See also the chunked reading feature.
+In particular S3 and Swift benefit hugely from the --no-modtime flag (or use --use-server-modtime for a slightly different effect) as each read of the modification time takes a transaction.
+--no-checksum Don't compare checksums on up/download.
+--no-modtime Don't read/write the modification time (can speed things up).
+--no-seek Don't allow seeking in files.
+--read-only Only allow read-only access.
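+For example (the remote name is illustrative), an S3-backed mount could avoid the extra modification-time transactions with:
+rclone nfsmount s3remote:bucket /mnt/data --no-modtime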
+Sometimes rclone is delivered reads or writes out of order. Rather than seeking rclone will wait a short time for the in sequence read or write to come in. These flags only come into effect when not using an on disk cache file.
+--vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
+--vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
+When using VFS write caching (--vfs-cache-mode with value writes or full), the global flag --transfers can be set to adjust the number of parallel uploads of modified files from the cache (the related global flag --checkers has no effect on the VFS).
+--transfers int Number of file transfers to run in parallel (default 4)
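+For example (the value is illustrative), to allow more parallel uploads from the write cache:
+rclone nfsmount remote:path /mnt/data --vfs-cache-mode writes --transfers 8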
+VFS Case Sensitivity
+Linux file systems are case-sensitive: two files can differ only by case, and the exact case must be used when opening a file.
+File systems in modern Windows are case-insensitive but case-preserving: although existing files can be opened using any case, the exact case used to create the file is preserved and available for programs to query. It is not allowed for two files in the same directory to differ only by case.
+Usually file systems on macOS are case-insensitive. It is possible to make macOS file systems case-sensitive but that is not the default.
+The --vfs-case-insensitive VFS flag controls how rclone handles these two cases. If its value is "false", rclone passes file names to the remote as-is. If the flag is "true" (or appears without a value on the command line), rclone may perform a "fixup" as explained below.
+The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
+Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
+If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable, however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block-norm-dupes prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync.
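+For example (the remote name and path are illustrative), macOS users who see NFC/NFD duplicates could mount with:
+rclone nfsmount remote:path /mnt/data --vfs-block-norm-dupes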
+VFS Disk Options
+This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
+--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
+Alternate report of used bytes
+Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df on the filesystem, then pass the flag --vfs-used-is-size to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size and compute the total used space itself.
+WARNING. Contrary to rclone size, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
+rclone nfsmount remote:path /path/to/mountpoint [flags]
+Options
+ --addr string IPaddress:Port or :Port to bind server to
+ --allow-non-empty Allow mounting over a non-empty directory (not supported on Windows)
+ --allow-other Allow access to other users (not supported on Windows)
+ --allow-root Allow access to root user (not supported on Windows)
+ --async-read Use asynchronous reads (not supported on Windows) (default true)
+ --attr-timeout Duration Time for which file/directory attributes are cached (default 1s)
+ --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)
+ --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s)
+ --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s)
+ --debug-fuse Debug the FUSE internals - needs -v
+ --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows)
+ --devname string Set the device name - default is remote:path
+ --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
+ --dir-perms FileMode Directory permissions (default 0777)
+ --file-perms FileMode File permissions (default 0666)
+ --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)
+ --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
+ -h, --help help for nfsmount
+ --max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
+ --network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
+ --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000)
+ --no-checksum Don't compare checksums on up/download
+ --no-modtime Don't read/write the modification time (can speed things up)
+ --no-seek Don't allow seeking in files
+ --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true)
+ --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only)
+ -o, --option stringArray Option for libfuse/WinFsp (repeat if required)
+ --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
+ --read-only Only allow read-only access
+ --sudo Use sudo to run the mount command as root.
+ --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
+ --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+ --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+ --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+ --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
+ --vfs-case-insensitive If a file name not found, find a case insensitive match
+ --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
+ --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
+ --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
+ --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
+ --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+ --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
+ --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
+ --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
+ --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
+ --volname string Set the volume name (supported on Windows and OSX only)
+ --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)
+Filter Options
+Flags for filtering directory listings.
+ --delete-excluded Delete files on dest excluded from sync
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --ignore-case Ignore case in filters (case insensitive)
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --metadata-exclude stringArray Exclude metadatas matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray Include metadatas matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+See the global flags page for global options not listed here.
+SEE ALSO
+
+- rclone - Show help for rclone commands, flags and backends.
+
rclone obscure
Obscure password for use in the rclone config file.
-Synopsis
+Synopsis
In the rclone config file, human-readable passwords are obscured. Obscuring them is done by encrypting them and writing them out in base64. This is not a secure way of encrypting these passwords as rclone can decrypt them - it is to prevent "eyedropping" - namely someone seeing a password in the rclone config file by accident.
Many equally important things (like access tokens) are not obscured in the config file. However it is very hard to shoulder surf a 64 character hex token.
This command can also accept a password through STDIN instead of an argument by passing a hyphen as an argument. This will use the first line of STDIN as the password not including the trailing newline.
@@ -3303,16 +3726,16 @@ if src is directory
If there is no data on STDIN to read, rclone obscure will default to obfuscating the hyphen itself.
If you want to encrypt the config file then please use config file encryption - see rclone config for more info.
rclone obscure password [flags]
-Options
+Options
-h, --help help for obscure
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
rclone rc
Run a command against a running rclone.
-Synopsis
+Synopsis
This runs a command against a running rclone. Use the --url
flag to specify an non default URL to connect on. This can be either a ":port" which is taken to mean "http://localhost:port" or a "host:port" which is taken to mean "http://host:port"
A username and password can be passed in with --user
and --pass
.
Note that --rc-addr
, --rc-user
, --rc-pass
will be read also for --url
, --user
, --pass
.
@@ -3331,7 +3754,7 @@ if src is directory
rclone rc --loopback operations/about fs=/
Use rclone rc
to see a list of all possible commands.
rclone rc commands parameter [flags]
-Options
+Options
-a, --arg stringArray Argument placed in the "arg" array
-h, --help help for rc
--json string Input JSON - use instead of key=value args
@@ -3342,13 +3765,13 @@ if src is directory
--url string URL to connect to rclone remote control (default "http://localhost:5572/")
--user string Username to use to rclone remote control
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
rclone rcat
Copies standard input to file on remote.
-Synopsis
+Synopsis
rclone rcat reads from standard input (stdin) and copies it to a single remote file.
echo "hello world" | rclone rcat remote:path/to/file
ffmpeg - | rclone rcat remote:path/to/file
@@ -3358,7 +3781,7 @@ ffmpeg - | rclone rcat remote:path/to/file
--size
should be the exact size of the input stream in bytes. If the size of the stream is different in length to the --size
passed in then the transfer will likely fail.
Note that the upload cannot be retried because the data is not stored. If the backend supports multipart uploading then individual chunks can be retried. If you need to transfer a lot of data, you may be better off caching it locally and then rclone move
it to the destination which can use retries.
rclone rcat remote:path [flags]
-Options
+Options
-h, --help help for rcat
--size int File size hint to preallocate (default -1)
Important Options
@@ -3367,13 +3790,13 @@ ffmpeg - | rclone rcat remote:path/to/file
-i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more)
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
rclone rcd
Run rclone listening to remote control commands only.
-Synopsis
+Synopsis
This runs rclone so that it only listens to remote control commands.
This is useful if you are controlling rclone via the rc API.
If you pass in a path to a directory, rclone will serve that directory for GET requests on the URL passed in. It will also open the URL in the browser when rclone is run.
@@ -3514,7 +3937,7 @@ htpasswd -B htpasswd anotherUser
Use --rc-realm
to set the authentication realm.
Use --rc-salt
to change the password hashing salt from the default.
rclone rcd <path to files to serve>* [flags]
-Options
+Options
-h, --help help for rcd
RC Options
Flags to control the Remote Control API.
@@ -3547,20 +3970,20 @@ htpasswd -B htpasswd anotherUser
--rc-web-gui-no-open-browser Don't open the browser automatically
--rc-web-gui-update Check and update to latest version of web gui
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
rclone rmdirs
Remove empty directories under the path.
-Synopsis
+Synopsis
This recursively removes any empty directories (including directories that only contain empty directories), that it finds under the path. The root path itself will also be removed if it is empty, unless you supply the --leave-root
flag.
Use command rmdir to delete just the empty directory given by path, not recurse.
This is useful for tidying up remotes that rclone has left a lot of empty directories in. For example the delete command will delete files but leave the directory structure (unless used with option --rmdirs
).
This will delete --checkers
directories concurrently so if you have thousands of empty directories consider increasing this number.
To delete a path and any objects in it, use the purge command.
rclone rmdirs remote:path [flags]
-Options
+Options
-h, --help help for rmdirs
--leave-root Do not remove root directory if empty
Important Options
@@ -3569,13 +3992,13 @@ htpasswd -B htpasswd anotherUser
-i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more)
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
rclone selfupdate
Update the rclone binary.
-Synopsis
+Synopsis
This command downloads the latest release of rclone and replaces the currently running binary. The download is verified with a hashsum and cryptographically signed signature; see the release signing docs for details.
If used without flags (or with implied --stable
flag), this command will install the latest stable release. However, some issues may be fixed (or features added) only in the latest beta release. In such cases you should run the command with the --beta
flag, i.e. rclone selfupdate --beta
. You can check in advance what version would be installed by adding the --check
flag, then repeat the command without it when you are satisfied.
Sometimes the rclone team may recommend you a concrete beta or stable rclone release to troubleshoot your issue or add a bleeding edge feature. The --version VER
flag, if given, will update to the concrete version instead of the latest one. If you omit micro version from VER
(for example 1.53
), the latest matching micro version will be used.
@@ -3585,7 +4008,7 @@ htpasswd -B htpasswd anotherUser
Note: Windows forbids deletion of a currently running executable so this command will rename the old executable to 'rclone.old.exe' upon success.
Please note that this command was not available before rclone version 1.55. If it fails for you with the message unknown command "selfupdate"
then you will need to update manually following the install instructions located at https://rclone.org/install/
rclone selfupdate [flags]
-Options
+Options
--beta Install beta release
--check Check for latest release, do not download
-h, --help help for selfupdate
@@ -3594,21 +4017,21 @@ htpasswd -B htpasswd anotherUser
--stable Install stable release (this is the default)
--version string Install the given rclone version (default: latest)
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
rclone serve
Serve a remote over a protocol.
-Synopsis
+Synopsis
Serve a remote over a given protocol. Requires the use of a subcommand to specify the protocol, e.g.
rclone serve http remote:
Each subcommand has its own options which you can see in their help.
rclone serve <protocol> [opts] <remote> [flags]
-Options
+Options
-h, --help help for serve
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
- rclone serve dlna - Serve remote:path over DLNA
@@ -3623,7 +4046,7 @@ htpasswd -B htpasswd anotherUser
rclone serve dlna
Serve remote:path over DLNA
-Synopsis
+Synopsis
Run a DLNA media server for media stored in an rclone remote. Many devices, such as the Xbox and PlayStation, can automatically discover this server in the LAN and play audio/video from it. VLC is also supported. Service discovery uses UDP multicast packets (SSDP) and will thus only work on LANs.
Rclone will list all files present in the remote, without filtering based on media formats or file extensions. Additionally, there is no media transcoding support. This means that some players might show files that they are not able to play back correctly.
Server options
@@ -3633,196 +4056,6 @@ htpasswd -B htpasswd anotherUser
This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
-VFS Directory Cache
-Using the --dir-cache-time
flag, you can control how long a directory should be considered up to date and not refreshed from the backend. Changes made through the VFS will appear immediately or invalidate the cache.
---dir-cache-time duration Time to cache directory entries for (default 5m0s)
---poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
-However, changes made directly on the cloud storage by the web interface or a different copy of rclone will only be picked up once the directory cache expires if the backend configured does not support polling for changes. If the backend supports polling, changes will be picked up within the polling interval.
-You can send a SIGHUP
signal to rclone for it to flush all directory caches, regardless of how old they are. Assuming only one rclone instance is running, you can reset the cache like this:
-kill -SIGHUP $(pidof rclone)
-If you configure rclone with a remote control then you can use rclone rc to flush the whole directory cache:
-rclone rc vfs/forget
-Or individual files or directories:
-rclone rc vfs/forget file=path/to/file dir=path/to/dir
-VFS File Buffering
-The --buffer-size
flag determines the amount of memory, that will be used to buffer data in advance.
-Each open file will try to keep the specified amount of data in memory at all times. The buffered data is bound to one open file and won't be shared.
-This flag is a upper limit for the used memory per open file. The buffer will only use memory for data that is downloaded but not not yet read. If the buffer is empty, only a small amount of memory will be used.
-The maximum memory used by rclone for buffering can be up to --buffer-size * open files
.
-VFS File Caching
-These flags control the VFS file caching options. File caching is necessary to make the VFS layer appear compatible with a normal file system. It can be disabled at the cost of some compatibility.
-For example you'll need to enable VFS caching if you want to read and write simultaneously to a file. See below for more details.
-Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
---cache-dir string Directory rclone will use for caching.
---vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
---vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
---vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
---vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
---vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
-If run with -vv
rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir
or setting the appropriate environment variable.
-The cache has 4 different modes selected by --vfs-cache-mode
. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
-Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back
seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
-If using --vfs-cache-max-size
or --vfs-cache-min-free-size
note that the cache may exceed these quotas for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval
. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size
or --vfs-cache-min-free-size
is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
-The --vfs-cache-max-age
will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w .
-You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off
. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir
. You don't need to worry about this if the remotes in use don't overlap.
---vfs-cache-mode off
-In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
-This will mean some operations are not possible
-
-- Files can't be opened for both read AND write
-- Files opened for write can't be seeked
-- Existing files opened for write must have O_TRUNC set
-- Files open for read with O_TRUNC will be opened write only
-- Files open for write only will behave as if O_TRUNC was supplied
-- Open modes O_APPEND, O_TRUNC are ignored
-- If an upload fails it can't be retried
-
---vfs-cache-mode minimal
-This is very similar to "off" except that files opened for read AND write will be buffered to disk. This means that files opened for write will be a lot more compatible, but uses the minimal disk space.
-These operations are not possible
-
-- Files opened for write only can't be seeked
-- Existing files opened for write must have O_TRUNC set
-- Files opened for write only will ignore O_APPEND, O_TRUNC
-- If an upload fails it can't be retried
-
---vfs-cache-mode writes
-In this mode files opened for read only are still read directly from the remote, write only and read/write files are buffered to disk first.
-This mode should support all normal file system operations.
-If an upload fails it will be retried at exponentially increasing intervals up to 1 minute.
---vfs-cache-mode full
-In this mode all reads and writes are buffered to and from disk. When data is read from the remote this is buffered to disk as well.
-In this mode the files in the cache will be sparse files and rclone will keep track of which bits of the files it has downloaded.
-So if an application only reads the starts of each file, then rclone will only buffer the start of the file. These files will appear to be their full size in the cache, but they will be sparse files with only the data that has been downloaded present in them.
-This mode should support all normal file system operations and is otherwise identical to --vfs-cache-mode
writes.
-When reading a file rclone will read --buffer-size
plus --vfs-read-ahead
bytes ahead. The --buffer-size
is buffered in memory whereas the --vfs-read-ahead
is buffered on disk.
-When using this mode it is recommended that --buffer-size
is not set too large and --vfs-read-ahead
is set large if required.
-IMPORTANT not all file systems support sparse files. In particular FAT/exFAT do not. Rclone will perform very badly if the cache directory is on a filesystem which doesn't support sparse files and it will log an ERROR message if one is detected.
-Fingerprinting
-Various parts of the VFS use fingerprinting to see if a local file copy has changed relative to a remote file. Fingerprints are made from:
-
-- size
-- modification time
-- hash
-
-where available on an object.
-On some backends some of these attributes are slow to read (they take an extra API call per object, or extra work per object).
-For example hash
is slow with the local
and sftp
backends as they have to read the entire file and hash it, and modtime
is slow with the s3
, swift
, ftp
and qinqstor
backends because they need to do an extra API call to fetch it.
-If you use the --vfs-fast-fingerprint
flag then rclone will not include the slow operations in the fingerprint. This makes the fingerprinting less accurate but much faster and will improve the opening time of cached files.
-If you are running a vfs cache over local
, s3
or swift
backends then using this flag is recommended.
-Note that if you change the value of this flag, the fingerprints of the files in the cache may be invalidated and the files will need to be downloaded again.
-VFS Chunked Reading
-When rclone reads files from a remote it reads them in chunks. This means that rather than requesting the whole file rclone reads the chunk specified. This can reduce the used download quota for some remotes by requesting only chunks from the remote that are actually read, at the cost of an increased number of requests.
-These flags control the chunking:
---vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
---vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
-Rclone will start reading a chunk of size --vfs-read-chunk-size
, and then double the size for each read. When --vfs-read-chunk-size-limit
is specified, and greater than --vfs-read-chunk-size
, the chunk size for each open file will get doubled only until the specified value is reached. If the value is "off", which is the default, the limit is disabled and the chunk size will grow indefinitely.
-With --vfs-read-chunk-size 100M
and --vfs-read-chunk-size-limit 0
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on. When --vfs-read-chunk-size-limit 500M
is specified, the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
-Setting --vfs-read-chunk-size
to 0
or "off" disables chunked reading.
-
-These flags may be used to enable/disable features of the VFS for performance or other reasons. See also the chunked reading feature.
-In particular S3 and Swift benefit hugely from the --no-modtime
flag (or use --use-server-modtime
for a slightly different effect) as each read of the modification time takes a transaction.
---no-checksum Don't compare checksums on up/download.
---no-modtime Don't read/write the modification time (can speed things up).
---no-seek Don't allow seeking in files.
---read-only Only allow read-only access.
-Sometimes rclone is delivered reads or writes out of order. Rather than seeking rclone will wait a short time for the in sequence read or write to come in. These flags only come into effect when not using an on disk cache file.
---vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
---vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
-When using VFS write caching (--vfs-cache-mode
with value writes or full), the global flag --transfers
can be set to adjust the number of parallel uploads of modified files from the cache (the related global flag --checkers
has no effect on the VFS).
---transfers int Number of file transfers to run in parallel (default 4)
-VFS Case Sensitivity
-Linux file systems are case-sensitive: two files can differ only by case, and the exact case must be used when opening a file.
-File systems in modern Windows are case-insensitive but case-preserving: although existing files can be opened using any case, the exact case used to create the file is preserved and available for programs to query. It is not allowed for two files in the same directory to differ only by case.
-Usually file systems on macOS are case-insensitive. It is possible to make macOS file systems case-sensitive but that is not the default.
-The --vfs-case-insensitive
VFS flag controls how rclone handles these two cases. If its value is "false", rclone passes file names to the remote as-is. If the flag is "true" (or appears without a value on the command line), rclone may perform a "fixup" as explained below.
-The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
-Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
-If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
-VFS Disk Options
-This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
---vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
-Alternate report of used bytes
-Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
-WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
-rclone serve dlna remote:path [flags]
-Options
- --addr string The ip:port or :port to bind the DLNA http server to (default ":7879")
- --announce-interval Duration The interval between SSDP announcements (default 12m0s)
- --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
- --dir-perms FileMode Directory permissions (default 0777)
- --file-perms FileMode File permissions (default 0666)
- --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
- -h, --help help for dlna
- --interface stringArray The interface to use for SSDP (repeat as necessary)
- --log-trace Enable trace logging of SOAP traffic
- --name string Name of DLNA server
- --no-checksum Don't compare checksums on up/download
- --no-modtime Don't read/write the modification time (can speed things up)
- --no-seek Don't allow seeking in files
- --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
- --read-only Only allow read-only access
- --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
- --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
- --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
- --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
- --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
- --vfs-case-insensitive If a file name not found, find a case insensitive match
- --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
- --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
- --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
- --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
- --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
- --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
- --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
- --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
- --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
-Filter Options
-Flags for filtering directory listings.
- --delete-excluded Delete files on dest excluded from sync
- --exclude stringArray Exclude files matching pattern
- --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
- --exclude-if-present stringArray Exclude directories if filename is present
- --files-from stringArray Read list of source-file names from file (use - to read from stdin)
- --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
- -f, --filter stringArray Add a file filtering rule
- --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
- --ignore-case Ignore case in filters (case insensitive)
- --include stringArray Include files matching pattern
- --include-from stringArray Read file include patterns from file (use - to read from stdin)
- --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --max-depth int If set limits the recursion depth to this (default -1)
- --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
- --metadata-exclude stringArray Exclude metadatas matching pattern
- --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
- --metadata-filter stringArray Add a metadata filtering rule
- --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
- --metadata-include stringArray Include metadatas matching pattern
- --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
- --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
-See the global flags page for global options not listed here.
-SEE ALSO
-
-rclone serve docker
-Serve any remote on docker's volume plugin API.
-Synopsis
-This command implements the Docker volume plugin API allowing docker to use rclone as a data storage mechanism for various cloud providers. rclone provides docker volume plugin based on it.
-To create a docker plugin, one must create a Unix or TCP socket that Docker will look for when you use the plugin and then it listens for commands from docker daemon and runs the corresponding code when necessary. Docker plugins can run as a managed plugin under control of the docker daemon or as an independent native service. For testing, you can just run it directly from the command line, for example:
-sudo rclone serve docker --base-dir /tmp/rclone-volumes --socket-addr localhost:8787 -vv
-Running rclone serve docker
will create the said socket, listening for commands from Docker to create the necessary Volumes. Normally you need not give the --socket-addr
flag. The API will listen on the unix domain socket at /run/docker/plugins/rclone.sock
. In the example above rclone will create a TCP socket and a small file /etc/docker/plugins/rclone.spec
containing the socket address. We use sudo
because both paths are writeable only by the root user.
-If you later decide to change listening socket, the docker daemon must be restarted to reconnect to /run/docker/plugins/rclone.sock
or parse new /etc/docker/plugins/rclone.spec
. Until you restart, any volume related docker commands will timeout trying to access the old socket. Running directly is supported on Linux only, not on Windows or MacOS. This is not a problem with managed plugin mode described in details in the full documentation.
-The command will create volume mounts under the path given by --base-dir
(by default /var/lib/docker-volumes/rclone
available only to root) and maintain the JSON formatted file docker-plugin.state
in the rclone cache directory with book-keeping records of created and mounted volumes.
-All mount and VFS options are submitted by the docker daemon via API, but you can also provide defaults on the command line as well as set path to the config file and cache directory or adjust logging verbosity. ## VFS - Virtual File System
-This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
-Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
-The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
VFS Directory Cache
Using the --dir-cache-time
flag, you can control how long a directory should be considered up to date and not refreshed from the backend. Changes made through the VFS will appear immediately or invalidate the cache.
--dir-cache-time duration Time to cache directory entries for (default 5m0s)
@@ -3930,49 +4163,34 @@ htpasswd -B htpasswd anotherUser
The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
+The --no-unicode-normalization
flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false
on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes
flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable, however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block- norm-dupes
prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync
.
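As an illustration of the canonical-equivalence issue described above, here is a standalone Python sketch (not part of rclone): the NFC and NFD spellings of the same name compare as different strings until they are normalized to a common form.
import unicodedata

nfc_name = "café"                                   # 'é' as a single precomposed code point (NFC)
nfd_name = unicodedata.normalize("NFD", nfc_name)   # 'e' followed by a combining accent (NFD)

print(nfc_name == nfd_name)                                   # False - different byte sequences
print(unicodedata.normalize("NFC", nfd_name) == nfc_name)     # True - canonically equivalent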
VFS Disk Options
This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
-rclone serve docker [flags]
+rclone serve dlna remote:path [flags]
Options
- --allow-non-empty Allow mounting over a non-empty directory (not supported on Windows)
- --allow-other Allow access to other users (not supported on Windows)
- --allow-root Allow access to root user (not supported on Windows)
- --async-read Use asynchronous reads (not supported on Windows) (default true)
- --attr-timeout Duration Time for which file/directory attributes are cached (default 1s)
- --base-dir string Base directory for volumes (default "/var/lib/docker-volumes/rclone")
- --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)
- --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s)
- --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s)
- --debug-fuse Debug the FUSE internals - needs -v
- --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows)
- --devname string Set the device name - default is remote:path
+ --addr string The ip:port or :port to bind the DLNA http server to (default ":7879")
+ --announce-interval Duration The interval between SSDP announcements (default 12m0s)
--dir-cache-time Duration Time to cache directory entries for (default 5m0s)
--dir-perms FileMode Directory permissions (default 0777)
--file-perms FileMode File permissions (default 0666)
- --forget-state Skip restoring previous state
- --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
- -h, --help help for docker
- --max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
- --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
- --network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
+ -h, --help help for dlna
+ --interface stringArray The interface to use for SSDP (repeat as necessary)
+ --log-trace Enable trace logging of SOAP traffic
+ --name string Name of DLNA server
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
--no-seek Don't allow seeking in files
- --no-spec Do not write spec file
- --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true)
- --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only)
- -o, --option stringArray Option for libfuse/WinFsp (repeat if required)
--poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
--read-only Only allow read-only access
- --socket-addr string Address <host:port> or absolute path (default: /run/docker/plugins/rclone.sock)
- --socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -3985,12 +4203,10 @@ htpasswd -B htpasswd anotherUser
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
- --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
- --volname string Set the volume name (supported on Windows and OSX only)
- --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)
+ --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
Filter Options
Flags for filtering directory listings.
--delete-excluded Delete files on dest excluded from sync
@@ -4020,16 +4236,16 @@ htpasswd -B htpasswd anotherUser
-rclone serve ftp
-Serve remote:path over FTP.
+rclone serve docker
+Serve any remote on docker's volume plugin API.
Synopsis
-Run a basic FTP server to serve a remote over the FTP protocol. This can be viewed with an FTP client or you can make a remote of type FTP to read and write it.
-Server options
-Use --addr to specify which IP address and port the server should listen on, e.g. --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port.
-If you set --addr to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info.
-Authentication
-By default this will serve files without needing a login.
-You can set a single username and password with the --user and --pass flags.
-## VFS - Virtual File System
+This command implements the Docker volume plugin API allowing docker to use rclone as a data storage mechanism for various cloud providers. rclone provides a docker volume plugin based on it.
+To create a docker plugin, one must create a Unix or TCP socket that Docker will look for when you use the plugin; the plugin then listens for commands from the docker daemon and runs the corresponding code when necessary. Docker plugins can run as a managed plugin under control of the docker daemon or as an independent native service. For testing, you can just run it directly from the command line, for example:
+sudo rclone serve docker --base-dir /tmp/rclone-volumes --socket-addr localhost:8787 -vv
+Running rclone serve docker
will create that socket, listening for commands from Docker to create the necessary volumes. Normally you need not give the --socket-addr
flag. The API will listen on the unix domain socket at /run/docker/plugins/rclone.sock
. In the example above rclone will create a TCP socket and a small file /etc/docker/plugins/rclone.spec
containing the socket address. We use sudo
because both paths are writeable only by the root user.
+If you later decide to change the listening socket, the docker daemon must be restarted to reconnect to /run/docker/plugins/rclone.sock
or parse the new /etc/docker/plugins/rclone.spec
. Until you restart, any volume-related docker commands will time out trying to access the old socket. Running directly is supported on Linux only, not on Windows or macOS. This is not a problem with managed plugin mode, described in detail in the full documentation.
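For a feel of the underlying protocol, the volume plugin API is plain JSON over HTTP on that socket. A hedged Python sketch (assuming the default socket path and root privileges; the /VolumeDriver.List endpoint comes from Docker's volume plugin API, not from rclone itself) that lists the volumes the plugin knows about might look like this:
import http.client
import json
import socket

class UnixHTTPConnection(http.client.HTTPConnection):
    """Minimal HTTP-over-unix-socket connection for poking the plugin API."""
    def __init__(self, path):
        super().__init__("localhost")
        self._path = path
    def connect(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self._path)
        self.sock = sock

conn = UnixHTTPConnection("/run/docker/plugins/rclone.sock")
conn.request("POST", "/VolumeDriver.List", body="{}",
             headers={"Content-Type": "application/json"})
print(json.loads(conn.getresponse().read()))   # e.g. {"Volumes": [...], "Err": ""}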
+The command will create volume mounts under the path given by --base-dir
(by default /var/lib/docker-volumes/rclone
available only to root) and maintain the JSON formatted file docker-plugin.state
in the rclone cache directory with book-keeping records of created and mounted volumes.
+All mount and VFS options are submitted by the docker daemon via API, but you can also provide defaults on the command line as well as set the path to the config file and cache directory or adjust logging verbosity.
+## VFS - Virtual File System
This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
@@ -4140,64 +4356,52 @@ htpasswd -B htpasswd anotherUser
The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
+The --no-unicode-normalization
flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false
on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes
flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable; however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block-norm-dupes
prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync
.
VFS Disk Options
This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
-Auth Proxy
-If you supply the parameter --auth-proxy /path/to/program
then rclone will use that program to generate backends on the fly which then are used to authenticate incoming requests. This uses a simple JSON based protocol with input on STDIN and output on STDOUT.
-PLEASE NOTE: --auth-proxy
and --authorized-keys
cannot be used together, if --auth-proxy
is set the authorized keys option will be ignored.
-There is an example program bin/test_proxy.py in the rclone source code.
-The program's job is to take a user
and pass
on the input and turn those into the config for a backend on STDOUT in JSON format. This config will have any default parameters for the backend added, but it won't use configuration from environment variables or command line options - it is the job of the proxy program to make a complete config.
-This config generated must have this extra parameter - _root
- root to use for the backend
-And it may have this parameter - _obscure
- comma separated strings for parameters to obscure
-If password authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
-{
- "user": "me",
- "pass": "mypassword"
-}
-If public-key authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
-{
- "user": "me",
- "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
-}
-And as an example return this on STDOUT
-{
- "type": "sftp",
- "_root": "",
- "_obscure": "pass",
- "user": "me",
- "pass": "mypassword",
- "host": "sftp.example.com"
-}
-This would mean that an SFTP backend would be created on the fly for the user
and pass
/public_key
returned in the output to the host given. Note that since _obscure
is set to pass
, rclone will obscure the pass
parameter before creating the backend (which is required for sftp backends).
-The program can manipulate the supplied user
in any way, for example to make proxy to many different sftp backends, you could make the user
be user@example.com
and then set the host
to example.com
in the output and the user to user
. For security you'd probably want to restrict the host
to a limited list.
-Note that an internal cache is keyed on user
so only use that for configuration, don't use pass
or public_key
. This also means that if a user's password or public-key is changed the cache will need to expire (which takes 5 mins) before it takes effect.
-This can be used to build general purpose proxies to any kind of backend that rclone supports.
-rclone serve ftp remote:path [flags]
+rclone serve docker [flags]
Options
- --addr string IPaddress:Port or :Port to bind server to (default "localhost:2121")
- --auth-proxy string A program to use to create the backend from the auth
- --cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --allow-non-empty Allow mounting over a non-empty directory (not supported on Windows)
+ --allow-other Allow access to other users (not supported on Windows)
+ --allow-root Allow access to root user (not supported on Windows)
+ --async-read Use asynchronous reads (not supported on Windows) (default true)
+ --attr-timeout Duration Time for which file/directory attributes are cached (default 1s)
+ --base-dir string Base directory for volumes (default "/var/lib/docker-volumes/rclone")
+ --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)
+ --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s)
+ --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s)
+ --debug-fuse Debug the FUSE internals - needs -v
+ --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows)
+ --devname string Set the device name - default is remote:path
--dir-cache-time Duration Time to cache directory entries for (default 5m0s)
--dir-perms FileMode Directory permissions (default 0777)
--file-perms FileMode File permissions (default 0666)
+ --forget-state Skip restoring previous state
+ --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
- -h, --help help for ftp
- --key string TLS PEM Private key
+ -h, --help help for docker
+ --max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
+ --network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
--no-seek Don't allow seeking in files
- --pass string Password for authentication (empty value allow every password)
- --passive-port string Passive port range to use (default "30000-32000")
+ --no-spec Do not write spec file
+ --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true)
+ --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only)
+ -o, --option stringArray Option for libfuse/WinFsp (repeat if required)
--poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
- --public-ip string Public IP address to advertise for passive connections
--read-only Only allow read-only access
+ --socket-addr string Address <host:port> or absolute path (default: /run/docker/plugins/rclone.sock)
+ --socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --user string User name for authentication (default "anonymous")
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -4210,10 +4414,12 @@ htpasswd -B htpasswd anotherUser
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
- --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
+ --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
+ --volname string Set the volume name (supported on Windows and OSX only)
+ --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)
Filter Options
Flags for filtering directory listings.
--delete-excluded Delete files on dest excluded from sync
@@ -4243,9 +4449,235 @@ htpasswd -B htpasswd anotherUser
+rclone serve ftp
+Serve remote:path over FTP.
+Synopsis
+Run a basic FTP server to serve a remote over the FTP protocol. This can be viewed with an FTP client or you can make a remote of type FTP to read and write it.
+Server options
+Use --addr to specify which IP address and port the server should listen on, e.g. --addr 1.2.3.4:8000 or --addr :8080 to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port.
+If you set --addr to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info.
+Authentication
+By default this will serve files without needing a login.
+You can set a single username and password with the --user and --pass flags.
+## VFS - Virtual File System
+This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
+Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
+The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
+VFS Directory Cache
+Using the --dir-cache-time
flag, you can control how long a directory should be considered up to date and not refreshed from the backend. Changes made through the VFS will appear immediately or invalidate the cache.
+--dir-cache-time duration Time to cache directory entries for (default 5m0s)
+--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
+However, changes made directly on the cloud storage by the web interface or a different copy of rclone will only be picked up once the directory cache expires if the backend configured does not support polling for changes. If the backend supports polling, changes will be picked up within the polling interval.
+You can send a SIGHUP
signal to rclone for it to flush all directory caches, regardless of how old they are. Assuming only one rclone instance is running, you can reset the cache like this:
+kill -SIGHUP $(pidof rclone)
+If you configure rclone with a remote control then you can use rclone rc to flush the whole directory cache:
+rclone rc vfs/forget
+Or individual files or directories:
+rclone rc vfs/forget file=path/to/file dir=path/to/dir
+VFS File Buffering
+The --buffer-size
flag determines the amount of memory that will be used to buffer data in advance.
+Each open file will try to keep the specified amount of data in memory at all times. The buffered data is bound to one open file and won't be shared.
+This flag is an upper limit for the memory used per open file. The buffer will only use memory for data that is downloaded but not yet read. If the buffer is empty, only a small amount of memory will be used.
+The maximum memory used by rclone for buffering can be up to --buffer-size * open files
.
+VFS File Caching
+These flags control the VFS file caching options. File caching is necessary to make the VFS layer appear compatible with a normal file system. It can be disabled at the cost of some compatibility.
+For example you'll need to enable VFS caching if you want to read and write simultaneously to a file. See below for more details.
+Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
+--cache-dir string Directory rclone will use for caching.
+--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
+--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
+--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
+If run with -vv
rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir
or setting the appropriate environment variable.
+The cache has 4 different modes selected by --vfs-cache-mode
. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
+Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back
seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
+If using --vfs-cache-max-size
or --vfs-cache-min-free-space
note that the cache may exceed these quotas for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval
. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size
or --vfs-cache-min-free-space
is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
+The --vfs-cache-max-age
will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from the cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation: s, m, h, d, w.
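The eviction rules above amount to a least-recently-accessed sweep that respects --vfs-cache-max-age and never touches open files. A rough Python sketch of that policy (purely illustrative; this is not rclone's cache code, and the real implementation also tracks sizes against --vfs-cache-max-size):
import os
import time

def evict(cache_dir, max_age_seconds, open_paths):
    """Remove cached files not accessed within max_age_seconds, oldest first."""
    now = time.time()
    candidates = []
    for root, _, files in os.walk(cache_dir):
        for name in files:
            path = os.path.join(root, name)
            candidates.append((os.stat(path).st_atime, path))
    for atime, path in sorted(candidates):        # least recently accessed first
        if path in open_paths:
            continue                              # open files cannot be evicted
        if now - atime > max_age_seconds:
            os.remove(path)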
+You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off
. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir
. You don't need to worry about this if the remotes in use don't overlap.
+--vfs-cache-mode off
+In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
+This will mean some operations are not possible
+
+- Files can't be opened for both read AND write
+- Files opened for write can't be seeked
+- Existing files opened for write must have O_TRUNC set
+- Files open for read with O_TRUNC will be opened write only
+- Files open for write only will behave as if O_TRUNC was supplied
+- Open modes O_APPEND, O_TRUNC are ignored
+- If an upload fails it can't be retried
+
+--vfs-cache-mode minimal
+This is very similar to "off" except that files opened for read AND write will be buffered to disk. This means that files opened for write will be a lot more compatible, but uses the minimal disk space.
+These operations are not possible
+
+- Files opened for write only can't be seeked
+- Existing files opened for write must have O_TRUNC set
+- Files opened for write only will ignore O_APPEND, O_TRUNC
+- If an upload fails it can't be retried
+
+--vfs-cache-mode writes
+In this mode files opened for read only are still read directly from the remote, write only and read/write files are buffered to disk first.
+This mode should support all normal file system operations.
+If an upload fails it will be retried at exponentially increasing intervals up to 1 minute.
+--vfs-cache-mode full
+In this mode all reads and writes are buffered to and from disk. When data is read from the remote this is buffered to disk as well.
+In this mode the files in the cache will be sparse files and rclone will keep track of which bits of the files it has downloaded.
+So if an application only reads the start of each file, then rclone will only buffer the start of the file. These files will appear to be their full size in the cache, but they will be sparse files with only the data that has been downloaded present in them.
+This mode should support all normal file system operations and is otherwise identical to --vfs-cache-mode
writes.
+When reading a file rclone will read --buffer-size
plus --vfs-read-ahead
bytes ahead. The --buffer-size
is buffered in memory whereas the --vfs-read-ahead
is buffered on disk.
+When using this mode it is recommended that --buffer-size
is not set too large and --vfs-read-ahead
is set large if required.
+IMPORTANT not all file systems support sparse files. In particular FAT/exFAT do not. Rclone will perform very badly if the cache directory is on a filesystem which doesn't support sparse files and it will log an ERROR message if one is detected.
+Fingerprinting
+Various parts of the VFS use fingerprinting to see if a local file copy has changed relative to a remote file. Fingerprints are made from:
+
+- size
+- modification time
+- hash
+
+where available on an object.
+On some backends some of these attributes are slow to read (they take an extra API call per object, or extra work per object).
+For example hash
is slow with the local
and sftp
backends as they have to read the entire file and hash it, and modtime
is slow with the s3
, swift
, ftp
and qingstor
backends because they need to do an extra API call to fetch it.
+If you use the --vfs-fast-fingerprint
flag then rclone will not include the slow operations in the fingerprint. This makes the fingerprinting less accurate but much faster and will improve the opening time of cached files.
+If you are running a vfs cache over local
, s3
or swift
backends then using this flag is recommended.
+Note that if you change the value of this flag, the fingerprints of the files in the cache may be invalidated and the files will need to be downloaded again.
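Conceptually a fingerprint is just those attributes joined into one comparable string, with --vfs-fast-fingerprint leaving out whichever attribute is slow on the backend in use. A hedged Python sketch of the idea for a local file (illustrative only; rclone's actual fingerprint format is not specified here):
import hashlib
import os

def fingerprint(path, fast=False):
    """Build a size,modtime[,hash] style fingerprint for a local file."""
    st = os.stat(path)
    parts = [str(st.st_size), str(int(st.st_mtime))]
    if not fast:
        # Hashing is the slow attribute for local/sftp style backends,
        # so a "fast" fingerprint would skip it.
        digest = hashlib.md5()
        with open(path, "rb") as f:
            for block in iter(lambda: f.read(1 << 20), b""):
                digest.update(block)
        parts.append(digest.hexdigest())
    return ",".join(parts)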
+VFS Chunked Reading
+When rclone reads files from a remote it reads them in chunks. This means that rather than requesting the whole file rclone reads the chunk specified. This can reduce the used download quota for some remotes by requesting only chunks from the remote that are actually read, at the cost of an increased number of requests.
+These flags control the chunking:
+--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
+--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+Rclone will start reading a chunk of size --vfs-read-chunk-size
, and then double the size for each read. When --vfs-read-chunk-size-limit
is specified, and greater than --vfs-read-chunk-size
, the chunk size for each open file will get doubled only until the specified value is reached. If the value is "off", which is the default, the limit is disabled and the chunk size will grow indefinitely.
+With --vfs-read-chunk-size 100M
and --vfs-read-chunk-size-limit 0
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on. When --vfs-read-chunk-size-limit 500M
is specified, the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
+Setting --vfs-read-chunk-size
to 0
or "off" disables chunked reading.
+VFS Performance
+These flags may be used to enable/disable features of the VFS for performance or other reasons. See also the chunked reading feature.
+In particular S3 and Swift benefit hugely from the --no-modtime
flag (or use --use-server-modtime
for a slightly different effect) as each read of the modification time takes a transaction.
+--no-checksum Don't compare checksums on up/download.
+--no-modtime Don't read/write the modification time (can speed things up).
+--no-seek Don't allow seeking in files.
+--read-only Only allow read-only access.
+Sometimes rclone is delivered reads or writes out of order. Rather than seeking rclone will wait a short time for the in sequence read or write to come in. These flags only come into effect when not using an on disk cache file.
+--vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
+--vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
+When using VFS write caching (--vfs-cache-mode
with value writes or full), the global flag --transfers
can be set to adjust the number of parallel uploads of modified files from the cache (the related global flag --checkers
has no effect on the VFS).
+--transfers int Number of file transfers to run in parallel (default 4)
+VFS Case Sensitivity
+Linux file systems are case-sensitive: two files can differ only by case, and the exact case must be used when opening a file.
+File systems in modern Windows are case-insensitive but case-preserving: although existing files can be opened using any case, the exact case used to create the file is preserved and available for programs to query. It is not allowed for two files in the same directory to differ only by case.
+Usually file systems on macOS are case-insensitive. It is possible to make macOS file systems case-sensitive but that is not the default.
+The --vfs-case-insensitive
VFS flag controls how rclone handles these two cases. If its value is "false", rclone passes file names to the remote as-is. If the flag is "true" (or appears without a value on the command line), rclone may perform a "fixup" as explained below.
+The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
+Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
+If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
+The --no-unicode-normalization
flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false
on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes
flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable; however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block-norm-dupes
prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync
.
+VFS Disk Options
+This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
+--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
+Alternate report of used bytes
+Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
+WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
+Auth Proxy
+If you supply the parameter --auth-proxy /path/to/program
then rclone will use that program to generate backends on the fly which then are used to authenticate incoming requests. This uses a simple JSON based protocol with input on STDIN and output on STDOUT.
+PLEASE NOTE: --auth-proxy
and --authorized-keys
cannot be used together, if --auth-proxy
is set the authorized keys option will be ignored.
+There is an example program bin/test_proxy.py in the rclone source code.
+The program's job is to take a user
and pass
on the input and turn those into the config for a backend on STDOUT in JSON format. This config will have any default parameters for the backend added, but it won't use configuration from environment variables or command line options - it is the job of the proxy program to make a complete config.
+This config generated must have this extra parameter - _root
- root to use for the backend
+And it may have this parameter - _obscure
- comma separated strings for parameters to obscure
+If password authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
+{
+ "user": "me",
+ "pass": "mypassword"
+}
+If public-key authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
+{
+ "user": "me",
+ "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
+}
+And as an example return this on STDOUT
+{
+ "type": "sftp",
+ "_root": "",
+ "_obscure": "pass",
+ "user": "me",
+ "pass": "mypassword",
+ "host": "sftp.example.com"
+}
+This would mean that an SFTP backend would be created on the fly for the user
and pass
/public_key
returned in the output to the host given. Note that since _obscure
is set to pass
, rclone will obscure the pass
parameter before creating the backend (which is required for sftp backends).
+The program can manipulate the supplied user
in any way, for example to make proxy to many different sftp backends, you could make the user
be user@example.com
and then set the host
to example.com
in the output and the user to user
. For security you'd probably want to restrict the host
to a limited list.
+Note that an internal cache is keyed on user
so only use that for configuration, don't use pass
or public_key
. This also means that if a user's password or public-key is changed the cache will need to expire (which takes 5 mins) before it takes effect.
+This can be used to build general purpose proxies to any kind of backend that rclone supports.
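To make the protocol concrete, here is a minimal Python sketch of such a proxy program (an illustration of the STDIN/STDOUT exchange described above; the backend type and host are hypothetical, and this is not the bundled bin/test_proxy.py):
#!/usr/bin/env python3
import json
import sys

def main():
    request = json.load(sys.stdin)      # {"user": ..., "pass": ...} or {"user": ..., "public_key": ...}
    config = {
        "type": "sftp",                  # hypothetical backend choice
        "_root": "",                     # required: root to use for the backend
        "_obscure": "pass",              # obscure the pass parameter (needed for sftp)
        "user": request["user"],
        "pass": request.get("pass", ""),
        "host": "sftp.example.com",      # hypothetical host - restrict this in real use
    }
    json.dump(config, sys.stdout)

if __name__ == "__main__":
    main()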
+rclone serve ftp remote:path [flags]
+Options
+ --addr string IPaddress:Port or :Port to bind server to (default "localhost:2121")
+ --auth-proxy string A program to use to create the backend from the auth
+ --cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
+ --dir-perms FileMode Directory permissions (default 0777)
+ --file-perms FileMode File permissions (default 0666)
+ --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
+ -h, --help help for ftp
+ --key string TLS PEM Private key
+ --no-checksum Don't compare checksums on up/download
+ --no-modtime Don't read/write the modification time (can speed things up)
+ --no-seek Don't allow seeking in files
+ --pass string Password for authentication (empty value allow every password)
+ --passive-port string Passive port range to use (default "30000-32000")
+ --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
+ --public-ip string Public IP address to advertise for passive connections
+ --read-only Only allow read-only access
+ --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
+ --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --user string User name for authentication (default "anonymous")
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+ --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+ --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+ --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
+ --vfs-case-insensitive If a file name not found, find a case insensitive match
+ --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
+ --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
+ --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
+ --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
+ --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+ --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
+ --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
+ --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
+ --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
+Filter Options
+Flags for filtering directory listings.
+ --delete-excluded Delete files on dest excluded from sync
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --ignore-case Ignore case in filters (case insensitive)
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --metadata-exclude stringArray Exclude metadatas matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray Include metadatas matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+See the global flags page for global options not listed here.
+SEE ALSO
+
rclone serve http
Serve the remote over HTTP.
-Synopsis
+Synopsis
Run a basic web server to serve a remote over HTTP. This can be viewed in a web browser or you can make a remote of type http read from it.
You can use the filter flags (e.g. --include
, --exclude
) to control what is served.
The server will log errors. Use -v
to see access logs.
@@ -4388,243 +4820,6 @@ htpasswd -B htpasswd anotherUser
This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
-VFS Directory Cache
-Using the --dir-cache-time
flag, you can control how long a directory should be considered up to date and not refreshed from the backend. Changes made through the VFS will appear immediately or invalidate the cache.
---dir-cache-time duration Time to cache directory entries for (default 5m0s)
---poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
-However, changes made directly on the cloud storage by the web interface or a different copy of rclone will only be picked up once the directory cache expires if the backend configured does not support polling for changes. If the backend supports polling, changes will be picked up within the polling interval.
-You can send a SIGHUP
signal to rclone for it to flush all directory caches, regardless of how old they are. Assuming only one rclone instance is running, you can reset the cache like this:
-kill -SIGHUP $(pidof rclone)
-If you configure rclone with a remote control then you can use rclone rc to flush the whole directory cache:
-rclone rc vfs/forget
-Or individual files or directories:
-rclone rc vfs/forget file=path/to/file dir=path/to/dir
-VFS File Buffering
-The --buffer-size
flag determines the amount of memory that will be used to buffer data in advance.
-Each open file will try to keep the specified amount of data in memory at all times. The buffered data is bound to one open file and won't be shared.
-This flag is an upper limit for the memory used per open file. The buffer will only use memory for data that is downloaded but not yet read. If the buffer is empty, only a small amount of memory will be used.
-The maximum memory used by rclone for buffering can be up to --buffer-size * open files
.
-VFS File Caching
-These flags control the VFS file caching options. File caching is necessary to make the VFS layer appear compatible with a normal file system. It can be disabled at the cost of some compatibility.
-For example you'll need to enable VFS caching if you want to read and write simultaneously to a file. See below for more details.
-Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
---cache-dir string Directory rclone will use for caching.
---vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
---vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
---vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
---vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
---vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
---vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
-If run with -vv
rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir
or setting the appropriate environment variable.
-The cache has 4 different modes selected by --vfs-cache-mode
. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
-Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back
seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
-If using --vfs-cache-max-size
or --vfs-cache-min-free-space
note that the cache may exceed these quotas for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval
. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size
or --vfs-cache-min-free-space
is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
-The --vfs-cache-max-age
will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from the cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation: s, m, h, d, w.
-You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off
. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir
. You don't need to worry about this if the remotes in use don't overlap.
---vfs-cache-mode off
-In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
-This will mean some operations are not possible
-
-- Files can't be opened for both read AND write
-- Files opened for write can't be seeked
-- Existing files opened for write must have O_TRUNC set
-- Files open for read with O_TRUNC will be opened write only
-- Files open for write only will behave as if O_TRUNC was supplied
-- Open modes O_APPEND, O_TRUNC are ignored
-- If an upload fails it can't be retried
-
---vfs-cache-mode minimal
-This is very similar to "off" except that files opened for read AND write will be buffered to disk. This means that files opened for write will be a lot more compatible, but uses the minimal disk space.
-These operations are not possible
-
-- Files opened for write only can't be seeked
-- Existing files opened for write must have O_TRUNC set
-- Files opened for write only will ignore O_APPEND, O_TRUNC
-- If an upload fails it can't be retried
-
---vfs-cache-mode writes
-In this mode files opened for read only are still read directly from the remote, write only and read/write files are buffered to disk first.
-This mode should support all normal file system operations.
-If an upload fails it will be retried at exponentially increasing intervals up to 1 minute.
---vfs-cache-mode full
-In this mode all reads and writes are buffered to and from disk. When data is read from the remote this is buffered to disk as well.
-In this mode the files in the cache will be sparse files and rclone will keep track of which bits of the files it has downloaded.
-So if an application only reads the start of each file, then rclone will only buffer the start of the file. These files will appear to be their full size in the cache, but they will be sparse files with only the data that has been downloaded present in them.
-This mode should support all normal file system operations and is otherwise identical to --vfs-cache-mode
writes.
-When reading a file rclone will read --buffer-size
plus --vfs-read-ahead
bytes ahead. The --buffer-size
is buffered in memory whereas the --vfs-read-ahead
is buffered on disk.
-When using this mode it is recommended that --buffer-size
is not set too large and --vfs-read-ahead
is set large if required.
-IMPORTANT not all file systems support sparse files. In particular FAT/exFAT do not. Rclone will perform very badly if the cache directory is on a filesystem which doesn't support sparse files and it will log an ERROR message if one is detected.
-Fingerprinting
-Various parts of the VFS use fingerprinting to see if a local file copy has changed relative to a remote file. Fingerprints are made from:
-
-- size
-- modification time
-- hash
-
-where available on an object.
-On some backends some of these attributes are slow to read (they take an extra API call per object, or extra work per object).
-For example hash
is slow with the local
and sftp
backends as they have to read the entire file and hash it, and modtime
is slow with the s3
, swift
, ftp
and qingstor
backends because they need to do an extra API call to fetch it.
-If you use the --vfs-fast-fingerprint
flag then rclone will not include the slow operations in the fingerprint. This makes the fingerprinting less accurate but much faster and will improve the opening time of cached files.
-If you are running a vfs cache over local
, s3
or swift
backends then using this flag is recommended.
-Note that if you change the value of this flag, the fingerprints of the files in the cache may be invalidated and the files will need to be downloaded again.
-VFS Chunked Reading
-When rclone reads files from a remote it reads them in chunks. This means that rather than requesting the whole file rclone reads the chunk specified. This can reduce the used download quota for some remotes by requesting only chunks from the remote that are actually read, at the cost of an increased number of requests.
-These flags control the chunking:
---vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
---vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
-Rclone will start reading a chunk of size --vfs-read-chunk-size
, and then double the size for each read. When --vfs-read-chunk-size-limit
is specified, and greater than --vfs-read-chunk-size
, the chunk size for each open file will get doubled only until the specified value is reached. If the value is "off", which is the default, the limit is disabled and the chunk size will grow indefinitely.
-With --vfs-read-chunk-size 100M
and --vfs-read-chunk-size-limit 0
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on. When --vfs-read-chunk-size-limit 500M
is specified, the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
-Setting --vfs-read-chunk-size
to 0
or "off" disables chunked reading.
-VFS Performance
-These flags may be used to enable/disable features of the VFS for performance or other reasons. See also the chunked reading feature.
-In particular S3 and Swift benefit hugely from the --no-modtime
flag (or use --use-server-modtime
for a slightly different effect) as each read of the modification time takes a transaction.
---no-checksum Don't compare checksums on up/download.
---no-modtime Don't read/write the modification time (can speed things up).
---no-seek Don't allow seeking in files.
---read-only Only allow read-only access.
-Sometimes rclone is delivered reads or writes out of order. Rather than seeking rclone will wait a short time for the in sequence read or write to come in. These flags only come into effect when not using an on disk cache file.
---vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
---vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
-When using VFS write caching (--vfs-cache-mode
with value writes or full), the global flag --transfers
can be set to adjust the number of parallel uploads of modified files from the cache (the related global flag --checkers
has no effect on the VFS).
---transfers int Number of file transfers to run in parallel (default 4)
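A brief hedged sketch of raising the upload parallelism for the write-back cache (the value 8 is illustrative only):
rclone serve http remote: --vfs-cache-mode writes --transfers 8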
-VFS Case Sensitivity
-Linux file systems are case-sensitive: two files can differ only by case, and the exact case must be used when opening a file.
-File systems in modern Windows are case-insensitive but case-preserving: although existing files can be opened using any case, the exact case used to create the file is preserved and available for programs to query. It is not allowed for two files in the same directory to differ only by case.
-Usually file systems on macOS are case-insensitive. It is possible to make macOS file systems case-sensitive but that is not the default.
-The --vfs-case-insensitive
VFS flag controls how rclone handles these two cases. If its value is "false", rclone passes file names to the remote as-is. If the flag is "true" (or appears without a value on the command line), rclone may perform a "fixup" as explained below.
-The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
-Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
-If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
-VFS Disk Options
-This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
---vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
-Alternate report of used bytes
-Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
-WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
-Auth Proxy
-If you supply the parameter --auth-proxy /path/to/program
then rclone will use that program to generate backends on the fly which then are used to authenticate incoming requests. This uses a simple JSON based protocol with input on STDIN and output on STDOUT.
-PLEASE NOTE: --auth-proxy
and --authorized-keys
cannot be used together, if --auth-proxy
is set the authorized keys option will be ignored.
-There is an example program bin/test_proxy.py in the rclone source code.
-The program's job is to take a user
and pass
on the input and turn those into the config for a backend on STDOUT in JSON format. This config will have any default parameters for the backend added, but it won't use configuration from environment variables or command line options - it is the job of the proxy program to make a complete config.
-This config generated must have this extra parameter - _root
- root to use for the backend
-And it may have this parameter - _obscure
- comma separated strings for parameters to obscure
-If password authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
-{
- "user": "me",
- "pass": "mypassword"
-}
-If public-key authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
-{
- "user": "me",
- "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
-}
-And, as an example, the program might return this on STDOUT:
-{
- "type": "sftp",
- "_root": "",
- "_obscure": "pass",
- "user": "me",
- "pass": "mypassword",
- "host": "sftp.example.com"
-}
-This would mean that an SFTP backend would be created on the fly for the user
and pass
/public_key
returned in the output to the host given. Note that since _obscure
is set to pass
, rclone will obscure the pass
parameter before creating the backend (which is required for sftp backends).
-The program can manipulate the supplied user
in any way, for example to proxy to many different sftp backends, you could make the user
be user@example.com
and then set the host
to example.com
in the output and the user to user
. For security you'd probably want to restrict the host
to a limited list.
-Note that an internal cache is keyed on user
so only use that for configuration, don't use pass
or public_key
. This also means that if a user's password or public-key is changed the cache will need to expire (which takes 5 mins) before it takes effect.
-This can be used to build general purpose proxies to any kind of backend that rclone supports.
-rclone serve http remote:path [flags]
-Options
- --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
- --allow-origin string Origin which cross-domain request (CORS) can be executed from
- --auth-proxy string A program to use to create the backend from the auth
- --baseurl string Prefix for URLs - leave blank for root
- --cert string TLS PEM key (concatenation of certificate and CA certificate)
- --client-ca string Client certificate authority to verify clients with
- --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
- --dir-perms FileMode Directory permissions (default 0777)
- --file-perms FileMode File permissions (default 0666)
- --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
- -h, --help help for http
- --htpasswd string A htpasswd file - if not provided no authentication is done
- --key string TLS PEM Private key
- --max-header-bytes int Maximum size of request header (default 4096)
- --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
- --no-checksum Don't compare checksums on up/download
- --no-modtime Don't read/write the modification time (can speed things up)
- --no-seek Don't allow seeking in files
- --pass string Password for authentication
- --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
- --read-only Only allow read-only access
- --realm string Realm for authentication
- --salt string Password hashing salt (default "dlPL2MqE")
- --server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
- --server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
- --template string User-specified template
- --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
- --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --user string User name for authentication
- --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
- --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
- --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
- --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
- --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
- --vfs-case-insensitive If a file name not found, find a case insensitive match
- --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
- --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
- --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
- --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
- --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
- --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
- --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
- --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
- --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
-Filter Options
-Flags for filtering directory listings.
- --delete-excluded Delete files on dest excluded from sync
- --exclude stringArray Exclude files matching pattern
- --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
- --exclude-if-present stringArray Exclude directories if filename is present
- --files-from stringArray Read list of source-file names from file (use - to read from stdin)
- --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
- -f, --filter stringArray Add a file filtering rule
- --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
- --ignore-case Ignore case in filters (case insensitive)
- --include stringArray Include files matching pattern
- --include-from stringArray Read file include patterns from file (use - to read from stdin)
- --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --max-depth int If set limits the recursion depth to this (default -1)
- --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
- --metadata-exclude stringArray Exclude metadatas matching pattern
- --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
- --metadata-filter stringArray Add a metadata filtering rule
- --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
- --metadata-include stringArray Include metadatas matching pattern
- --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
- --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
- --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
-See the global flags page for global options not listed here.
-SEE ALSO
-
-rclone serve nfs
-Serve the remote as an NFS mount
-Synopsis
-Create an NFS server that serves the given remote over the network.
-The primary purpose for this command is to enable the mount command on recent macOS versions where installing FUSE is very cumbersome.
-Since this is running on NFSv3, no authentication method is available. Any client will be able to access the data. To limit access, you can serve NFS on a loopback address and rely on secure tunnels (such as SSH). For this reason, by default, a random TCP port is chosen and the loopback interface is used for the listening address, meaning that the server is only available to the local machine. If you want other machines to access the NFS mount over the local network, you need to specify the listening address and port using the --addr
flag.
-Modifying files through NFS protocol requires VFS caching. Usually you will need to specify --vfs-cache-mode
in order to be able to write to the mountpoint (full is recommended). If you don't specify VFS cache mode, the mount will be read-only.
-To serve NFS over the network, use the following command:
-rclone serve nfs remote: --addr 0.0.0.0:$PORT --vfs-cache-mode=full
-We specify a specific port that we can use in the mount command:
-To mount the server under Linux/macOS, use the following command:
-mount -oport=$PORT,mountport=$PORT $HOSTNAME: path/to/mountpoint
-Where $PORT
is the same port number we used in the serve nfs command.
-This feature is only available on Unix platforms.
-VFS - Virtual File System
-This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
-Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
-The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
VFS Directory Cache
Using the --dir-cache-time
flag, you can control how long a directory should be considered up to date and not refreshed from the backend. Changes made through the VFS will appear immediately or invalidate the cache.
--dir-cache-time duration Time to cache directory entries for (default 5m0s)
@@ -4732,27 +4927,76 @@ htpasswd -B htpasswd anotherUser
The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
+The --no-unicode-normalization
flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false
on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes
flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable; however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block-norm-dupes
prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync
.
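For macOS users affected by the NFC/NFD situation described above, a hedged example of enabling the duplicate check (remote: is a placeholder):
rclone serve http remote: --vfs-block-norm-dupes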
VFS Disk Options
This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
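A hedged usage sketch (remote: and /mnt/remote are placeholders): mount with the flag and then check the reported usage with df:
rclone mount remote: /mnt/remote --vfs-used-is-size
df -h /mnt/remote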
-rclone serve nfs remote:path [flags]
+Auth Proxy
+If you supply the parameter --auth-proxy /path/to/program
then rclone will use that program to generate backends on the fly which then are used to authenticate incoming requests. This uses a simple JSON based protocol with input on STDIN and output on STDOUT.
+PLEASE NOTE: --auth-proxy
and --authorized-keys
cannot be used together, if --auth-proxy
is set the authorized keys option will be ignored.
+There is an example program bin/test_proxy.py in the rclone source code.
+The program's job is to take a user
and pass
on the input and turn those into the config for a backend on STDOUT in JSON format. This config will have any default parameters for the backend added, but it won't use configuration from environment variables or command line options - it is the job of the proxy program to make a complete config.
+This config generated must have this extra parameter - _root
- root to use for the backend
+And it may have this parameter - _obscure
- comma separated strings for parameters to obscure
+If password authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
+{
+ "user": "me",
+ "pass": "mypassword"
+}
+If public-key authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
+{
+ "user": "me",
+ "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
+}
+And, as an example, the program might return this on STDOUT:
+{
+ "type": "sftp",
+ "_root": "",
+ "_obscure": "pass",
+ "user": "me",
+ "pass": "mypassword",
+ "host": "sftp.example.com"
+}
+This would mean that an SFTP backend would be created on the fly for the user
and pass
/public_key
returned in the output to the host given. Note that since _obscure
is set to pass
, rclone will obscure the pass
parameter before creating the backend (which is required for sftp backends).
+The program can manipulate the supplied user
in any way, for example to proxy to many different sftp backends, you could make the user
be user@example.com
and then set the host
to example.com
in the output and the user to user
. For security you'd probably want to restrict the host
to a limited list.
+Note that an internal cache is keyed on user
so only use that for configuration, don't use pass
or public_key
. This also means that if a user's password or public-key is changed the cache will need to expire (which takes 5 mins) before it takes effect.
+This can be used to build general purpose proxies to any kind of backend that rclone supports.
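As a hedged illustration only (this is not the bin/test_proxy.py shipped with rclone), a toy proxy could be a shell script that discards the request and always returns the static sftp config shown above; a real proxy would parse the incoming user and pass fields and build the config from them:
#!/usr/bin/env bash
# Toy auth proxy sketch: read and discard the JSON request on STDIN,
# then emit a fixed backend config on STDOUT.
cat > /dev/null
cat <<'EOF'
{
  "type": "sftp",
  "_root": "",
  "_obscure": "pass",
  "user": "me",
  "pass": "mypassword",
  "host": "sftp.example.com"
}
EOF
Such a script would be passed with --auth-proxy /path/to/script as described above.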
+rclone serve http remote:path [flags]
Options
- --addr string IPaddress:Port or :Port to bind server to
+ --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
+ --allow-origin string Origin which cross-domain request (CORS) can be executed from
+ --auth-proxy string A program to use to create the backend from the auth
+ --baseurl string Prefix for URLs - leave blank for root
+ --cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --client-ca string Client certificate authority to verify clients with
--dir-cache-time Duration Time to cache directory entries for (default 5m0s)
--dir-perms FileMode Directory permissions (default 0777)
--file-perms FileMode File permissions (default 0666)
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
- -h, --help help for nfs
+ -h, --help help for http
+ --htpasswd string A htpasswd file - if not provided no authentication is done
+ --key string TLS PEM Private key
+ --max-header-bytes int Maximum size of request header (default 4096)
+ --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
--no-seek Don't allow seeking in files
+ --pass string Password for authentication
--poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
--read-only Only allow read-only access
+ --realm string Realm for authentication
+ --salt string Password hashing salt (default "dlPL2MqE")
+ --server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+ --server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
+ --template string User-specified template
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -4765,7 +5009,7 @@ htpasswd -B htpasswd anotherUser
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -4798,170 +5042,21 @@ htpasswd -B htpasswd anotherUser
-rclone serve restic
-Serve the remote for restic's REST API.
+rclone serve nfs
+Serve the remote as an NFS mount
Synopsis
-Run a basic web server to serve a remote over restic's REST backend API via HTTP. This allows restic to use rclone as a data storage mechanism for cloud providers that restic does not support directly.
-Restic is a command-line program for doing backups.
-The server will log errors. Use -v to see access logs.
---bwlimit
will be respected for file transfers. Use --stats
to control the stats printing.
-Setting up rclone for use by restic
-First set up a remote for your chosen cloud provider.
-Once you have set up the remote, check it is working with, for example "rclone lsd remote:". You may have called the remote something other than "remote:" - just substitute whatever you called it in the following instructions.
-Now start the rclone restic server
-rclone serve restic -v remote:backup
-Where you can replace "backup" in the above by whatever path in the remote you wish to use.
-By default this will serve on "localhost:8080" you can change this with use of the --addr
flag.
-You might wish to start this server on boot.
-Adding --cache-objects=false
will cause rclone to stop caching objects returned from the List call. Caching is normally desirable as it speeds up downloading objects, saves transactions and uses very little memory.
-Setting up restic to use rclone
-Now you can follow the restic instructions on setting up restic.
-Note that you will need restic 0.8.2 or later to interoperate with rclone.
-For the example above you will want to use "http://localhost:8080/" as the URL for the REST server.
-For example:
-$ export RESTIC_REPOSITORY=rest:http://localhost:8080/
-$ export RESTIC_PASSWORD=yourpassword
-$ restic init
-created restic backend 8b1a4b56ae at rest:http://localhost:8080/
-
-Please note that knowledge of your password is required to access
-the repository. Losing your password means that your data is
-irrecoverably lost.
-$ restic backup /path/to/files/to/backup
-scan [/path/to/files/to/backup]
-scanned 189 directories, 312 files in 0:00
-[0:00] 100.00% 38.128 MiB / 38.128 MiB 501 / 501 items 0 errors ETA 0:00
-duration: 0:00
-snapshot 45c8fdd8 saved
-Multiple repositories
-Note that you can use the endpoint to host multiple repositories. Do this by adding a directory name or path after the URL. Note that these must end with /. For example:
-$ export RESTIC_REPOSITORY=rest:http://localhost:8080/user1repo/
-# backup user1 stuff
-$ export RESTIC_REPOSITORY=rest:http://localhost:8080/user2repo/
-# backup user2 stuff
-Private repositories
-The --private-repos
flag can be used to limit users to repositories starting with a path of /<username>/
.
-Server options
-Use --addr
to specify which IP address and port the server should listen on, eg --addr 1.2.3.4:8000
or --addr :8080
to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port.
-If you set --addr
to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info.
-You can use a unix socket by setting the url to unix:///path/to/socket
or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions.
---addr
may be repeated to listen on multiple IPs/ports/sockets.
---server-read-timeout
and --server-write-timeout
can be used to control the timeouts on the server. Note that this is the total time for a transfer.
---max-header-bytes
controls the maximum number of bytes the server will accept in the HTTP header.
---baseurl
controls the URL prefix that rclone serves from. By default rclone will serve from the root. If you used --baseurl "/rclone"
then rclone would serve from a URL starting with "/rclone/". This is useful if you wish to proxy rclone serve. Rclone automatically inserts leading and trailing "/" on --baseurl
, so --baseurl "rclone"
, --baseurl "/rclone"
and --baseurl "/rclone/"
are all treated identically.
-TLS (SSL)
-By default this will serve over http. If you want you can serve over https. You will need to supply the --cert
and --key
flags. If you wish to do client side certificate validation then you will need to supply --client-ca
also.
---cert
should be either a PEM encoded certificate or a concatenation of that with the CA certificate. --key
should be the PEM encoded private key and --client-ca
should be the PEM encoded client certificate authority certificate.
---min-tls-version is the minimum TLS version that is acceptable. Valid values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
-Authentication
-By default this will serve files without needing a login.
-You can either use an htpasswd file which can take lots of users, or set a single username and password with the --user
and --pass
flags.
-If no static users are configured by either of the above methods, and client certificates are required by the --client-ca
flag passed to the server, the client certificate common name will be considered as the username.
-Use --htpasswd /path/to/htpasswd
to provide an htpasswd file. This is in standard apache format and supports MD5, SHA1 and BCrypt for basic authentication. Bcrypt is recommended.
-To create an htpasswd file:
-touch htpasswd
-htpasswd -B htpasswd user
-htpasswd -B htpasswd anotherUser
-The password file can be updated while rclone is running.
-Use --realm
to set the authentication realm.
-Use --salt
to change the password hashing salt from the default.
-rclone serve restic remote:path [flags]
-Options
- --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
- --allow-origin string Origin which cross-domain request (CORS) can be executed from
- --append-only Disallow deletion of repository data
- --baseurl string Prefix for URLs - leave blank for root
- --cache-objects Cache listed objects (default true)
- --cert string TLS PEM key (concatenation of certificate and CA certificate)
- --client-ca string Client certificate authority to verify clients with
- -h, --help help for restic
- --htpasswd string A htpasswd file - if not provided no authentication is done
- --key string TLS PEM Private key
- --max-header-bytes int Maximum size of request header (default 4096)
- --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
- --pass string Password for authentication
- --private-repos Users can only access their private repo
- --realm string Realm for authentication
- --salt string Password hashing salt (default "dlPL2MqE")
- --server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
- --server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
- --stdio Run an HTTP2 server on stdin/stdout
- --user string User name for authentication
-See the global flags page for global options not listed here.
-SEE ALSO
-
-rclone serve s3
-Serve remote:path over s3.
-Synopsis
-serve s3
implements a basic s3 server that serves a remote via s3. This can be viewed with an s3 client, or you can make an s3 type remote to read and write to it with rclone.
-serve s3
is considered Experimental so use with care.
-S3 server supports Signature Version 4 authentication. Just use --auth-key accessKey,secretKey
and set the Authorization
header correctly in the request. (See the AWS docs).
---auth-key
can be repeated for multiple auth pairs. If --auth-key
is not provided then serve s3
will allow anonymous access.
-Please note that some clients may require HTTPS endpoints. See the SSL docs for more information.
-This command uses the VFS directory cache. All the functionality will work with --vfs-cache-mode off
. Using --vfs-cache-mode full
(or writes
) can be used to cache objects locally to improve performance.
-Use --force-path-style=false
if you want to use the bucket name as a part of the hostname (such as mybucket.local)
-Use --etag-hash
if you want to change the hash used for the ETag
. Note that using anything other than MD5
(the default) is likely to cause problems for S3 clients which rely on the Etag being the MD5.
-Quickstart
-For a simple set up, to serve remote:path
over s3, run the server like this:
-rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
-This will be compatible with an rclone remote which is defined like this:
-[serves3]
-type = s3
-provider = Rclone
-endpoint = http://127.0.0.1:8080/
-access_key_id = ACCESS_KEY_ID
-secret_access_key = SECRET_ACCESS_KEY
-use_multipart_uploads = false
-Note that setting disable_multipart_uploads = true
is to work around a bug which will be fixed in due course.
-Bugs
-When uploading multipart files serve s3
holds all the parts in memory (see #7453). This is a limitation of the library rclone uses for serving S3 and will hopefully be fixed at some point.
-Multipart server side copies do not work (see #7454). These take a very long time and eventually fail. The default threshold for multipart server side copies is 5G, which is the maximum it can be, so files above this size will fail to be server side copied.
-For a current list of serve s3
bugs see the serve s3 bug category on GitHub.
-Limitations
-serve s3
will treat all directories in the root as buckets and ignore all files in the root. You can use CreateBucket
to create folders under the root, but you can't create empty folders under other folders not in the root.
-When using PutObject
or DeleteObject
, rclone will automatically create or clean up empty folders. If you don't want to clean up empty folders automatically, use --no-cleanup
.
-When using ListObjects
, rclone will use /
when the delimiter is empty. This reduces backend requests with no effect on most operations, but if the delimiter is something other than /
or empty, rclone will do a full recursive search of the backend, which can take some time.
-Versioning is not currently supported.
-Metadata will only be saved in memory other than the rclone mtime
metadata which will be set as the modification time of the file.
-Supported operations
-serve s3
currently supports the following operations.
-
-- Bucket
-
-ListBuckets
-CreateBucket
-DeleteBucket
-
-- Object
-
-HeadObject
-ListObjects
-GetObject
-PutObject
-DeleteObject
-DeleteObjects
-CreateMultipartUpload
-CompleteMultipartUpload
-AbortMultipartUpload
-CopyObject
-UploadPart
-
-
-Other operations will return error Unimplemented
.
-Server options
-Use --addr
to specify which IP address and port the server should listen on, eg --addr 1.2.3.4:8000
or --addr :8080
to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port.
-If you set --addr
to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info.
-You can use a unix socket by setting the url to unix:///path/to/socket
or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions.
---addr
may be repeated to listen on multiple IPs/ports/sockets.
---server-read-timeout
and --server-write-timeout
can be used to control the timeouts on the server. Note that this is the total time for a transfer.
---max-header-bytes
controls the maximum number of bytes the server will accept in the HTTP header.
---baseurl
controls the URL prefix that rclone serves from. By default rclone will serve from the root. If you used --baseurl "/rclone"
then rclone would serve from a URL starting with "/rclone/". This is useful if you wish to proxy rclone serve. Rclone automatically inserts leading and trailing "/" on --baseurl
, so --baseurl "rclone"
, --baseurl "/rclone"
and --baseurl "/rclone/"
are all treated identically.
-TLS (SSL)
-By default this will serve over http. If you want you can serve over https. You will need to supply the --cert
and --key
flags. If you wish to do client side certificate validation then you will need to supply --client-ca
also.
---cert
should be either a PEM encoded certificate or a concatenation of that with the CA certificate. --key
should be the PEM encoded private key and --client-ca
should be the PEM encoded client certificate authority certificate.
---min-tls-version is the minimum TLS version that is acceptable. Valid values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
-VFS - Virtual File System
+Create an NFS server that serves the given remote over the network.
+The primary purpose for this command is to enable the mount command on recent macOS versions where installing FUSE is very cumbersome.
+Since this is running on NFSv3, no authentication method is available. Any client will be able to access the data. To limit access, you can serve NFS on a loopback address and rely on secure tunnels (such as SSH). For this reason, by default, a random TCP port is chosen and the loopback interface is used for the listening address, meaning that the server is only available to the local machine. If you want other machines to access the NFS mount over the local network, you need to specify the listening address and port using the --addr
flag.
+Modifying files through NFS protocol requires VFS caching. Usually you will need to specify --vfs-cache-mode
in order to be able to write to the mountpoint (full is recommended). If you don't specify VFS cache mode, the mount will be read-only. Note also that --nfs-cache-handle-limit
controls the maximum number of cached file handles stored by the caching handler. This should not be set too low or you may experience errors when trying to access files. The default is 1000000
, but consider lowering this limit if the server's system resource usage causes problems.
+To serve NFS over the network, use the following command:
+rclone serve nfs remote: --addr 0.0.0.0:$PORT --vfs-cache-mode=full
+We specify a specific port that we can use in the mount command:
+To mount the server under Linux/macOS, use the following command:
+mount -oport=$PORT,mountport=$PORT $HOSTNAME: path/to/mountpoint
+Where $PORT
is the same port number we used in the serve nfs command.
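As a concrete hedged example, with 2049 assumed purely for illustration as the value of $PORT:
rclone serve nfs remote: --addr 0.0.0.0:2049 --vfs-cache-mode=full
# then on the client, with $HOSTNAME pointing at the serving machine:
mount -oport=2049,mountport=2049 $HOSTNAME: /path/to/mountpoint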
+This feature is only available on Unix platforms.
+VFS - Virtual File System
This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
@@ -5072,40 +5167,31 @@ use_multipart_uploads = false
The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
+The --no-unicode-normalization
flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false
on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes
flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable; however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block-norm-dupes
prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync
.
VFS Disk Options
This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
-rclone serve s3 remote:path [flags]
-Options
- --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
- --allow-origin string Origin which cross-domain request (CORS) can be executed from
- --auth-key stringArray Set key pair for v4 authorization: access_key_id,secret_access_key
- --baseurl string Prefix for URLs - leave blank for root
- --cert string TLS PEM key (concatenation of certificate and CA certificate)
- --client-ca string Client certificate authority to verify clients with
+rclone serve nfs remote:path [flags]
+Options
+ --addr string IPaddress:Port or :Port to bind server to
--dir-cache-time Duration Time to cache directory entries for (default 5m0s)
--dir-perms FileMode Directory permissions (default 0777)
- --etag-hash string Which hash to use for the ETag, or auto or blank for off (default "MD5")
--file-perms FileMode File permissions (default 0666)
- --force-path-style If true use path style access if false use virtual hosted style (default true)
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
- -h, --help help for s3
- --key string TLS PEM Private key
- --max-header-bytes int Maximum size of request header (default 4096)
- --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
+ -h, --help help for nfs
+ --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000)
--no-checksum Don't compare checksums on up/download
- --no-cleanup Not to cleanup empty folder after object is deleted
--no-modtime Don't read/write the modification time (can speed things up)
--no-seek Don't allow seeking in files
--poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
--read-only Only allow read-only access
- --server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
- --server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -5118,7 +5204,7 @@ use_multipart_uploads = false
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -5147,28 +5233,174 @@ use_multipart_uploads = false
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
See the global flags page for global options not listed here.
+SEE ALSO
+
+rclone serve restic
+Serve the remote for restic's REST API.
+Synopsis
+Run a basic web server to serve a remote over restic's REST backend API via HTTP. This allows restic to use rclone as a data storage mechanism for cloud providers that restic does not support directly.
+Restic is a command-line program for doing backups.
+The server will log errors. Use -v to see access logs.
+--bwlimit
will be respected for file transfers. Use --stats
to control the stats printing.
+Setting up rclone for use by restic
+First set up a remote for your chosen cloud provider.
+Once you have set up the remote, check it is working with, for example "rclone lsd remote:". You may have called the remote something other than "remote:" - just substitute whatever you called it in the following instructions.
+Now start the rclone restic server
+rclone serve restic -v remote:backup
+Where you can replace "backup" in the above by whatever path in the remote you wish to use.
+By default this will serve on "localhost:8080" you can change this with use of the --addr
flag.
+You might wish to start this server on boot.
+Adding --cache-objects=false
will cause rclone to stop caching objects returned from the List call. Caching is normally desirable as it speeds up downloading objects, saves transactions and uses very little memory.
+Setting up restic to use rclone
+Now you can follow the restic instructions on setting up restic.
+Note that you will need restic 0.8.2 or later to interoperate with rclone.
+For the example above you will want to use "http://localhost:8080/" as the URL for the REST server.
+For example:
+$ export RESTIC_REPOSITORY=rest:http://localhost:8080/
+$ export RESTIC_PASSWORD=yourpassword
+$ restic init
+created restic backend 8b1a4b56ae at rest:http://localhost:8080/
+
+Please note that knowledge of your password is required to access
+the repository. Losing your password means that your data is
+irrecoverably lost.
+$ restic backup /path/to/files/to/backup
+scan [/path/to/files/to/backup]
+scanned 189 directories, 312 files in 0:00
+[0:00] 100.00% 38.128 MiB / 38.128 MiB 501 / 501 items 0 errors ETA 0:00
+duration: 0:00
+snapshot 45c8fdd8 saved
+Multiple repositories
+Note that you can use the endpoint to host multiple repositories. Do this by adding a directory name or path after the URL. Note that these must end with /. For example:
+$ export RESTIC_REPOSITORY=rest:http://localhost:8080/user1repo/
+# backup user1 stuff
+$ export RESTIC_REPOSITORY=rest:http://localhost:8080/user2repo/
+# backup user2 stuff
+Private repositories
+The --private-repos
flag can be used to limit users to repositories starting with a path of /<username>/
.
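A hedged sketch combining this with an htpasswd file (usernames and passwords are placeholders); each user is then confined to the repository path matching their username:
rclone serve restic remote:backup --htpasswd ./htpasswd --private-repos
# user1 would then point restic at something like:
# export RESTIC_REPOSITORY=rest:http://user1:password@localhost:8080/user1/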
+Server options
+Use --addr
to specify which IP address and port the server should listen on, eg --addr 1.2.3.4:8000
or --addr :8080
to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port.
+If you set --addr
to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info.
+You can use a unix socket by setting the url to unix:///path/to/socket
or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions.
+--addr
may be repeated to listen on multiple IPs/ports/sockets.
+--server-read-timeout
and --server-write-timeout
can be used to control the timeouts on the server. Note that this is the total time for a transfer.
+--max-header-bytes
controls the maximum number of bytes the server will accept in the HTTP header.
+--baseurl
controls the URL prefix that rclone serves from. By default rclone will serve from the root. If you used --baseurl "/rclone"
then rclone would serve from a URL starting with "/rclone/". This is useful if you wish to proxy rclone serve. Rclone automatically inserts leading and trailing "/" on --baseurl
, so --baseurl "rclone"
, --baseurl "/rclone"
and --baseurl "/rclone/"
are all treated identically.
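Hedged examples of these options (socket path, port and prefix are placeholders):
# listen on a unix socket instead of a TCP port
rclone serve restic remote:backup --addr unix:///tmp/rclone-restic.sock
# listen on all interfaces under a /rclone prefix
rclone serve restic remote:backup --addr :8080 --baseurl /rclone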
+TLS (SSL)
+By default this will serve over http. If you want you can serve over https. You will need to supply the --cert
and --key
flags. If you wish to do client side certificate validation then you will need to supply --client-ca
also.
+--cert
should be either a PEM encoded certificate or a concatenation of that with the CA certificate. --key
should be the PEM encoded private key and --client-ca
should be the PEM encoded client certificate authority certificate.
+--min-tls-version is the minimum TLS version that is acceptable. Valid values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
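For testing, a hedged example of creating a self-signed certificate with openssl and serving over https with it (file names are placeholders):
openssl req -x509 -newkey rsa:4096 -nodes -keyout key.pem -out cert.pem -days 365 -subj "/CN=localhost"
rclone serve restic remote:backup --cert cert.pem --key key.pem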
+Authentication
+By default this will serve files without needing a login.
+You can either use an htpasswd file which can take lots of users, or set a single username and password with the --user
and --pass
flags.
+If no static users are configured by either of the above methods, and client certificates are required by the --client-ca
flag passed to the server, the client certificate common name will be considered as the username.
+Use --htpasswd /path/to/htpasswd
to provide an htpasswd file. This is in standard apache format and supports MD5, SHA1 and BCrypt for basic authentication. Bcrypt is recommended.
+To create an htpasswd file:
+touch htpasswd
+htpasswd -B htpasswd user
+htpasswd -B htpasswd anotherUser
+The password file can be updated while rclone is running.
+Use --realm
to set the authentication realm.
+Use --salt
to change the password hashing salt from the default.
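A hedged example of the two authentication styles described above (user name, password and realm are placeholders):
# single user/password pair
rclone serve restic remote:backup --user restic --pass secret
# or an htpasswd file with many users
rclone serve restic remote:backup --htpasswd ./htpasswd --realm rclone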
+rclone serve restic remote:path [flags]
+Options
+ --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
+ --allow-origin string Origin which cross-domain request (CORS) can be executed from
+ --append-only Disallow deletion of repository data
+ --baseurl string Prefix for URLs - leave blank for root
+ --cache-objects Cache listed objects (default true)
+ --cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --client-ca string Client certificate authority to verify clients with
+ -h, --help help for restic
+ --htpasswd string A htpasswd file - if not provided no authentication is done
+ --key string TLS PEM Private key
+ --max-header-bytes int Maximum size of request header (default 4096)
+ --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
+ --pass string Password for authentication
+ --private-repos Users can only access their private repo
+ --realm string Realm for authentication
+ --salt string Password hashing salt (default "dlPL2MqE")
+ --server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+ --server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
+ --stdio Run an HTTP2 server on stdin/stdout
+ --user string User name for authentication
+See the global flags page for global options not listed here.
SEE ALSO
-rclone serve sftp
-Serve the remote over SFTP.
+rclone serve s3
+Serve remote:path over s3.
Synopsis
-Run an SFTP server to serve a remote over SFTP. This can be used with an SFTP client or you can make a remote of type sftp to use with it.
-You can use the filter flags (e.g. --include
, --exclude
) to control what is served.
-The server will respond to a small number of shell commands, mainly md5sum, sha1sum and df, which enable it to provide support for checksums and the about feature when accessed from an sftp remote.
-Note that this server uses standard 32 KiB packet payload size, which means you must not configure the client to expect anything else, e.g. with the chunk_size option on an sftp remote.
-The server will log errors. Use -v
to see access logs.
---bwlimit
will be respected for file transfers. Use --stats
to control the stats printing.
-You must provide some means of authentication, either with --user
/--pass
, an authorized keys file (specify location with --authorized-keys
- the default is the same as ssh), an --auth-proxy
, or set the --no-auth
flag for no authentication when logging in.
-If you don't supply a host --key
then rclone will generate rsa, ecdsa and ed25519 variants, and cache them for later use in rclone's cache directory (see rclone help flags cache-dir
) in the "serve-sftp" directory.
-By default the server binds to localhost:2022 - if you want it to be reachable externally then supply --addr :2022
for example.
-Note that the default of --vfs-cache-mode off
is fine for the rclone sftp backend, but it may not be with other SFTP clients.
-If --stdio
is specified, rclone will serve SFTP over stdio, which can be used with sshd via ~/.ssh/authorized_keys, for example:
-restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
-On the client you need to set --transfers 1
when using --stdio
. Otherwise multiple instances of the rclone server are started by OpenSSH which can lead to "corrupted on transfer" errors. This is the case because the client chooses indiscriminately which server to send commands to while the servers all have different views of the state of the filing system.
-The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from being used. Omitting "restrict" and using --sftp-path-override
to enable checksumming is possible but less secure and you could use the SFTP server provided by OpenSSH in this case.
-VFS - Virtual File System
+serve s3
implements a basic s3 server that serves a remote via s3. This can be viewed with an s3 client, or you can make an s3 type remote to read and write to it with rclone.
+serve s3
is considered Experimental so use with care.
+S3 server supports Signature Version 4 authentication. Just use --auth-key accessKey,secretKey
and set the Authorization
header correctly in the request. (See the AWS docs).
+--auth-key
can be repeated for multiple auth pairs. If --auth-key
is not provided then serve s3
will allow anonymous access.
+Please note that some clients may require HTTPS endpoints. See the SSL docs for more information.
+This command uses the VFS directory cache. All the functionality will work with --vfs-cache-mode off
. Using --vfs-cache-mode full
(or writes
) can be used to cache objects locally to improve performance.
+Use --force-path-style=false
if you want to use the bucket name as a part of the hostname (such as mybucket.local)
+Use --etag-hash
if you want to change the hash used for the ETag
. Note that using anything other than MD5
(the default) is likely to cause problems for S3 clients which rely on the Etag being the MD5.
+Quickstart
+For a simple set up, to serve remote:path
over s3, run the server like this:
+rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
+This will be compatible with an rclone remote which is defined like this:
+[serves3]
+type = s3
+provider = Rclone
+endpoint = http://127.0.0.1:8080/
+access_key_id = ACCESS_KEY_ID
+secret_access_key = SECRET_ACCESS_KEY
+use_multipart_uploads = false
+Note that setting disable_multipart_uploads = true
is to work around a bug which will be fixed in due course.
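Once the serves3 remote above is defined in the rclone config, it can be used like any other remote; a hedged example (the bucket name is a placeholder):
rclone mkdir serves3:mybucket
rclone copy /path/to/files serves3:mybucket
rclone lsd serves3: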
+Bugs
+When uploading multipart files serve s3
holds all the parts in memory (see #7453). This is a limitation of the library rclone uses for serving S3 and will hopefully be fixed at some point.
+Multipart server side copies do not work (see #7454). These take a very long time and eventually fail. The default threshold for multipart server side copies is 5G, which is the maximum it can be, so files above this size will fail to be server side copied.
+For a current list of serve s3
bugs see the serve s3 bug category on GitHub.
+Limitations
+serve s3
will treat all directories in the root as buckets and ignore all files in the root. You can use CreateBucket
to create folders under the root, but you can't create empty folders under other folders not in the root.
+When using PutObject
or DeleteObject
, rclone will automatically create or clean up empty folders. If you don't want to clean up empty folders automatically, use --no-cleanup
.
+When using ListObjects
, rclone will use /
when the delimiter is empty. This reduces backend requests with no effect on most operations, but if the delimiter is something other than /
or the empty string, rclone will do a full recursive search of the backend, which can take some time.
+Versioning is not currently supported.
+Metadata will only be saved in memory, except for the rclone mtime
metadata, which will be set as the modification time of the file.
+Supported operations
+serve s3
currently supports the following operations.
+
+- Bucket
+
+ListBuckets
+CreateBucket
+DeleteBucket
+
+- Object
+
+HeadObject
+ListObjects
+GetObject
+PutObject
+DeleteObject
+DeleteObjects
+CreateMultipartUpload
+CompleteMultipartUpload
+AbortMultipartUpload
+CopyObject
+UploadPart
+
+
+Other operations will return error Unimplemented
.
+Server options
+Use --addr
to specify which IP address and port the server should listen on, eg --addr 1.2.3.4:8000
or --addr :8080
to listen to all IPs. By default it only listens on localhost. You can use port :0 to let the OS choose an available port.
+If you set --addr
to listen on a public or LAN accessible IP address then using Authentication is advised - see the next section for info.
+You can use a unix socket by setting the url to unix:///path/to/socket
or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions.
+--addr
may be repeated to listen on multiple IPs/ports/sockets.
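+For example, to listen on a TCP port and a unix socket at the same time (the socket path is illustrative):
+rclone serve s3 remote:path --addr :8080 --addr unix:///tmp/rclone-s3.sock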
+--server-read-timeout
and --server-write-timeout
can be used to control the timeouts on the server. Note that this is the total time for a transfer.
+--max-header-bytes
controls the maximum number of bytes the server will accept in the HTTP header.
+--baseurl
controls the URL prefix that rclone serves from. By default rclone will serve from the root. If you used --baseurl "/rclone"
then rclone would serve from a URL starting with "/rclone/". This is useful if you wish to proxy rclone serve. Rclone automatically inserts leading and trailing "/" on --baseurl
, so --baseurl "rclone"
, --baseurl "/rclone"
and --baseurl "/rclone/"
are all treated identically.
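+For example, to serve under a "/rclone" prefix behind a reverse proxy:
+rclone serve s3 remote:path --baseurl /rclone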
+TLS (SSL)
+By default this will serve over http. If you want you can serve over https. You will need to supply the --cert
and --key
flags. If you wish to do client side certificate validation then you will need to supply --client-ca
also.
+--cert
should be either a PEM encoded certificate or a concatenation of that with the CA certificate. --key
should be the PEM encoded private key and --client-ca
should be the PEM encoded client certificate authority certificate.
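+For example, to serve over https (the certificate and key file names are illustrative):
+rclone serve s3 remote:path --cert server.crt --key server.key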
+--min-tls-version is the minimum TLS version that is acceptable. Valid values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
+VFS - Virtual File System
This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
@@ -5279,64 +5511,43 @@ use_multipart_uploads = false
The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
+The --no-unicode-normalization
flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false
on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes
flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable; however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block-norm-dupes
prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync
.
VFS Disk Options
This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
-Auth Proxy
-If you supply the parameter --auth-proxy /path/to/program
then rclone will use that program to generate backends on the fly which then are used to authenticate incoming requests. This uses a simple JSON based protocol with input on STDIN and output on STDOUT.
-PLEASE NOTE: --auth-proxy
and --authorized-keys
cannot be used together, if --auth-proxy
is set the authorized keys option will be ignored.
-There is an example program bin/test_proxy.py in the rclone source code.
-The program's job is to take a user
and pass
on the input and turn those into the config for a backend on STDOUT in JSON format. This config will have any default parameters for the backend added, but it won't use configuration from environment variables or command line options - it is the job of the proxy program to make a complete config.
-This config generated must have this extra parameter - _root
- root to use for the backend
-And it may have this parameter - _obscure
- comma separated strings for parameters to obscure
-If password authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
-{
- "user": "me",
- "pass": "mypassword"
-}
-If public-key authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
-{
- "user": "me",
- "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
-}
-And as an example return this on STDOUT
-{
- "type": "sftp",
- "_root": "",
- "_obscure": "pass",
- "user": "me",
- "pass": "mypassword",
- "host": "sftp.example.com"
-}
-This would mean that an SFTP backend would be created on the fly for the user
and pass
/public_key
returned in the output to the host given. Note that since _obscure
is set to pass
, rclone will obscure the pass
parameter before creating the backend (which is required for sftp backends).
-The program can manipulate the supplied user
in any way, for example to make proxy to many different sftp backends, you could make the user
be user@example.com
and then set the host
to example.com
in the output and the user to user
. For security you'd probably want to restrict the host
to a limited list.
-Note that an internal cache is keyed on user
so only use that for configuration, don't use pass
or public_key
. This also means that if a user's password or public-key is changed the cache will need to expire (which takes 5 mins) before it takes effect.
-This can be used to build general purpose proxies to any kind of backend that rclone supports.
-rclone serve sftp remote:path [flags]
+rclone serve s3 remote:path [flags]
Options
- --addr string IPaddress:Port or :Port to bind server to (default "localhost:2022")
- --auth-proxy string A program to use to create the backend from the auth
- --authorized-keys string Authorized keys file (default "~/.ssh/authorized_keys")
+ --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
+ --allow-origin string Origin which cross-domain request (CORS) can be executed from
+ --auth-key stringArray Set key pair for v4 authorization: access_key_id,secret_access_key
+ --baseurl string Prefix for URLs - leave blank for root
+ --cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --client-ca string Client certificate authority to verify clients with
--dir-cache-time Duration Time to cache directory entries for (default 5m0s)
--dir-perms FileMode Directory permissions (default 0777)
+ --etag-hash string Which hash to use for the ETag, or auto or blank for off (default "MD5")
--file-perms FileMode File permissions (default 0666)
+      --force-path-style                       If true use path style access if false use virtual hosted style (default true)
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
- -h, --help help for sftp
- --key stringArray SSH private host key file (Can be multi-valued, leave blank to auto generate)
- --no-auth Allow connections with no authentication if set
+ -h, --help help for s3
+ --key string TLS PEM Private key
+ --max-header-bytes int Maximum size of request header (default 4096)
+ --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
--no-checksum Don't compare checksums on up/download
+ --no-cleanup Not to cleanup empty folder after object is deleted
--no-modtime Don't read/write the modification time (can speed things up)
--no-seek Don't allow seeking in files
- --pass string Password for authentication
--poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
--read-only Only allow read-only access
- --stdio Run an sftp server on stdin/stdout
+ --server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+ --server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
- --user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -5349,7 +5560,7 @@ use_multipart_uploads = false
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -5382,9 +5593,243 @@ use_multipart_uploads = false
+rclone serve sftp
+Serve the remote over SFTP.
+Synopsis
+Run an SFTP server to serve a remote over SFTP. This can be used with an SFTP client or you can make a remote of type sftp to use with it.
+You can use the filter flags (e.g. --include
, --exclude
) to control what is served.
+The server will respond to a small number of shell commands, mainly md5sum, sha1sum and df, which enable it to provide support for checksums and the about feature when accessed from an sftp remote.
+Note that this server uses standard 32 KiB packet payload size, which means you must not configure the client to expect anything else, e.g. with the chunk_size option on an sftp remote.
+The server will log errors. Use -v
to see access logs.
+--bwlimit
will be respected for file transfers. Use --stats
to control the stats printing.
+You must provide some means of authentication, either with --user
/--pass
, an authorized keys file (specify location with --authorized-keys
- the default is the same as ssh), an --auth-proxy
, or set the --no-auth
flag for no authentication when logging in.
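+For example, to serve with a single username and password and make the server reachable externally (the credentials shown are placeholders):
+rclone serve sftp remote:path --user sftpuser --pass mypassword --addr :2022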
+If you don't supply a host --key
then rclone will generate rsa, ecdsa and ed25519 variants, and cache them for later use in rclone's cache directory (see rclone help flags cache-dir
) in the "serve-sftp" directory.
+By default the server binds to localhost:2022 - if you want it to be reachable externally then supply --addr :2022
for example.
+Note that the default of --vfs-cache-mode off
is fine for the rclone sftp backend, but it may not be with other SFTP clients.
+If --stdio
is specified, rclone will serve SFTP over stdio, which can be used with sshd via ~/.ssh/authorized_keys, for example:
+restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
+On the client you need to set --transfers 1
when using --stdio
. Otherwise multiple instances of the rclone server are started by OpenSSH which can lead to "corrupted on transfer" errors. This is the case because the client chooses indiscriminately which server to send commands to while the servers all have different views of the state of the filing system.
+The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from being used. Omitting "restrict" and using --sftp-path-override
to enable checksumming is possible but less secure and you could use the SFTP server provided by OpenSSH in this case.
+VFS - Virtual File System
+This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
+Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
+The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
+VFS Directory Cache
+Using the --dir-cache-time
flag, you can control how long a directory should be considered up to date and not refreshed from the backend. Changes made through the VFS will appear immediately or invalidate the cache.
+--dir-cache-time duration Time to cache directory entries for (default 5m0s)
+--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
+However, changes made directly on the cloud storage by the web interface or a different copy of rclone will only be picked up once the directory cache expires if the backend configured does not support polling for changes. If the backend supports polling, changes will be picked up within the polling interval.
+You can send a SIGHUP
signal to rclone for it to flush all directory caches, regardless of how old they are. Assuming only one rclone instance is running, you can reset the cache like this:
+kill -SIGHUP $(pidof rclone)
+If you configure rclone with a remote control then you can use rclone rc to flush the whole directory cache:
+rclone rc vfs/forget
+Or individual files or directories:
+rclone rc vfs/forget file=path/to/file dir=path/to/dir
+VFS File Buffering
+The --buffer-size
flag determines the amount of memory that will be used to buffer data in advance.
+Each open file will try to keep the specified amount of data in memory at all times. The buffered data is bound to one open file and won't be shared.
+This flag is an upper limit for the used memory per open file. The buffer will only use memory for data that is downloaded but not yet read. If the buffer is empty, only a small amount of memory will be used.
+The maximum memory used by rclone for buffering can be up to --buffer-size * open files
.
+VFS File Caching
+These flags control the VFS file caching options. File caching is necessary to make the VFS layer appear compatible with a normal file system. It can be disabled at the cost of some compatibility.
+For example you'll need to enable VFS caching if you want to read and write simultaneously to a file. See below for more details.
+Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
+--cache-dir string Directory rclone will use for caching.
+--vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+--vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
+--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+--vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
+--vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
+If run with -vv
rclone will print the location of the file cache. The files are stored in the user cache file area which is OS dependent but can be controlled with --cache-dir
or setting the appropriate environment variable.
+The cache has 4 different modes selected by --vfs-cache-mode
. The higher the cache mode the more compatible rclone becomes at the cost of using disk space.
+Note that files are written back to the remote only when they are closed and if they haven't been accessed for --vfs-write-back
seconds. If rclone is quit or dies with files that haven't been uploaded, these will be uploaded next time rclone is run with the same flags.
+If using --vfs-cache-max-size
or --vfs-cache-min-free-space
note that the cache may exceed these quotas for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval
. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size
or --vfs-cache-min-free-space
is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
+The --vfs-cache-max-age
will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w.
+You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off
. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir
. You don't need to worry about this if the remotes in use don't overlap.
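+For example, two instances serving overlapping parts of the same remote could each be given their own cache hierarchy (the paths and ports are illustrative):
+rclone serve sftp remote:path --vfs-cache-mode writes --cache-dir /tmp/rclone-cache-a --addr :2022
+rclone serve sftp remote:path --vfs-cache-mode writes --cache-dir /tmp/rclone-cache-b --addr :2023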
+--vfs-cache-mode off
+In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
+This will mean some operations are not possible
+
+- Files can't be opened for both read AND write
+- Files opened for write can't be seeked
+- Existing files opened for write must have O_TRUNC set
+- Files open for read with O_TRUNC will be opened write only
+- Files open for write only will behave as if O_TRUNC was supplied
+- Open modes O_APPEND, O_TRUNC are ignored
+- If an upload fails it can't be retried
+
+--vfs-cache-mode minimal
+This is very similar to "off" except that files opened for read AND write will be buffered to disk. This means that files opened for write will be a lot more compatible, but uses the minimal disk space.
+These operations are not possible
+
+- Files opened for write only can't be seeked
+- Existing files opened for write must have O_TRUNC set
+- Files opened for write only will ignore O_APPEND, O_TRUNC
+- If an upload fails it can't be retried
+
+--vfs-cache-mode writes
+In this mode files opened for read only are still read directly from the remote, while write only and read/write files are buffered to disk first.
+This mode should support all normal file system operations.
+If an upload fails it will be retried at exponentially increasing intervals up to 1 minute.
+--vfs-cache-mode full
+In this mode all reads and writes are buffered to and from disk. When data is read from the remote this is buffered to disk as well.
+In this mode the files in the cache will be sparse files and rclone will keep track of which bits of the files it has downloaded.
+So if an application only reads the starts of each file, then rclone will only buffer the start of the file. These files will appear to be their full size in the cache, but they will be sparse files with only the data that has been downloaded present in them.
+This mode should support all normal file system operations and is otherwise identical to --vfs-cache-mode
writes.
+When reading a file rclone will read --buffer-size
plus --vfs-read-ahead
bytes ahead. The --buffer-size
is buffered in memory whereas the --vfs-read-ahead
is buffered on disk.
+When using this mode it is recommended that --buffer-size
is not set too large and --vfs-read-ahead
is set large if required.
+IMPORTANT not all file systems support sparse files. In particular FAT/exFAT do not. Rclone will perform very badly if the cache directory is on a filesystem which doesn't support sparse files and it will log an ERROR message if one is detected.
+Fingerprinting
+Various parts of the VFS use fingerprinting to see if a local file copy has changed relative to a remote file. Fingerprints are made from:
+
+- size
+- modification time
+- hash
+
+where available on an object.
+On some backends some of these attributes are slow to read (they take an extra API call per object, or extra work per object).
+For example hash
is slow with the local
and sftp
backends as they have to read the entire file and hash it, and modtime
is slow with the s3
, swift
, ftp
and qinqstor
backends because they need to do an extra API call to fetch it.
+If you use the --vfs-fast-fingerprint
flag then rclone will not include the slow operations in the fingerprint. This makes the fingerprinting less accurate but much faster and will improve the opening time of cached files.
+If you are running a vfs cache over local
, s3
or swift
backends then using this flag is recommended.
+Note that if you change the value of this flag, the fingerprints of the files in the cache may be invalidated and the files will need to be downloaded again.
+VFS Chunked Reading
+When rclone reads files from a remote it reads them in chunks. This means that rather than requesting the whole file rclone reads the chunk specified. This can reduce the used download quota for some remotes by requesting only chunks from the remote that are actually read, at the cost of an increased number of requests.
+These flags control the chunking:
+--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
+--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+Rclone will start reading a chunk of size --vfs-read-chunk-size
, and then double the size for each read. When --vfs-read-chunk-size-limit
is specified, and greater than --vfs-read-chunk-size
, the chunk size for each open file will get doubled only until the specified value is reached. If the value is "off", which is the default, the limit is disabled and the chunk size will grow indefinitely.
+With --vfs-read-chunk-size 100M
and --vfs-read-chunk-size-limit 0
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on. When --vfs-read-chunk-size-limit 500M
is specified, the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
+Setting --vfs-read-chunk-size
to 0
or "off" disables chunked reading.
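+For example, to start with 64M chunks and stop doubling once the chunk size reaches 512M (the values are illustrative):
+rclone serve sftp remote:path --vfs-read-chunk-size 64M --vfs-read-chunk-size-limit 512M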
+VFS Performance
+These flags may be used to enable/disable features of the VFS for performance or other reasons. See also the chunked reading feature.
+In particular S3 and Swift benefit hugely from the --no-modtime
flag (or use --use-server-modtime
for a slightly different effect) as each read of the modification time takes a transaction.
+--no-checksum Don't compare checksums on up/download.
+--no-modtime Don't read/write the modification time (can speed things up).
+--no-seek Don't allow seeking in files.
+--read-only Only allow read-only access.
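+For example, a read-only server which also skips modification time reads, a combination that suits S3 or Swift backed remotes:
+rclone serve sftp remote:path --no-modtime --read-only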
+Sometimes rclone is delivered reads or writes out of order. Rather than seeking rclone will wait a short time for the in sequence read or write to come in. These flags only come into effect when not using an on disk cache file.
+--vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
+--vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
+When using VFS write caching (--vfs-cache-mode
with value writes or full), the global flag --transfers
can be set to adjust the number of parallel uploads of modified files from the cache (the related global flag --checkers
has no effect on the VFS).
+--transfers int Number of file transfers to run in parallel (default 4)
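+For example, to allow 8 parallel uploads of modified files from the cache:
+rclone serve sftp remote:path --vfs-cache-mode writes --transfers 8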
+VFS Case Sensitivity
+Linux file systems are case-sensitive: two files can differ only by case, and the exact case must be used when opening a file.
+File systems in modern Windows are case-insensitive but case-preserving: although existing files can be opened using any case, the exact case used to create the file is preserved and available for programs to query. It is not allowed for two files in the same directory to differ only by case.
+Usually file systems on macOS are case-insensitive. It is possible to make macOS file systems case-sensitive but that is not the default.
+The --vfs-case-insensitive
VFS flag controls how rclone handles these two cases. If its value is "false", rclone passes file names to the remote as-is. If the flag is "true" (or appears without a value on the command line), rclone may perform a "fixup" as explained below.
+The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
+Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
+If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
+The --no-unicode-normalization
flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false
on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes
flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable; however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block-norm-dupes
prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync
.
+VFS Disk Options
+This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
+--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
+Alternate report of used bytes
+Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
+WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
+Auth Proxy
+If you supply the parameter --auth-proxy /path/to/program
then rclone will use that program to generate backends on the fly which then are used to authenticate incoming requests. This uses a simple JSON based protocol with input on STDIN and output on STDOUT.
+PLEASE NOTE: --auth-proxy
and --authorized-keys
cannot be used together; if --auth-proxy
is set the authorized keys option will be ignored.
+There is an example program bin/test_proxy.py in the rclone source code.
+The program's job is to take a user
and pass
on the input and turn those into the config for a backend on STDOUT in JSON format. This config will have any default parameters for the backend added, but it won't use configuration from environment variables or command line options - it is the job of the proxy program to make a complete config.
+This config generated must have this extra parameter - _root
- root to use for the backend
+And it may have this parameter - _obscure
- comma separated strings for parameters to obscure
+If password authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
+{
+ "user": "me",
+ "pass": "mypassword"
+}
+If public-key authentication was used by the client, input to the proxy process (on STDIN) would look similar to this:
+{
+ "user": "me",
+ "public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
+}
+And as an example return this on STDOUT
+{
+ "type": "sftp",
+ "_root": "",
+ "_obscure": "pass",
+ "user": "me",
+ "pass": "mypassword",
+ "host": "sftp.example.com"
+}
+This would mean that an SFTP backend would be created on the fly for the user
and pass
/public_key
returned in the output to the host given. Note that since _obscure
is set to pass
, rclone will obscure the pass
parameter before creating the backend (which is required for sftp backends).
+The program can manipulate the supplied user
in any way, for example to proxy to many different sftp backends, you could make the user
be user@example.com
and then set the host
to example.com
in the output and the user to user
. For security you'd probably want to restrict the host
to a limited list.
+Note that an internal cache is keyed on user
so only use that for configuration, don't use pass
or public_key
. This also means that if a user's password or public-key is changed the cache will need to expire (which takes 5 mins) before it takes effect.
+This can be used to build general purpose proxies to any kind of backend that rclone supports.
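+As an illustration only, the following deliberately trivial proxy script follows the protocol above: it discards whatever JSON arrives on STDIN (so it accepts any credentials, which a real proxy must never do) and always returns the example sftp config on STDOUT. A real program should parse the user and pass/public_key fields and validate them before answering.
+#!/bin/sh
+# Toy auth proxy sketch: read and ignore the request JSON, then emit a fixed config.
+# Do not use as-is - it performs no credential checking at all.
+cat > /dev/null
+cat << 'EOF'
+{
+    "type": "sftp",
+    "_root": "",
+    "_obscure": "pass",
+    "user": "me",
+    "pass": "mypassword",
+    "host": "sftp.example.com"
+}
+EOF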
+rclone serve sftp remote:path [flags]
+Options
+ --addr string IPaddress:Port or :Port to bind server to (default "localhost:2022")
+ --auth-proxy string A program to use to create the backend from the auth
+ --authorized-keys string Authorized keys file (default "~/.ssh/authorized_keys")
+ --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
+ --dir-perms FileMode Directory permissions (default 0777)
+ --file-perms FileMode File permissions (default 0666)
+ --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
+ -h, --help help for sftp
+ --key stringArray SSH private host key file (Can be multi-valued, leave blank to auto generate)
+ --no-auth Allow connections with no authentication if set
+ --no-checksum Don't compare checksums on up/download
+ --no-modtime Don't read/write the modification time (can speed things up)
+ --no-seek Don't allow seeking in files
+ --pass string Password for authentication
+ --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
+ --read-only Only allow read-only access
+ --stdio Run an sftp server on stdin/stdout
+ --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
+ --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+ --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+ --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+ --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
+ --vfs-case-insensitive If a file name not found, find a case insensitive match
+ --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
+ --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
+ --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
+ --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
+ --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+ --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
+ --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
+ --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
+ --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
+Filter Options
+Flags for filtering directory listings.
+ --delete-excluded Delete files on dest excluded from sync
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --ignore-case Ignore case in filters (case insensitive)
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --metadata-exclude stringArray Exclude metadatas matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray Include metadatas matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+See the global flags page for global options not listed here.
+SEE ALSO
+
rclone serve webdav
Serve remote:path over WebDAV.
-Synopsis
+Synopsis
Run a basic WebDAV server to serve a remote over HTTP via the WebDAV protocol. This can be viewed with a WebDAV client, through a web browser, or you can make a remote of type WebDAV to read and write it.
WebDAV options
--etag-hash
@@ -5533,7 +5978,7 @@ htpasswd -B htpasswd anotherUser
This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something which looks much more like a disk filing system.
Cloud storage objects have lots of properties which aren't like disk files - you can't extend them or write to the middle of them, so the VFS layer has to deal with that. Because there is no one right way of doing this there are various options explained below.
The VFS layer also implements a directory cache - this caches info about files and directories (but not the data) in memory.
-VFS Directory Cache
+VFS Directory Cache
Using the --dir-cache-time
flag, you can control how long a directory should be considered up to date and not refreshed from the backend. Changes made through the VFS will appear immediately or invalidate the cache.
--dir-cache-time duration Time to cache directory entries for (default 5m0s)
--poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
@@ -5544,12 +5989,12 @@ htpasswd -B htpasswd anotherUser
rclone rc vfs/forget
Or individual files or directories:
rclone rc vfs/forget file=path/to/file dir=path/to/dir
-VFS File Buffering
+VFS File Buffering
The --buffer-size
flag determines the amount of memory that will be used to buffer data in advance.
Each open file will try to keep the specified amount of data in memory at all times. The buffered data is bound to one open file and won't be shared.
This flag is an upper limit for the used memory per open file. The buffer will only use memory for data that is downloaded but not yet read. If the buffer is empty, only a small amount of memory will be used.
The maximum memory used by rclone for buffering can be up to --buffer-size * open files
.
-VFS File Caching
+VFS File Caching
These flags control the VFS file caching options. File caching is necessary to make the VFS layer appear compatible with a normal file system. It can be disabled at the cost of some compatibility.
For example you'll need to enable VFS caching if you want to read and write simultaneously to a file. See below for more details.
Note that the VFS cache is separate from the cache backend and you may find that you need one or the other or both.
@@ -5566,7 +6011,7 @@ htpasswd -B htpasswd anotherUser
If using --vfs-cache-max-size
or --vfs-cache-min-free-space
note that the cache may exceed these quotas for two reasons. Firstly because it is only checked every --vfs-cache-poll-interval
. Secondly because open files cannot be evicted from the cache. When --vfs-cache-max-size
or --vfs-cache-min-free-space
is exceeded, rclone will attempt to evict the least accessed files from the cache first. rclone will start with files that haven't been accessed for the longest. This cache flushing strategy is efficient and more relevant files are likely to remain cached.
The --vfs-cache-max-age
will evict files from the cache after the set time since last access has passed. The default value of 1 hour will start evicting files from cache that haven't been accessed for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0 and will wait for 1 more hour before evicting. Specify the time with standard notation, s, m, h, d, w.
You should not run two copies of rclone using the same VFS cache with the same or overlapping remotes if using --vfs-cache-mode > off
. This can potentially cause data corruption if you do. You can work around this by giving each rclone its own cache hierarchy with --cache-dir
. You don't need to worry about this if the remotes in use don't overlap.
---vfs-cache-mode off
+--vfs-cache-mode off
In this mode (the default) the cache will read directly from the remote and write directly to the remote without caching anything on disk.
This will mean some operations are not possible
@@ -5578,7 +6023,7 @@ htpasswd -B htpasswd anotherUser
- Open modes O_APPEND, O_TRUNC are ignored
- If an upload fails it can't be retried
---vfs-cache-mode minimal
+--vfs-cache-mode minimal
This is very similar to "off" except that files opened for read AND write will be buffered to disk. This means that files opened for write will be a lot more compatible, but uses the minimal disk space.
These operations are not possible
@@ -5587,11 +6032,11 @@ htpasswd -B htpasswd anotherUser
- Files opened for write only will ignore O_APPEND, O_TRUNC
- If an upload fails it can't be retried
---vfs-cache-mode writes
+--vfs-cache-mode writes
In this mode files opened for read only are still read directly from the remote, while write only and read/write files are buffered to disk first.
This mode should support all normal file system operations.
If an upload fails it will be retried at exponentially increasing intervals up to 1 minute.
---vfs-cache-mode full
+--vfs-cache-mode full
In this mode all reads and writes are buffered to and from disk. When data is read from the remote this is buffered to disk as well.
In this mode the files in the cache will be sparse files and rclone will keep track of which bits of the files it has downloaded.
So if an application only reads the starts of each file, then rclone will only buffer the start of the file. These files will appear to be their full size in the cache, but they will be sparse files with only the data that has been downloaded present in them.
@@ -5599,7 +6044,7 @@ htpasswd -B htpasswd anotherUser
When reading a file rclone will read --buffer-size
plus --vfs-read-ahead
bytes ahead. The --buffer-size
is buffered in memory whereas the --vfs-read-ahead
is buffered on disk.
When using this mode it is recommended that --buffer-size
is not set too large and --vfs-read-ahead
is set large if required.
IMPORTANT not all file systems support sparse files. In particular FAT/exFAT do not. Rclone will perform very badly if the cache directory is on a filesystem which doesn't support sparse files and it will log an ERROR message if one is detected.
-Fingerprinting
+Fingerprinting
Various parts of the VFS use fingerprinting to see if a local file copy has changed relative to a remote file. Fingerprints are made from:
- size
@@ -5612,7 +6057,7 @@ htpasswd -B htpasswd anotherUser
If you use the --vfs-fast-fingerprint
flag then rclone will not include the slow operations in the fingerprint. This makes the fingerprinting less accurate but much faster and will improve the opening time of cached files.
If you are running a vfs cache over local
, s3
or swift
backends then using this flag is recommended.
Note that if you change the value of this flag, the fingerprints of the files in the cache may be invalidated and the files will need to be downloaded again.
-VFS Chunked Reading
+VFS Chunked Reading
When rclone reads files from a remote it reads them in chunks. This means that rather than requesting the whole file rclone reads the chunk specified. This can reduce the used download quota for some remotes by requesting only chunks from the remote that are actually read, at the cost of an increased number of requests.
These flags control the chunking:
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
@@ -5620,7 +6065,7 @@ htpasswd -B htpasswd anotherUser
Rclone will start reading a chunk of size --vfs-read-chunk-size
, and then double the size for each read. When --vfs-read-chunk-size-limit
is specified, and greater than --vfs-read-chunk-size
, the chunk size for each open file will get doubled only until the specified value is reached. If the value is "off", which is the default, the limit is disabled and the chunk size will grow indefinitely.
With --vfs-read-chunk-size 100M
and --vfs-read-chunk-size-limit 0
the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on. When --vfs-read-chunk-size-limit 500M
is specified, the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
Setting --vfs-read-chunk-size
to 0
or "off" disables chunked reading.
-VFS Performance
+VFS Performance
These flags may be used to enable/disable features of the VFS for performance or other reasons. See also the chunked reading feature.
In particular S3 and Swift benefit hugely from the --no-modtime
flag (or use --use-server-modtime
for a slightly different effect) as each read of the modification time takes a transaction.
--no-checksum Don't compare checksums on up/download.
@@ -5632,7 +6077,7 @@ htpasswd -B htpasswd anotherUser
--vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
When using VFS write caching (--vfs-cache-mode
with value writes or full), the global flag --transfers
can be set to adjust the number of parallel uploads of modified files from the cache (the related global flag --checkers
has no effect on the VFS).
--transfers int Number of file transfers to run in parallel (default 4)
-VFS Case Sensitivity
+VFS Case Sensitivity
Linux file systems are case-sensitive: two files can differ only by case, and the exact case must be used when opening a file.
File systems in modern Windows are case-insensitive but case-preserving: although existing files can be opened using any case, the exact case used to create the file is preserved and available for programs to query. It is not allowed for two files in the same directory to differ only by case.
Usually file systems on macOS are case-insensitive. It is possible to make macOS file systems case-sensitive but that is not the default.
@@ -5640,16 +6085,18 @@ htpasswd -B htpasswd anotherUser
The user may specify a file name to open/delete/rename/etc with a case different than what is stored on the remote. If an argument refers to an existing file with exactly the same name, then the case of the existing file on the disk will be used. However, if a file name with exactly the same name is not found but a name differing only by case exists, rclone will transparently fixup the name. This fixup happens only when an existing file is requested. Case sensitivity of file names created anew by rclone is controlled by the underlying remote.
Note that case sensitivity of the operating system running rclone (the target) may differ from case sensitivity of a file system presented by rclone (the source). The flag controls whether "fixup" is performed to satisfy the target.
If the flag is not provided on the command line, then its default value depends on the operating system where rclone runs: "true" on Windows and macOS, "false" otherwise. If the flag is provided without a value, then it is "true".
-VFS Disk Options
+The --no-unicode-normalization
flag controls whether a similar "fixup" is performed for filenames that differ but are canonically equivalent with respect to unicode. Unicode normalization can be particularly helpful for users of macOS, which prefers form NFD instead of the NFC used by most other platforms. It is therefore highly recommended to keep the default of false
on macOS, to avoid encoding compatibility issues.
+In the (probably unlikely) event that a directory has multiple duplicate filenames after applying case and unicode normalization, the --vfs-block-norm-dupes
flag allows hiding these duplicates. This comes with a performance tradeoff, as rclone will have to scan the entire directory for duplicates when listing a directory. For this reason, it is recommended to leave this disabled if not needed. However, macOS users may wish to consider using it, as otherwise, if a remote directory contains both NFC and NFD versions of the same filename, an odd situation will occur: both versions of the file will be visible in the mount, and both will appear to be editable; however, editing either version will actually result in only the NFD version getting edited under the hood. --vfs-block-norm-dupes
prevents this confusion by detecting this scenario, hiding the duplicates, and logging an error, similar to how this is handled in rclone sync
.
+VFS Disk Options
This flag allows you to manually set the statistics about the filing system. It can be useful when those statistics cannot be read correctly automatically.
--vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
-Alternate report of used bytes
+Alternate report of used bytes
Some backends, most notably S3, do not report the amount of bytes used. If you need this information to be available when running df
on the filesystem, then pass the flag --vfs-used-is-size
to rclone. With this flag set, instead of relying on the backend to report this information, rclone will scan the whole remote similar to rclone size
and compute the total used space itself.
WARNING. Contrary to rclone size
, this flag ignores filters so that the result is accurate. However, this is very inefficient and may cost lots of API calls resulting in extra charges. Use it as a last resort and only with caching.
Auth Proxy
If you supply the parameter --auth-proxy /path/to/program
then rclone will use that program to generate backends on the fly which then are used to authenticate incoming requests. This uses a simple JSON based protocol with input on STDIN and output on STDOUT.
PLEASE NOTE: --auth-proxy
and --authorized-keys
cannot be used together; if --auth-proxy
is set the authorized keys option will be ignored.
-There is an example program bin/test_proxy.py in the rclone source code.
+There is an example program bin/test_proxy.py in the rclone source code.
The program's job is to take a user
and pass
on the input and turn those into the config for a backend on STDOUT in JSON format. This config will have any default parameters for the backend added, but it won't use configuration from environment variables or command line options - it is the job of the proxy program to make a complete config.
This config generated must have this extra parameter - _root
- root to use for the backend
And it may have this parameter - _obscure
- comma separated strings for parameters to obscure
@@ -5677,7 +6124,7 @@ htpasswd -B htpasswd anotherUser
Note that an internal cache is keyed on user
so only use that for configuration, don't use pass
or public_key
. This also means that if a user's password or public-key is changed the cache will need to expire (which takes 5 mins) before it takes effect.
This can be used to build general purpose proxies to any kind of backend that rclone supports.
rclone serve webdav remote:path [flags]
-Options
+Options
--addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
--allow-origin string Origin which cross-domain request (CORS) can be executed from
--auth-proxy string A program to use to create the backend from the auth
@@ -5709,6 +6156,7 @@ htpasswd -B htpasswd anotherUser
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -5721,11 +6169,11 @@ htpasswd -B htpasswd anotherUser
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
-Filter Options
+Filter Options
Flags for filtering directory listings.
--delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern
@@ -5750,13 +6198,13 @@ htpasswd -B htpasswd anotherUser
--min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
rclone settier
Changes storage class/tier of objects in remote.
-Synopsis
+Synopsis
rclone settier changes the storage tier or class at the remote if supported. A few cloud storage services provide different storage classes on objects, for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive, Google Cloud Storage - Regional Storage, Nearline, Coldline etc.
Note that certain tier changes make objects unavailable to access immediately. For example, tiering to archive in Azure Blob storage puts objects in a frozen state; the user can restore them by setting the tier to Hot/Cool. Similarly, S3 to Glacier makes the object inaccessible.
You can use it to tier single object
@@ -5766,25 +6214,25 @@ htpasswd -B htpasswd anotherUser
Or just provide remote directory and all files in directory will be tiered
rclone settier tier remote:path/dir
rclone settier tier remote:path [flags]
-Options
+Options
-h, --help help for settier
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
rclone test
Run a test command
-Synopsis
+Synopsis
Rclone test is used to run test commands.
Select which test command you want with the subcommand, eg
rclone test memory remote:
Each subcommand has its own options which you can see in their help.
NB Be careful running these commands, they may do strange things so reading their documentation first is recommended.
-Options
+Options
-h, --help help for test
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
rclone test changenotify remote: [flags]
-Options
+Options
-h, --help help for changenotify
--poll-interval Duration Time to wait between polling for changes (default 10s)
See the global flags page for global options not listed here.
-SEE ALSO
-
-rclone test histogram
-Makes a histogram of file name characters.
-Synopsis
-This command outputs JSON which shows the histogram of characters used in filenames in the remote:path specified.
-The data doesn't contain any identifying information but is useful for the rclone developers when developing filename compression.
-rclone test histogram [remote:path] [flags]
-Options
- -h, --help help for histogram
-See the global flags page for global options not listed here.
SEE ALSO
+rclone test histogram
+Makes a histogram of file name characters.
+Synopsis
+This command outputs JSON which shows the histogram of characters used in filenames in the remote:path specified.
+The data doesn't contain any identifying information but is useful for the rclone developers when developing filename compression.
+rclone test histogram [remote:path] [flags]
+Options
+ -h, --help help for histogram
+See the global flags page for global options not listed here.
+SEE ALSO
+
rclone test info
Discovers file name or other limitations for paths.
-Synopsis
+Synopsis
rclone info discovers what filenames and upload methods are possible to write to the paths passed in and how long they can be. It can take some time. It will write test files into the remote:path passed in. It outputs a bit of go code for each one.
NB this can create undeletable files and other hazards - use with care
rclone test info [remote:path]+ [flags]
-Options
+Options
--all Run all tests
--check-base32768 Check can store all possible base32768 characters
--check-control Check control characters
@@ -5835,14 +6283,14 @@ htpasswd -B htpasswd anotherUser
--upload-wait Duration Wait after writing a file (default 0s)
--write-json string Write results to file
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
rclone test makefile
Make files with random contents of the size given
rclone test makefile <size> [<file>]+ [flags]
-Options
+Options
--ascii Fill files with random ASCII printable bytes only
--chargen Fill files with an ASCII chargen pattern
-h, --help help for makefile
@@ -5851,14 +6299,14 @@ htpasswd -B htpasswd anotherUser
--sparse Make the files sparse (appear to be filled with ASCII 0x00)
--zero Fill files with ASCII 0x00
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
rclone test makefiles
Make a random file hierarchy in a directory
rclone test makefiles <dir> [flags]
-Options
+Options
--ascii Fill files with random ASCII printable bytes only
--chargen Fill files with an ASCII chargen pattern
--files int Number of files to create (default 1000)
@@ -5874,23 +6322,23 @@ htpasswd -B htpasswd anotherUser
--sparse Make the files sparse (appear to be filled with ASCII 0x00)
--zero Fill files with ASCII 0x00
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
rclone test memory
Load all the objects at remote:path into memory and report memory stats.
rclone test memory remote:path [flags]
-Options
+Options
-h, --help help for memory
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
rclone touch
Create new file or change file modification time.
-Synopsis
+Synopsis
Set the modification time on file(s) as specified by remote:path to have the current time.
If remote:path does not exist then a zero sized file will be created, unless --no-create or --recursive is provided.
If --recursive is used then it recursively sets the modification time on all existing files that are found under the path. Filters are supported, and you can test with the --dry-run or the --interactive/-i flag.
@@ -5902,7 +6350,7 @@ htpasswd -B htpasswd anotherUser
Note that the value of --timestamp is in UTC. If you want local time then add the --localtime flag.
rclone touch remote:path [flags]
-Options
+Options
-h, --help help for touch
--localtime Use localtime for timestamp, not UTC
-C, --no-create Do not create the file if it does not exist (implied with --recursive)
@@ -5913,7 +6361,7 @@ htpasswd -B htpasswd anotherUser
-n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more)
-Filter Options
+Filter Options
Flags for filtering directory listings.
--delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern
@@ -5942,13 +6390,13 @@ htpasswd -B htpasswd anotherUser
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
rclone tree
List the contents of the remote in a tree like fashion.
-Synopsis
+Synopsis
rclone tree lists the contents of a remote in a similar way to the unix tree command.
For example
$ rclone tree remote:path
@@ -5965,7 +6413,7 @@ htpasswd -B htpasswd anotherUser
The tree command has many options for controlling the listing which are compatible with the tree command, for example you can include file sizes with --size. Note that not all of them have short options as they conflict with rclone's short options.
For a more interactive navigation of the remote see the ncdu command.
rclone tree remote:path [flags]
-Options
+Options
-a, --all All files are listed (list . files too)
-d, --dirs-only List directories only
--dirsfirst List directories before files (-U disables)
@@ -5985,7 +6433,7 @@ htpasswd -B htpasswd anotherUser
-r, --sort-reverse Reverse the order of the sort
-U, --unsorted Leave files unsorted
--version Sort files alphanumerically by version
-Filter Options
+Filter Options
Flags for filtering directory listings.
--delete-excluded Delete files on dest excluded from sync
--exclude stringArray Exclude files matching pattern
@@ -6014,7 +6462,7 @@ htpasswd -B htpasswd anotherUser
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions
See the global flags page for global options not listed here.
-SEE ALSO
+SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
@@ -6129,13 +6577,15 @@ rclone copy :sftp,host=example.com:path/to/dir /tmp/dir
rclone sync --interactive remote:current-backup remote:previous-backup
rclone sync --interactive /path/to/files remote:current-backup
-Metadata is data about a file which isn't the contents of the file. Normally rclone only preserves the modification time and the content (MIME) type where possible.
-Rclone supports preserving all the available metadata on files (not directories) when using the --metadata or -M flag.
+Metadata is data about a file (or directory) which isn't the contents of the file (or directory). Normally rclone only preserves the modification time and the content (MIME) type where possible.
+Rclone supports preserving all the available metadata on files and directories when using the --metadata or -M flag.
Exactly what metadata is supported and what that support means depends on the backend. Backends that support metadata have a metadata section in their docs and are listed in the features table (Eg local, s3)
+Some backends don't support metadata, some only support metadata on files and some support metadata on both files and directories.
Rclone only supports a one-time sync of metadata. This means that metadata will be synced from the source object to the destination object only when the source object has changed and needs to be re-uploaded. If the metadata subsequently changes on the source object without changing the object itself then it won't be synced to the destination object. This is in line with the way rclone syncs Content-Type without the --metadata flag.
Using --metadata when syncing from local to local will preserve file attributes such as file mode, owner, extended attributes (not Windows).
Note that arbitrary metadata may be added to objects using the --metadata-set key=value flag when the object is first uploaded. This flag can be repeated as many times as necessary.
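For example, a copy along these lines (the key names and values here are purely illustrative) would attach two extra metadata items to the uploaded object:
rclone copy -M --metadata-set description="my nice file" --metadata-set owner=user1@domain1.com file.txt remote:dir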
The --metadata-mapper flag can be used to pass the name of a program which can transform metadata when it is being copied from source to destination.
+Rclone supports --metadata-set and --metadata-mapper when doing server side Move and server side Copy, but not when doing server side DirMove (renaming a directory) as this would involve recursing into the directory. Note that you can disable DirMove with --disable DirMove and rclone will revert to using Move for each individual object where --metadata-set and --metadata-mapper are supported.
Metadata is divided into two types: System metadata and User metadata.
Metadata which the backend uses itself is called system metadata. For example on the local backend the system metadata uid will store the user ID of the file when used on a unix based platform.
@@ -6246,7 +6696,7 @@ rclone sync --interactive /path/to/files remote:current-backup
The metadata keys mtime and content-type will take precedence if supplied in the metadata over reading the Content-Type or modification time of the source object.
Hashes are not included in system metadata as there is a well defined way of reading those already.
-Options
+Options
Rclone has a number of options to control its behaviour.
Options that take parameters can have the values passed in two ways, --option=value or --option value. However boolean (true/false) options behave slightly differently to the other options in that --boolean sets the option to true and the absence of the flag sets it to false. It is also possible to specify --boolean=false or --boolean=true. Note that --boolean false is not valid - this is parsed as --boolean and the false is parsed as an extra command line argument for rclone.
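As a quick illustration using the boolean --checksum flag (any boolean flag behaves the same way): the first two forms below enable and explicitly disable checksum checking, while the third enables it and then treats "false" as an extra path argument, which is almost certainly not what was intended.
rclone copy --checksum /source remote:dest
rclone copy --checksum=false /source remote:dest
rclone copy --checksum false /source remote:dest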
Time or duration options
@@ -6441,6 +6891,9 @@ See the dedupe command for more information as to what these options mean.
By default, rclone will exit with return code 0 if there were no errors.
This option allows rclone to return exit code 9 if no files were transferred between the source and destination. This allows using rclone in scripts, and triggering follow-on actions if data was copied, or skipping if not.
NB: Enabling this option turns a usually non-fatal error into a potentially fatal one - please check and adjust your scripts accordingly!
+--fix-case
+Normally, a sync to a case insensitive dest (such as macOS / Windows) will not result in a matching filename if the source and dest filenames have casing differences but are otherwise identical. For example, syncing hello.txt to HELLO.txt will normally result in the dest filename remaining HELLO.txt. If --fix-case is set, then HELLO.txt will be renamed to hello.txt to match the source.
+NB:
- directory names with incorrect casing will also be fixed
- --fix-case will be ignored if --immutable is set
- using --local-case-sensitive instead is not advisable; it will cause HELLO.txt to get deleted!
- the old dest filename must not be excluded by filters. Be especially careful with --files-from, which does not respect --ignore-case!
- on remotes that do not support server-side move, --fix-case will require downloading the file and re-uploading it. To avoid this, do not use --fix-case.
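A minimal illustrative invocation (the paths here are placeholders) would be:
rclone sync --fix-case /path/to/files remote:current-backup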
--fs-cache-expire-duration=TIME
When using rclone via the API rclone caches created remotes for 5 minutes by default in the "fs cache". This means that if you do repeated actions on the same remote then rclone won't have to build it again from scratch, which makes it more efficient.
This flag sets the time that the remotes are cached for. If you set it to 0 (or negative) then rclone won't cache the remotes at all.
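For example, to keep remotes cached for 30 minutes (an illustrative value) when running the remote control daemon:
rclone rcd --fs-cache-expire-duration 30m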
@@ -6592,62 +7045,62 @@ y/n/s/!/q> n
SrcFsType is the name of the source backend.
DstFs is the config string for the remote that the object is being copied to
DstFsType is the name of the destination backend.
-Remote is the path of the file relative to the root.
-Size, MimeType, ModTime are attributes of the file.
+Remote is the path of the object relative to the root.
+Size, MimeType, ModTime are attributes of the object.
IsDir is true if this is a directory (not yet implemented).
-ID is the source ID of the file if known.
+ID is the source ID of the object if known.
Metadata is the backend specific metadata as described in the backend docs.
-{
- "SrcFs": "gdrive:",
- "SrcFsType": "drive",
- "DstFs": "newdrive:user",
- "DstFsType": "onedrive",
- "Remote": "test.txt",
- "Size": 6,
- "MimeType": "text/plain; charset=utf-8",
- "ModTime": "2022-10-11T17:53:10.286745272+01:00",
- "IsDir": false,
- "ID": "xyz",
- "Metadata": {
- "btime": "2022-10-11T16:53:11Z",
- "content-type": "text/plain; charset=utf-8",
- "mtime": "2022-10-11T17:53:10.286745272+01:00",
- "owner": "user1@domain1.com",
- "permissions": "...",
- "description": "my nice file",
- "starred": "false"
- }
-}
+{
+ "SrcFs": "gdrive:",
+ "SrcFsType": "drive",
+ "DstFs": "newdrive:user",
+ "DstFsType": "onedrive",
+ "Remote": "test.txt",
+ "Size": 6,
+ "MimeType": "text/plain; charset=utf-8",
+ "ModTime": "2022-10-11T17:53:10.286745272+01:00",
+ "IsDir": false,
+ "ID": "xyz",
+ "Metadata": {
+ "btime": "2022-10-11T16:53:11Z",
+ "content-type": "text/plain; charset=utf-8",
+ "mtime": "2022-10-11T17:53:10.286745272+01:00",
+ "owner": "user1@domain1.com",
+ "permissions": "...",
+ "description": "my nice file",
+ "starred": "false"
+ }
+}
The program should then modify the input as desired and send it to STDOUT. The returned Metadata
field will be used in its entirety for the destination object. Any other fields will be ignored. Note in this example we translate user names and permissions and add something to the description:
-{
- "Metadata": {
- "btime": "2022-10-11T16:53:11Z",
- "content-type": "text/plain; charset=utf-8",
- "mtime": "2022-10-11T17:53:10.286745272+01:00",
- "owner": "user1@domain2.com",
- "permissions": "...",
- "description": "my nice file [migrated from domain1]",
- "starred": "false"
- }
-}
+{
+ "Metadata": {
+ "btime": "2022-10-11T16:53:11Z",
+ "content-type": "text/plain; charset=utf-8",
+ "mtime": "2022-10-11T17:53:10.286745272+01:00",
+ "owner": "user1@domain2.com",
+ "permissions": "...",
+ "description": "my nice file [migrated from domain1]",
+ "starred": "false"
+ }
+}
Metadata can be removed here too.
An example python program might look something like this to implement the above transformations.
-import sys, json
-
-i = json.load(sys.stdin)
-metadata = i["Metadata"]
-# Add tag to description
-if "description" in metadata:
- metadata["description"] += " [migrated from domain1]"
-else:
- metadata["description"] = "[migrated from domain1]"
-# Modify owner
-if "owner" in metadata:
- metadata["owner"] = metadata["owner"].replace("domain1.com", "domain2.com")
-o = { "Metadata": metadata }
-json.dump(o, sys.stdout, indent="\t")
-You can find this example (slightly expanded) in the rclone source code at bin/test_metadata_mapper.py.
+import sys, json
+
+i = json.load(sys.stdin)
+metadata = i["Metadata"]
+# Add tag to description
+if "description" in metadata:
+ metadata["description"] += " [migrated from domain1]"
+else:
+ metadata["description"] = "[migrated from domain1]"
+# Modify owner
+if "owner" in metadata:
+ metadata["owner"] = metadata["owner"].replace("domain1.com", "domain2.com")
+o = { "Metadata": metadata }
+json.dump(o, sys.stdout, indent="\t")
+You can find this example (slightly expanded) in the rclone source code at bin/test_metadata_mapper.py.
If you want to see the input to the metadata mapper and the output returned from it in the log you can use -vv --dump mapper.
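Putting the pieces above together, an illustrative run (remote names taken from the example JSON above, script path from the rclone source tree) might look like:
rclone copy gdrive: newdrive:user -M --metadata-mapper bin/test_metadata_mapper.py -vv --dump mapper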
See the metadata section for more info.
@@ -6666,7 +7119,7 @@ y/n/s/!/q> n
In this case the value of this option is used (default 64Mi).
--multi-thread-cutoff=SIZE
When transferring files above SIZE to capable backends, rclone will use multiple threads to transfer the file (default 256M).
-Capable backends are marked in the overview as MultithreadUpload. (They need to implement either the OpenWriterAt or OpenChunkedWriter internal interfaces). These include include, local, s3, azureblob, b2, oracleobjectstorage and smb at the time of writing.
+Capable backends are marked in the overview as MultithreadUpload. (They need to implement either the OpenWriterAt or OpenChunkWriter internal interfaces). These include local, s3, azureblob, b2, oracleobjectstorage and smb at the time of writing.
On the local disk, rclone preallocates the file (using fallocate(FALLOC_FL_KEEP_SIZE) on unix or NTSetInformationFile on Windows, both of which take no time) then each thread writes directly into the file at the correct place. This means that rclone won't create fragmented or sparse files and there won't be any assembly time at the end of the transfer.
The number of threads used to transfer is controlled by --multi-thread-streams.
Use -vv if you wish to see info about the threads.
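For instance, the following illustrative copy forces multi-thread transfers for files over 100M, uses 8 streams per file, and shows per-thread info with -vv:
rclone copy remote:path /local/path --multi-thread-cutoff 100M --multi-thread-streams 8 -vv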
@@ -6703,6 +7156,8 @@ y/n/s/!/q> n
--no-update-modtime
When using this flag, rclone won't update modification times of remote files if they are incorrect as it would normally.
This can be used if the remote is being synced with another tool also (e.g. the Google Drive client).
+--no-update-dir-modtime
+When using this flag, rclone won't update modification times of remote directories if they are incorrect as it would normally.
--order-by string
The --order-by flag controls the order in which files in the backlog are processed in rclone sync, rclone copy and rclone move.
The order by string is constructed like this. The first part describes what aspect is being measured:
@@ -6726,7 +7181,7 @@ y/n/s/!/q> n
--order-by name
- send the files alphabetically by path first
If the --order-by flag is not supplied or it is supplied with an empty string then the default ordering will be used which is as scanned. With --checkers 1 this is mostly alphabetical, however with the default --checkers 8 it is somewhat random.
-Limitations
+Limitations
The --order-by flag does not do a separate pass over the data. This means that it may transfer some files out of the order specified if
- there are no files in the backlog or the source has not been fully scanned yet
@@ -7057,7 +7512,7 @@ export RCLONE_CONFIG_PASS
Environment Variables
Rclone can be configured entirely using environment variables. These can be used to set defaults for options or config file entries.
-Options
+Options
Every option in rclone can have its default set by environment variable.
To find the name of the environment variable, first, take the long option name, strip the leading --, change - to _, make upper case and prepend RCLONE_.
For example, to always set --stats 5s, set the environment variable RCLONE_STATS=5s. If you set stats on the command line this will override the environment variable setting.
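As a further illustration of the renaming rule (the option and value here are just an example), the dashed option --max-delete-size maps to:
export RCLONE_MAX_DELETE_SIZE=1G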
@@ -7138,12 +7593,12 @@ For more help and alternate methods see: https://rclone.org/remote_setup/
Execute the following on the machine with the web browser (same rclone
version recommended):
- rclone authorize "amazon cloud drive"
+ rclone authorize "dropbox"
Then paste the result below:
result>
Then on your main desktop machine
-rclone authorize "amazon cloud drive"
+rclone authorize "dropbox"
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
@@ -7525,7 +7980,7 @@ file2.avi
+ *.png
+ file2.avi
- *
-Files file1.jpg, file3.png and file2.avi are listed whilst secret17.jpg and files without the suffix .jpgor .png` are excluded.
+Files file1.jpg, file3.png and file2.avi are listed whilst secret17.jpg and files without the suffix .jpg or .png are excluded.
E.g. for an alternative filter-file.txt:
+ *.jpg
+ *.gif
@@ -8034,6 +8489,21 @@ rclone rc cache/expire remote=/ withData=true
See the config password command for more information on the above.
Authentication is required for this call.
+config/paths: Reads the config file path and other important paths.
+Returns a JSON object with the following keys:
+
+- config: path to config file
+- cache: path to root of cache directory
+- temp: path to root of temporary directory
+
+Eg
+{
+ "cache": "/home/USER/.cache/rclone",
+ "config": "/home/USER/.rclone.conf",
+ "temp": "/tmp"
+}
+See the config paths command for more information on the above.
+Authentication is required for this call.
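A minimal way to call this from the command line (using the loopback connection shown elsewhere in this manual) would be:
rclone rc --loopback config/paths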
config/providers: Shows how providers are configured in the config file.
Returns a JSON object: - providers - array of objects
See the config providers command for more information on the above.
@@ -8602,6 +9072,37 @@ rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"Cache
}
This command does not have a command line equivalent so use this instead:
rclone rc --loopback operations/fsinfo fs=remote:
+operations/hashsum: Produces a hashsum file for all the objects in the path.
+Produces a hash file for all the objects in the path using the hash named. The output is in the same format as the standard md5sum/sha1sum tool.
+This takes the following parameters:
+
+- fs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
+
+- this can point to a file and just that file will be returned in the listing.
+
+- hashType - type of hash to be used
+- download - check by downloading rather than with hash (boolean)
+- base64 - output the hashes in base64 rather than hex (boolean)
+
+If you supply the download flag, it will download the data from the remote and create the hash on the fly. This can be useful for remotes that don't support the given hash or if you really want to check all the data.
+Note that if you wish to supply a checkfile to check hashes against the current files then you should use operations/check instead of operations/hashsum.
+Returns:
+
+- hashsum - array of strings of the hashes
+- hashType - type of hash used
+
+Example:
+$ rclone rc --loopback operations/hashsum fs=bin hashType=MD5 download=true base64=true
+{
+ "hashType": "md5",
+ "hashsum": [
+ "WTSVLpuiXyJO_kGzJerRLg== backend-versions.sh",
+ "v1b_OlWCJO9LtNq3EIKkNQ== bisect-go-rclone.sh",
+ "VHbmHzHh4taXzgag8BAIKQ== bisect-rclone.sh",
+ ]
+}
+See the hashsum command for more information on the above.
+Authentication is required for this call.
operations/list: List the given remote and path in JSON format
This takes the following parameters:
@@ -8855,7 +9356,9 @@ rclone rc options/set --json '{"main": {"LogLevel": 8}}&
- filtersFile - read filtering patterns from a file
- ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync. Use at your own risk!
-- workdir - server directory for history files (default: /home/ncw/.cache/rclone/bisync)
+- workdir - server directory for history files (default: ~/.cache/rclone/bisync)
+- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
- noCleanup - retain working files
See bisync command help and full bisync description for more information.
@@ -9100,15 +9603,6 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
-Amazon Drive |
-MD5 |
-- |
-Yes |
-No |
-R |
-- |
-
-
Amazon S3 (or S3 compatible) |
MD5 |
R/W |
@@ -9117,7 +9611,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
RWU |
-
+
Backblaze B2 |
SHA1 |
R/W |
@@ -9126,7 +9620,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
- |
-
+
Box |
SHA1 |
R/W |
@@ -9135,7 +9629,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Citrix ShareFile |
MD5 |
R/W |
@@ -9144,7 +9638,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Dropbox |
DBHASH ¹ |
R |
@@ -9153,7 +9647,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Enterprise File Fabric |
- |
R/W |
@@ -9162,7 +9656,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
- |
-
+
FTP |
- |
R/W ¹⁰ |
@@ -9171,7 +9665,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Google Cloud Storage |
MD5 |
R/W |
@@ -9180,16 +9674,16 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
- |
-
+
Google Drive |
MD5, SHA1, SHA256 |
-R/W |
+DR/W |
No |
Yes |
R/W |
-- |
+DRWU |
-
+
Google Photos |
- |
- |
@@ -9198,7 +9692,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
HDFS |
- |
R/W |
@@ -9207,7 +9701,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
HiDrive |
HiDrive ¹² |
R/W |
@@ -9216,7 +9710,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
HTTP |
- |
R |
@@ -9225,7 +9719,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
Internet Archive |
MD5, SHA1, CRC32 |
R/W ¹¹ |
@@ -9234,7 +9728,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
RWU |
-
+
Jottacloud |
MD5 |
R/W |
@@ -9243,7 +9737,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
RW |
-
+
Koofr |
MD5 |
- |
@@ -9252,7 +9746,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Linkbox |
- |
R |
@@ -9261,7 +9755,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Mail.ru Cloud |
Mailru ⁶ |
R/W |
@@ -9270,7 +9764,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Mega |
- |
- |
@@ -9279,7 +9773,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Memory |
MD5 |
R/W |
@@ -9288,7 +9782,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Microsoft Azure Blob Storage |
MD5 |
R/W |
@@ -9297,7 +9791,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
- |
-
+
Microsoft Azure Files Storage |
MD5 |
R/W |
@@ -9306,16 +9800,16 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
- |
-
+
Microsoft OneDrive |
QuickXorHash ⁵ |
-R/W |
+DR/W |
Yes |
No |
R |
-- |
+DRW |
-
+
OpenDrive |
MD5 |
R/W |
@@ -9324,7 +9818,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
OpenStack Swift |
MD5 |
R/W |
@@ -9333,7 +9827,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
- |
-
+
Oracle Object Storage |
MD5 |
R/W |
@@ -9342,7 +9836,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
- |
-
+
pCloud |
MD5, SHA1 ⁷ |
R |
@@ -9351,7 +9845,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
W |
- |
-
+
PikPak |
MD5 |
R |
@@ -9360,7 +9854,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
premiumize.me |
- |
- |
@@ -9369,7 +9863,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
put.io |
CRC-32 |
R/W |
@@ -9378,7 +9872,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
Proton Drive |
SHA1 |
R/W |
@@ -9387,7 +9881,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
QingStor |
MD5 |
- ⁹ |
@@ -9396,7 +9890,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R/W |
- |
-
+
Quatrix by Maytech |
- |
R/W |
@@ -9405,7 +9899,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Seafile |
- |
- |
@@ -9414,16 +9908,16 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
SFTP |
MD5, SHA1 ² |
-R/W |
+DR/W |
Depends |
No |
- |
- |
-
+
Sia |
- |
- |
@@ -9432,7 +9926,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
SMB |
- |
R/W |
@@ -9441,7 +9935,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
SugarSync |
- |
- |
@@ -9450,7 +9944,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Storj |
- |
R |
@@ -9459,7 +9953,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Uptobox |
- |
- |
@@ -9468,7 +9962,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
WebDAV |
MD5, SHA1 ³ |
R ⁴ |
@@ -9477,7 +9971,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
Yandex Disk |
MD5 |
R/W |
@@ -9486,7 +9980,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
R |
- |
-
+
Zoho WorkDrive |
- |
- |
@@ -9495,14 +9989,14 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
- |
- |
-
+
The local filesystem |
All |
-R/W |
+DR/W |
Depends |
No |
- |
-RWU |
+DRWU |
@@ -9522,10 +10016,45 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
The cloud storage system supports various hash types of the objects. The hashes are used when transferring data as an integrity check and can be specifically used with the --checksum flag in syncs and in the check command.
To verify checksums when transferring between cloud storage systems they must support a common hash type.
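For example, a sync that compares checksums rather than sizes and modification times (the paths are placeholders) looks like:
rclone sync --checksum /path/to/files remote:current-backup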
ModTime
-Almost all cloud storage systems store some sort of timestamp on objects, but several of them not something that is appropriate to use for syncing. E.g. some backends will only write a timestamp that represent the time of the upload. To be relevant for syncing it should be able to store the modification time of the source object. If this is not the case, rclone will only check the file size by default, though can be configured to check the file hash (with the --checksum flag). Ideally it should also be possible to change the timestamp of an existing file without having to re-upload it.
+Almost all cloud storage systems store some sort of timestamp on objects, but for several of them it is not something that is appropriate to use for syncing. E.g. some backends will only write a timestamp that represents the time of the upload. To be relevant for syncing it should be able to store the modification time of the source object. If this is not the case, rclone will only check the file size by default, though it can be configured to check the file hash (with the --checksum flag). Ideally it should also be possible to change the timestamp of an existing file without having to re-upload it.
+- | ModTimes not supported - times likely the upload time
+R | ModTimes supported on files but can't be changed without re-upload
+R/W | Read and Write ModTimes fully supported on files
+DR | ModTimes supported on files and directories but can't be changed without re-upload
+DR/W | Read and Write ModTimes fully supported on files and directories
Storage systems with a - in the ModTime column means the modification time read on objects is not the modification time of the file when uploaded. It is most likely the time the file was uploaded, or possibly something else (like the time the picture was taken in Google Photos).
Storage systems with a R (for read-only) in the ModTime column means that it keeps modification times on objects, and updates them when uploading objects, but it does not support changing only the modification time (SetModTime operation) without re-uploading, possibly not even without deleting existing first. Some operations in rclone, such as the copy and sync commands, will automatically check for SetModTime support and re-upload if necessary to keep the modification times in sync. Other commands will not work without SetModTime support, e.g. the touch command on an existing file will fail, and changes to modification time only on files in a mount will be silently ignored.
Storage systems with R/W (for read/write) in the ModTime column means they also support modtime-only operations.
+Storage systems with D in the ModTime column means that the following symbols apply to directories as well as files.
Case Insensitive
If a cloud storage system is case sensitive then it is possible to have two files which differ only in case, e.g. file.txt and FILE.txt. If a cloud storage system is case insensitive then that isn't possible.
This can cause problems when syncing between a case insensitive system and a case sensitive system. The symptom of this is that no matter how many times you run the sync it never completes fully.
@@ -9947,6 +10476,10 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Backends may or may not support reading or writing metadata. They may support reading and writing system metadata (metadata intrinsic to that backend) and/or user metadata (general purpose metadata).
The levels of metadata support are
+
+
+
+
R |
-Read only System Metadata |
+Read only System Metadata on files only |
RW |
-Read and write System Metadata |
+Read and write System Metadata on files only |
RWU |
-Read and write System Metadata and read and write User Metadata |
+Read and write System Metadata and read and write User Metadata on files only |
+
+
+DR |
+Read only System Metadata on files and directories |
+
+
+DRW |
+Read and write System Metadata on files and directories |
+
+
+DRWU |
+Read and write System Metadata and read and write User Metadata on files and directories |
@@ -10032,20 +10577,6 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
-Amazon Drive |
-Yes |
-No |
-Yes |
-Yes |
-No |
-No |
-No |
-No |
-No |
-No |
-Yes |
-
-
Amazon S3 (or S3 compatible) |
No |
Yes |
@@ -10059,7 +10590,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
No |
-
+
Backblaze B2 |
No |
Yes |
@@ -10073,7 +10604,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
No |
-
+
Box |
Yes |
Yes |
@@ -10087,7 +10618,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
Citrix ShareFile |
Yes |
Yes |
@@ -10101,7 +10632,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
Yes |
-
+
Dropbox |
Yes |
Yes |
@@ -10115,7 +10646,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
Enterprise File Fabric |
Yes |
Yes |
@@ -10129,7 +10660,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
Yes |
-
+
FTP |
No |
No |
@@ -10143,7 +10674,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
Yes |
-
+
Google Cloud Storage |
Yes |
Yes |
@@ -10157,7 +10688,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
No |
-
+
Google Drive |
Yes |
Yes |
@@ -10171,7 +10702,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
Google Photos |
No |
No |
@@ -10185,7 +10716,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
No |
-
+
HDFS |
Yes |
No |
@@ -10199,7 +10730,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
Yes |
Yes |
-
+
HiDrive |
Yes |
Yes |
@@ -10213,7 +10744,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
Yes |
-
+
HTTP |
No |
No |
@@ -10227,6 +10758,20 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
No |
Yes |
+
+ImageKit |
+Yes |
+Yes |
+Yes |
+No |
+No |
+No |
+No |
+No |
+No |
+No |
+Yes |
+
Internet Archive |
No |
@@ -10635,7 +11180,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
The local filesystem |
-Yes |
+No |
No |
Yes |
Yes |
@@ -10696,7 +11241,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -10710,6 +11255,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -10724,6 +11270,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
+ --fix-case Force rename of case insensitive dest to match source
--ignore-errors Delete even if there are I/O errors
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
@@ -10761,7 +11308,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--tpslimit float Limit HTTP transactions per second to this
--tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
--use-cookies Enable session cookiejar
- --user-agent string Set the user-agent to a specified string (default "rclone/v1.65.0")
+ --user-agent string Set the user-agent to a specified string (default "rclone/v1.66.0")
Flags helpful for increasing performance.
--buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi)
@@ -10891,14 +11438,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--rc-web-gui-update Check and update to latest version of web gui
Backend
Backend only flags. These can be set in the config file also.
- --acd-auth-url string Auth server URL
- --acd-client-id string OAuth Client Id
- --acd-client-secret string OAuth Client Secret
- --acd-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
- --acd-token string OAuth Access Token as a JSON blob
- --acd-token-url string Token server url
- --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
+ --alias-description string Description of the remote
--alias-remote string Remote or path to alias
--azureblob-access-tier string Access tier of blob: hot, cool, cold or archive
--azureblob-account string Azure Storage Account Name
@@ -10909,6 +11449,8 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--azureblob-client-id string The ID of the client in use
--azureblob-client-secret string One of the service principal's client secrets
--azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
+ --azureblob-delete-snapshots string Set to specify how to deal with snapshots on blob deletion
+ --azureblob-description string Description of the remote
--azureblob-directory-markers Upload an empty object with a trailing slash when a new directory is created
--azureblob-disable-checksum Don't store MD5 checksum with object metadata
--azureblob-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
@@ -10939,6 +11481,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--azurefiles-client-secret string One of the service principal's client secrets
--azurefiles-client-send-certificate-chain Send the certificate chain when using certificate auth
--azurefiles-connection-string string Azure Files Connection String
+ --azurefiles-description string Description of the remote
--azurefiles-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8,Dot)
--azurefiles-endpoint string Endpoint for the service
--azurefiles-env-auth Read credentials from runtime (environment variables, CLI or MSI)
@@ -10958,8 +11501,9 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
--b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
+ --b2-description string Description of the remote
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
- --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
+ --b2-download-auth-duration Duration Time before the public link authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
--b2-download-url string Custom endpoint for downloads
--b2-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--b2-endpoint string Endpoint for the service
@@ -10978,6 +11522,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--box-client-id string OAuth Client Id
--box-client-secret string OAuth Client Secret
--box-commit-retries int Max number of times to try committing a multipart file (default 100)
+ --box-description string Description of the remote
--box-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
--box-impersonate string Impersonate this user ID when using a service account
--box-list-chunk int Size of listing chunk 1-1000 (default 1000)
@@ -10994,6 +11539,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
+ --cache-description string Description of the remote
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
--cache-plex-password string The password of the Plex user (obscured)
@@ -11007,15 +11553,19 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--cache-workers int How many workers should run in parallel to download chunks (default 4)
--cache-writes Cache file data on writes through the FS
--chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
+ --chunker-description string Description of the remote
--chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
--chunker-hash-type string Choose how chunker handles hash sums (default "md5")
--chunker-remote string Remote to chunk/unchunk
+ --combine-description string Description of the remote
--combine-upstreams SpaceSepList Upstreams for combining
+ --compress-description string Description of the remote
--compress-level int GZIP compression level (-2 to 9) (default -1)
--compress-mode string Compression mode (default "gzip")
--compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
--compress-remote string Remote to compress
-L, --copy-links Follow symlinks and copy the pointed to item
+ --crypt-description string Description of the remote
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
--crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
--crypt-filename-encryption string How to encrypt the filenames (default "standard")
@@ -11026,6 +11576,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--crypt-remote string Remote to encrypt/decrypt
--crypt-server-side-across-configs Deprecated: use --server-side-across-configs instead
--crypt-show-mapping For all files listed show how the names encrypt
+ --crypt-strict-names If set, this will raise an error when crypt comes across a filename that can't be decrypted
--crypt-suffix string If this is set it will override the default suffix of ".bin" (default ".bin")
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs
@@ -11035,6 +11586,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--drive-client-id string Google Application Client Id
--drive-client-secret string OAuth Client Secret
--drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
+ --drive-description string Description of the remote
--drive-disable-http2 Disable drive using http2 (default true)
--drive-encoding Encoding The encoding for the backend (default InvalidUtf8)
--drive-env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars)
@@ -11083,6 +11635,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
--dropbox-client-id string OAuth Client Id
--dropbox-client-secret string OAuth Client Secret
+ --dropbox-description string Description of the remote
--dropbox-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
--dropbox-impersonate string Impersonate this user when using a business account
--dropbox-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
@@ -11092,10 +11645,12 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--dropbox-token-url string Token server url
--fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
--fichier-cdn Set if you wish to use CDN download links
+ --fichier-description string Description of the remote
--fichier-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
--fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
--fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
--fichier-shared-folder string If you want to download a shared folder, add this parameter
+ --filefabric-description string Description of the remote
--filefabric-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--filefabric-permanent-token string Permanent Authentication Token
--filefabric-root-folder-id string ID of the root folder
@@ -11106,6 +11661,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--ftp-ask-password Allow asking for FTP password when needed
--ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
+ --ftp-description string Description of the remote
--ftp-disable-epsv Disable using EPSV even if server advertises support
--ftp-disable-mlsd Disable using MLSD even if server advertises support
--ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
@@ -11131,6 +11687,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--gcs-client-id string OAuth Client Id
--gcs-client-secret string OAuth Client Secret
--gcs-decompress If set this will decompress gzip encoded objects
+ --gcs-description string Description of the remote
--gcs-directory-markers Upload an empty object with a trailing slash when a new directory is created
--gcs-encoding Encoding The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gcs-endpoint string Endpoint for the service
@@ -11151,6 +11708,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--gphotos-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
--gphotos-client-id string OAuth Client Id
--gphotos-client-secret string OAuth Client Secret
+ --gphotos-description string Description of the remote
--gphotos-encoding Encoding The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gphotos-include-archived Also view and download archived media
--gphotos-read-only Set to make the Google Photos backend read only
@@ -11159,10 +11717,12 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--gphotos-token string OAuth Access Token as a JSON blob
--gphotos-token-url string Token server url
--hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
+ --hasher-description string Description of the remote
--hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
--hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
--hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
--hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
+ --hdfs-description string Description of the remote
--hdfs-encoding Encoding The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
--hdfs-namenode CommaSepList Hadoop name nodes and ports
--hdfs-service-principal-name string Kerberos service principal name for the namenode
@@ -11171,6 +11731,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
--hidrive-client-id string OAuth Client Id
--hidrive-client-secret string OAuth Client Secret
+ --hidrive-description string Description of the remote
--hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
--hidrive-encoding Encoding The encoding for the backend (default Slash,Dot)
--hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
@@ -11181,10 +11742,12 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--hidrive-token-url string Token server url
--hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
--hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
+ --http-description string Description of the remote
--http-headers CommaSepList Set HTTP headers for all transactions
--http-no-head Don't use HEAD requests
--http-no-slash Set this if the site doesn't end directories with /
--http-url string URL of HTTP host to connect to
+ --imagekit-description string Description of the remote
--imagekit-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Dollar,Question,Hash,Percent,BackSlash,Del,Ctl,InvalidUtf8,Dot,SquareBracket)
--imagekit-endpoint string You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)
--imagekit-only-signed Restrict unsigned image URLs If you have configured Restrict unsigned image URLs in your dashboard settings, set this to true
@@ -11193,6 +11756,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--imagekit-upload-tags string Tags to add to the uploaded files, e.g. "tag1,tag2"
--imagekit-versions Include old versions in directory listings
--internetarchive-access-key-id string IAS3 Access Key
+ --internetarchive-description string Description of the remote
--internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
--internetarchive-encoding Encoding The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
--internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
@@ -11202,6 +11766,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--jottacloud-auth-url string Auth server URL
--jottacloud-client-id string OAuth Client Id
--jottacloud-client-secret string OAuth Client Secret
+ --jottacloud-description string Description of the remote
--jottacloud-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
@@ -11210,6 +11775,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--jottacloud-token-url string Token server url
--jottacloud-trashed-only Only show files that are in the trash
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
+ --koofr-description string Description of the remote
--koofr-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--koofr-endpoint string The Koofr API endpoint to use
--koofr-mountid string Mount ID of the mount to use
@@ -11217,10 +11783,12 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--koofr-provider string Choose your storage provider
--koofr-setmtime Does the backend support setting modification time (default true)
--koofr-user string Your user name
+ --linkbox-description string Description of the remote
--linkbox-token string Token from https://www.linkbox.to/admin/account
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-case-insensitive Force the filesystem to report itself as case insensitive
--local-case-sensitive Force the filesystem to report itself as case sensitive
+ --local-description string Description of the remote
--local-encoding Encoding The encoding for the backend (default Slash,Dot)
--local-no-check-updated Don't check to see if the files change during upload
--local-no-preallocate Disable preallocation of disk space for transferred files
@@ -11233,6 +11801,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
--mailru-client-id string OAuth Client Id
--mailru-client-secret string OAuth Client Secret
+ --mailru-description string Description of the remote
--mailru-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--mailru-pass string Password (obscured)
--mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
@@ -11243,12 +11812,15 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--mailru-token-url string Token server url
--mailru-user string User name (usually email)
--mega-debug Output more debug from Mega
+ --mega-description string Description of the remote
--mega-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
--mega-hard-delete Delete files permanently rather than putting them into the trash
--mega-pass string Password (obscured)
--mega-use-https Use HTTPS for transfers
--mega-user string User name
+ --memory-description string Description of the remote
--netstorage-account string Set the NetStorage account name
+ --netstorage-description string Description of the remote
--netstorage-host string Domain+path of NetStorage host to connect to
--netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
--netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
@@ -11260,6 +11832,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--onedrive-client-id string OAuth Client Id
--onedrive-client-secret string OAuth Client Secret
--onedrive-delta If set rclone will use delta listing to implement recursive listings
+ --onedrive-description string Description of the remote
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
--onedrive-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
@@ -11269,6 +11842,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
--onedrive-link-type string Set the type of the links created by the link command (default "view")
--onedrive-list-chunk int Size of listing chunk (default 1000)
+ --onedrive-metadata-permissions Bits Control whether permissions should be read or written in metadata (default off)
--onedrive-no-versions Remove all versions on modifying operations
--onedrive-region string Choose national cloud region for OneDrive (default "global")
--onedrive-root-folder-id string ID of the root folder
@@ -11282,6 +11856,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--oos-config-profile string Profile name inside the oci config file (default "Default")
--oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
--oos-copy-timeout Duration Timeout for copy (default 1m0s)
+ --oos-description string Description of the remote
--oos-disable-checksum Don't store MD5 checksum with object metadata
--oos-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
--oos-endpoint string Endpoint for Object storage API
@@ -11300,12 +11875,14 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--oos-upload-concurrency int Concurrency for multipart uploads (default 10)
--oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
+ --opendrive-description string Description of the remote
--opendrive-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
--opendrive-password string Password (obscured)
--opendrive-username string Username
--pcloud-auth-url string Auth server URL
--pcloud-client-id string OAuth Client Id
--pcloud-client-secret string OAuth Client Secret
+ --pcloud-description string Description of the remote
--pcloud-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
--pcloud-password string Your pcloud password (obscured)
@@ -11316,6 +11893,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--pikpak-auth-url string Auth server URL
--pikpak-client-id string OAuth Client Id
--pikpak-client-secret string OAuth Client Secret
+ --pikpak-description string Description of the remote
--pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
--pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
--pikpak-pass string Pikpak password (obscured)
@@ -11328,11 +11906,13 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--premiumizeme-auth-url string Auth server URL
--premiumizeme-client-id string OAuth Client Id
--premiumizeme-client-secret string OAuth Client Secret
+ --premiumizeme-description string Description of the remote
--premiumizeme-encoding Encoding The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--premiumizeme-token string OAuth Access Token as a JSON blob
--premiumizeme-token-url string Token server url
--protondrive-2fa string The 2FA code
--protondrive-app-version string The app version string (default "macos-drive@1.0.0-alpha.1+rclone")
+ --protondrive-description string Description of the remote
--protondrive-enable-caching Caches the files and folders metadata to reduce API calls (default true)
--protondrive-encoding Encoding The encoding for the backend (default Slash,LeftSpace,RightSpace,InvalidUtf8,Dot)
--protondrive-mailbox-password string The mailbox password of your two-password proton account (obscured)
@@ -11343,12 +11923,14 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--putio-auth-url string Auth server URL
--putio-client-id string OAuth Client Id
--putio-client-secret string OAuth Client Secret
+ --putio-description string Description of the remote
--putio-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--putio-token string OAuth Access Token as a JSON blob
--putio-token-url string Token server url
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
--qingstor-connection-retries int Number of connection retries (default 3)
+ --qingstor-description string Description of the remote
--qingstor-encoding Encoding The encoding for the backend (default Slash,Ctl,InvalidUtf8)
      --qingstor-endpoint string                             Enter an endpoint URL to connect to the QingStor API
--qingstor-env-auth Get QingStor credentials from runtime
@@ -11357,18 +11939,21 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--qingstor-zone string Zone to connect to
--quatrix-api-key string API key for accessing Quatrix account
+ --quatrix-description string Description of the remote
--quatrix-effective-upload-time string Wanted upload time for one chunk (default "4s")
--quatrix-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--quatrix-hard-delete Delete files permanently rather than putting them into the trash
--quatrix-host string Host name of Quatrix account
--quatrix-maximal-summary-chunk-size SizeSuffix The maximal summary for all chunks. It should not be less than 'transfers'*'minimal_chunk_size' (default 95.367Mi)
--quatrix-minimal-chunk-size SizeSuffix The minimal size for one chunk (default 9.537Mi)
+ --quatrix-skip-project-folders Skip project folders in operations
--s3-access-key-id string AWS Access Key ID
--s3-acl string Canned ACL used when creating buckets and storing or copying objects
--s3-bucket-acl string Canned ACL used when creating buckets
--s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
--s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
--s3-decompress If set this will decompress gzip encoded objects
+ --s3-description string Description of the remote
--s3-directory-markers Upload an empty object with a trailing slash when a new directory is created
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-disable-http2 Disable usage of http2 for S3 backends
@@ -11403,19 +11988,22 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
--s3-storage-class string The storage class to use when storing new objects in S3
--s3-sts-endpoint string Endpoint for STS
- --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
+ --s3-upload-concurrency int Concurrency for multipart uploads and copies (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
--s3-use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header (default unset)
--s3-use-already-exists Tristate Set if rclone should report BucketAlreadyExists errors on bucket creation (default unset)
+ --s3-use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support)
--s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
--s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset)
--s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
--s3-v2-auth If true use v2 authentication
--s3-version-at Time Show file versions as they were at the specified time (default off)
+ --s3-version-deleted Show deleted file markers when using versions
--s3-versions Include old versions in directory listings
--seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
--seafile-create-library Should rclone create a library if it doesn't exist
+ --seafile-description string Description of the remote
--seafile-encoding Encoding The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
--seafile-library string Name of the library
--seafile-library-key string Library password (for encrypted libraries only) (obscured)
@@ -11427,6 +12015,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
--sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
--sftp-copy-is-hardlink Set to enable server side copies using hardlinks
+ --sftp-description string Description of the remote
--sftp-disable-concurrent-reads If set don't use concurrent reads
--sftp-disable-concurrent-writes If set don't use concurrent writes
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
@@ -11461,6 +12050,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
--sharefile-client-id string OAuth Client Id
--sharefile-client-secret string OAuth Client Secret
+ --sharefile-description string Description of the remote
--sharefile-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
--sharefile-endpoint string Endpoint for API calls
--sharefile-root-folder-id string ID of the root folder
@@ -11469,10 +12059,12 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
--sia-api-password string Sia Daemon API Password (obscured)
--sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
+ --sia-description string Description of the remote
--sia-encoding Encoding The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
--sia-user-agent string Siad User Agent (default "Sia-Agent")
--skip-links Don't warn about skipped symlinks
--smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
+ --smb-description string Description of the remote
--smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
--smb-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
--smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
@@ -11484,6 +12076,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--smb-user string SMB username (default "$USER")
--storj-access-grant string Access grant
--storj-api-key string API key
+ --storj-description string Description of the remote
--storj-passphrase string Encryption passphrase
--storj-provider string Choose an authentication method (default "existing")
--storj-satellite-address string Satellite address (default "us1.storj.io")
@@ -11492,6 +12085,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--sugarsync-authorization string Sugarsync authorization
--sugarsync-authorization-expiry string Sugarsync authorization expiry
--sugarsync-deleted-id string Sugarsync deleted folder id
+ --sugarsync-description string Description of the remote
--sugarsync-encoding Encoding The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
--sugarsync-hard-delete Permanently delete files if true
--sugarsync-private-access-key string Sugarsync Private Access Key
@@ -11505,6 +12099,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
+ --swift-description string Description of the remote
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
@@ -11524,17 +12119,21 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--union-action-policy string Policy to choose upstream on ACTION category (default "epall")
--union-cache-time int Cache time of usage and free space (in seconds) (default 120)
--union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
+ --union-description string Description of the remote
--union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
--union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
--union-upstreams string List of space separated upstreams
--uptobox-access-token string Your access token
+ --uptobox-description string Description of the remote
--uptobox-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
--uptobox-private Set to make uploaded files private
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-bearer-token-command string Command to run to get a bearer token
+ --webdav-description string Description of the remote
--webdav-encoding string The encoding for the backend
--webdav-headers CommaSepList Set HTTP headers for all transactions
--webdav-nextcloud-chunk-size SizeSuffix Nextcloud upload chunk size (default 10Mi)
+ --webdav-owncloud-exclude-shares Exclude ownCloud shares
--webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
--webdav-pass string Password (obscured)
--webdav-url string URL of http host to connect to
@@ -11543,6 +12142,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--yandex-auth-url string Auth server URL
--yandex-client-id string OAuth Client Id
--yandex-client-secret string OAuth Client Secret
+ --yandex-description string Description of the remote
--yandex-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-hard-delete Delete files permanently rather than putting them into the trash
--yandex-token string OAuth Access Token as a JSON blob
@@ -11550,6 +12150,7 @@ Showing nodes accounting for 1537.03kB, 100% of 1537.03kB total
--zoho-auth-url string Auth server URL
--zoho-client-id string OAuth Client Id
--zoho-client-secret string OAuth Client Secret
+ --zoho-description string Description of the remote
--zoho-encoding Encoding The encoding for the backend (default Del,Ctl,InvalidUtf8)
--zoho-region string Zoho region to connect to
--zoho-token string OAuth Access Token as a JSON blob
@@ -11735,16 +12336,21 @@ docker volume create my_vol -d rclone -o opt1=new_val1 ...
docker volume list
docker volume inspect my_vol
If docker refuses to remove the volume, you should find containers or swarm services that use it and stop them first.
+Bisync
+bisync is in beta and is considered an advanced command, so use with care. Make sure you have read and understood the entire manual (especially the Limitations section) before using, or data loss can result. Questions can be asked in the Rclone Forum.
Getting started
- Install rclone and setup your remotes.
-- Bisync will create its working directory at ~/.cache/rclone/bisync on Linux or C:\Users\MyLogin\AppData\Local\rclone\bisync on Windows. Make sure that this location is writable.
+- Bisync will create its working directory at ~/.cache/rclone/bisync on Linux, /Users/yourusername/Library/Caches/rclone/bisync on Mac, or C:\Users\MyLogin\AppData\Local\rclone\bisync on Windows. Make sure that this location is writable.
- Run bisync with the --resync flag, specifying the paths to the local and remote sync directory roots.
-- For successive sync runs, leave off the --resync flag.
+- For successive sync runs, leave off the --resync flag. (Important!)
- Consider using a filters file for excluding unnecessary files and directories from the sync.
- Consider setting up the --check-access feature for safety.
-- On Linux, consider setting up a crontab entry. bisync can safely run in concurrent cron jobs thanks to lock files it maintains.
+- On Linux or Mac, consider setting up a crontab entry. bisync can safely run in concurrent cron jobs thanks to lock files it maintains.
+For example, your first command might look like this:
+rclone bisync remote1:path1 remote2:path2 --create-empty-src-dirs --compare size,modtime,checksum --slow-hash-sync-only --resilient -MvP --drive-skip-gdocs --fix-case --resync --dry-run
+If all looks good, run it again without --dry-run. After that, remove --resync as well.
Here is a typical run log (with timestamps removed for clarity):
rclone bisync /testdir/path1/ /testdir/path2/ --verbose
INFO : Synching Path1 "/testdir/path1/" with Path2 "/testdir/path2/"
@@ -11797,36 +12403,36 @@ Positional arguments:
Type 'rclone listremotes' for list of configured remotes.
Optional Flags:
- --check-access Ensure expected `RCLONE_TEST` files are found on
- both Path1 and Path2 filesystems, else abort.
- --check-filename FILENAME Filename for `--check-access` (default: `RCLONE_TEST`)
- --check-sync CHOICE Controls comparison of final listings:
- `true | false | only` (default: true)
- If set to `only`, bisync will only compare listings
- from the last run but skip actual sync.
- --filters-file PATH Read filtering patterns from a file
- --max-delete PERCENT Safety check on maximum percentage of deleted files allowed.
- If exceeded, the bisync run will abort. (default: 50%)
- --force Bypass `--max-delete` safety check and run the sync.
- Consider using with `--verbose`
- --create-empty-src-dirs Sync creation and deletion of empty directories.
- (Not compatible with --remove-empty-dirs)
- --remove-empty-dirs Remove empty directories at the final cleanup step.
- -1, --resync Performs the resync run.
- Warning: Path1 files may overwrite Path2 versions.
- Consider using `--verbose` or `--dry-run` first.
- --ignore-listing-checksum Do not use checksums for listings
- (add --ignore-checksum to additionally skip post-copy checksum checks)
- --resilient Allow future runs to retry after certain less-serious errors,
- instead of requiring --resync. Use at your own risk!
- --localtime Use local time in listings (default: UTC)
- --no-cleanup Retain working files (useful for troubleshooting and testing).
- --workdir PATH Use custom working directory (useful for testing).
- (default: `~/.cache/rclone/bisync`)
- -n, --dry-run Go through the motions - No files are copied/deleted.
- -v, --verbose Increases logging verbosity.
- May be specified more than once for more details.
- -h, --help help for bisync
+ --backup-dir1 string --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+ --backup-dir2 string --backup-dir for Path2. Must be a non-overlapping path on the same remote.
+ --check-access Ensure expected RCLONE_TEST files are found on both Path1 and Path2 filesystems, else abort.
+ --check-filename string Filename for --check-access (default: RCLONE_TEST)
+ --check-sync string Controls comparison of final listings: true|false|only (default: true) (default "true")
+ --compare string Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')
+ --conflict-loser ConflictLoserAction Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): , num, pathname, delete (default: num)
+ --conflict-resolve string Automatically resolve conflicts by preferring the version that is: none, path1, path2, newer, older, larger, smaller (default: none) (default "none")
+ --conflict-suffix string Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')
+ --create-empty-src-dirs Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
+ --download-hash Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)
+ --filters-file string Read filtering patterns from a file
+ --force Bypass --max-delete safety check and run the sync. Consider using with --verbose
+ -h, --help help for bisync
+ --ignore-listing-checksum Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)
+ --max-lock Duration Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m) (default 0s)
+ --no-cleanup Retain working files (useful for troubleshooting and testing).
+ --no-slow-hash Ignore listing checksums only on backends where they are slow
+ --recover Automatically recover from interruptions without requiring --resync.
+ --remove-empty-dirs Remove ALL empty directories at the final cleanup step.
+ --resilient Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!
+ -1, --resync Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.
+ --resync-mode string During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.) (default "none")
+ --retries int Retry operations this many times if they fail (requires --resilient). (default 3)
+ --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
+ --slow-hash-sync-only Ignore slow checksums for listings and deltas, but still consider them during sync calls.
+ --workdir string Use custom working dir - useful for testing. (default: {WORKDIR})
+ --max-delete PERCENT Safety check on maximum percentage of deleted files allowed. If exceeded, the bisync run will abort. (default: 50%)
+ -n, --dry-run Go through the motions - No files are copied/deleted.
+ -v, --verbose Increases logging verbosity. May be specified more than once for more details.
Arbitrary rclone flags may be specified on the bisync command line, for example rclone bisync ./testdir/path1/ gdrive:testdir/path2/ --drive-skip-gdocs -v -v --timeout 10s
Note that interactions of various rclone flags with bisync process flow have not been fully tested yet.
Paths
Path1 and Path2 arguments may be references to any mix of local directory paths (absolute or relative), UNC paths (//server/share/path
), Windows drive paths (with a drive letter and :
) or configured remotes with optional subdirectory paths. Cloud references are distinguished by having a :
in the argument (see Windows support below).
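For example (the directory and remote names here are placeholders), mixing a local Windows drive path with a configured cloud remote:
rclone bisync C:\Users\MyLogin\Documents remote:Documents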
@@ -11834,50 +12440,153 @@ Optional Flags:
The listings in bisync working directory (default: ~/.cache/rclone/bisync
) are named based on the Path1 and Path2 arguments so that separate syncs to individual directories within the tree may be set up, e.g.: path_to_local_tree..dropbox_subdir.lst
.
Any empty directories after the sync on both the Path1 and Path2 filesystems are not deleted by default, unless --create-empty-src-dirs
is specified. If the --remove-empty-dirs
flag is specified, then both paths will have ALL empty directories purged as the last step in the process.
Command-line flags
---resync
-This will effectively make both Path1 and Path2 filesystems contain a matching superset of all files. Path2 files that do not exist in Path1 will be copied to Path1, and the process will then copy the Path1 tree to Path2.
-The --resync
sequence is roughly equivalent to:
-rclone copy Path2 Path1 --ignore-existing
-rclone copy Path1 Path2
-Or, if using --create-empty-src-dirs
:
-rclone copy Path2 Path1 --ignore-existing
-rclone copy Path1 Path2 --create-empty-src-dirs
-rclone copy Path2 Path1 --create-empty-src-dirs
+--resync
+This will effectively make both Path1 and Path2 filesystems contain a matching superset of all files. By default, Path2 files that do not exist in Path1 will be copied to Path1, and the process will then copy the Path1 tree to Path2.
+The --resync
sequence is roughly equivalent to the following (but see --resync-mode
for other options):
+rclone copy Path2 Path1 --ignore-existing [--create-empty-src-dirs]
+rclone copy Path1 Path2 [--create-empty-src-dirs]
The base directories on both Path1 and Path2 filesystems must exist or bisync will fail. This is required for safety, so that bisync can verify that both paths are valid.
-When using --resync
, a newer version of a file on the Path2 filesystem will be overwritten by the Path1 filesystem version. (Note that this is NOT entirely symmetrical.) Carefully evaluate deltas using --dry-run.
+When using --resync
, a newer version of a file on the Path2 filesystem will (by default) be overwritten by the Path1 filesystem version. (Note that this is NOT entirely symmetrical, and more symmetrical options can be specified with the --resync-mode
flag.) Carefully evaluate deltas using --dry-run.
For a resync run, one of the paths may be empty (no files in the path tree). The resync run should result in files on both paths, else a normal non-resync run will fail.
For a non-resync run, either path being empty (no files in the tree) fails with Empty current PathN listing. Cannot sync to an empty directory: X.pathN.lst
This is a safety check that an unexpected empty path does not result in deleting everything in the other path.
---check-access
+Note that --resync
implies --resync-mode path1
unless a different --resync-mode
is explicitly specified. It is not necessary to use both the --resync
and --resync-mode
flags -- either one is sufficient without the other.
+Note: --resync
(including --resync-mode
) should only be used under three specific (rare) circumstances: 1. It is your first bisync run (between these two paths) 2. You've just made changes to your bisync settings (such as editing the contents of your --filters-file
) 3. There was an error on the prior run, and as a result, bisync now requires --resync
to recover
+The rest of the time, you should omit --resync
. The reason is because --resync
will only copy (not sync) each side to the other. Therefore, if you included --resync
for every bisync run, it would never be possible to delete a file -- the deleted file would always keep reappearing at the end of every run (because it's being copied from the other side where it still exists). Similarly, renaming a file would always result in a duplicate copy (both old and new name) on both sides.
+If you find that frequent interruptions from #3 are an issue, rather than automatically running --resync
, the recommended alternative is to use the --resilient
, --recover
, and --conflict-resolve
flags, (along with Graceful Shutdown mode, when needed) for a very robust "set-it-and-forget-it" bisync setup that can automatically bounce back from almost any interruption it might encounter. Consider adding something like the following:
+--resilient --recover --max-lock 2m --conflict-resolve newer
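+As a rough illustration (the local path and remote name here are placeholders), a scheduled command built around those flags might look like:
+rclone bisync /home/user/sync remote:sync --resilient --recover --max-lock 2m --conflict-resolve newer -v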
+--resync-mode CHOICE
+In the event that a file differs on both sides during a --resync
, --resync-mode
controls which version will overwrite the other. The supported options are similar to --conflict-resolve
. For all of the following options, the version that is kept is referred to as the "winner", and the version that is overwritten (deleted) is referred to as the "loser". The options are named after the "winner":
+
+path1
- (the default) - the version from Path1 is unconditionally considered the winner (regardless of modtime
and size
, if any). This can be useful if one side is more trusted or up-to-date than the other, at the time of the --resync
.
+path2
- same as path1
, except the path2 version is considered the winner.
+newer
- the newer file (by modtime
) is considered the winner, regardless of which side it came from. This may result in having a mix of some winners from Path1, and some winners from Path2. (The implementation is analogous to running rclone copy --update
in both directions.)
+older
- same as newer
, except the older file is considered the winner, and the newer file is considered the loser.
+larger
- the larger file (by size
) is considered the winner (regardless of modtime
, if any). This can be a useful option for remotes without modtime
support, or with the kinds of files (such as logs) that tend to grow but not shrink, over time.
+smaller
- the smaller file (by size
) is considered the winner (regardless of modtime
, if any).
+
+For all of the above options, note the following: - If either of the underlying remotes lacks support for the chosen method, it will be ignored and will fall back to the default of path1
. (For example, if --resync-mode newer
is set, but one of the paths uses a remote that doesn't support modtime
.) - If a winner can't be determined because the chosen method's attribute is missing or equal, it will be ignored, and bisync will instead try to determine whether the files differ by looking at the other --compare
methods in effect. (For example, if --resync-mode newer
is set, but the Path1 and Path2 modtimes are identical, bisync will compare the sizes.) If bisync concludes that they differ, preference is given to whichever is the "source" at that moment. (In practice, this gives a slight advantage to Path2, as the 2to1 copy comes before the 1to2 copy.) If the files do not differ, nothing is copied (as both sides are already correct). - These options apply only to files that exist on both sides (with the same name and relative path). Files that exist only on one side and not the other are always copied to the other, during --resync
(this is one of the main differences between resync and non-resync runs). - --conflict-resolve
, --conflict-loser
, and --conflict-suffix
do not apply during --resync
, and unlike these flags, nothing is renamed during --resync
. When a file differs on both sides during --resync
, one version always overwrites the other (much like in rclone copy
.) (Consider using --backup-dir
to retain a backup of the losing version.) - Unlike for --conflict-resolve
, --resync-mode none
is not a valid option (or rather, it will be interpreted as "no resync", unless --resync
has also been specified, in which case it will be ignored.) - Winners and losers are decided at the individual file-level only (there is not currently an option to pick an entire winning directory atomically, although the path1
and path2
options typically produce a similar result.) - To maintain backward-compatibility, the --resync
flag implies --resync-mode path1
unless a different --resync-mode
is explicitly specified. Similarly, all --resync-mode
options (except none
) imply --resync
, so it is not necessary to use both the --resync
and --resync-mode
flags simultaneously -- either one is sufficient without the other.
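+For example (paths are placeholders), to prefer whichever side holds the newer copy of each differing file during a resync:
+rclone bisync Path1 Path2 --resync-mode newer --dry-run
+If the result looks right, repeat the command without --dry-run.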
+--check-access
Access check files are an additional safety measure against data loss. bisync will ensure it can find matching RCLONE_TEST
files in the same places in the Path1 and Path2 filesystems. RCLONE_TEST
files are not generated automatically. For --check-access
to succeed, you must first either: A) Place one or more RCLONE_TEST
files in both systems, or B) Set --check-filename
to a filename already in use in various locations throughout your sync'd fileset. Recommended methods for A) include: * rclone touch Path1/RCLONE_TEST
(create a new file) * rclone copyto Path1/RCLONE_TEST Path2/RCLONE_TEST
(copy an existing file) * rclone copy Path1/RCLONE_TEST Path2/RCLONE_TEST --include "RCLONE_TEST"
(copy multiple files at once, recursively) * create the files manually (outside of rclone) * running bisync once without --check-access to set matching files on both filesystems will also work, but is not preferred, due to the potential for user error (you are temporarily disabling the safety feature).
Note that --check-access
is still enforced on --resync
, so bisync --resync --check-access
will not work as a method of initially setting the files (this is to ensure that bisync can't inadvertently circumvent its own safety switch.)
Time stamps and file contents for RCLONE_TEST
files are not important, just the names and locations. If you have symbolic links in your sync tree it is recommended to place RCLONE_TEST
files in the linked-to directory tree to protect against bisync assuming a bunch of deleted files if the linked-to tree should not be accessible. See also the --check-filename flag.
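+A minimal setup sequence (paths are placeholders) might look like:
+rclone touch Path1/RCLONE_TEST
+rclone copyto Path1/RCLONE_TEST Path2/RCLONE_TEST
+rclone bisync Path1 Path2 --check-access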
---check-filename
+--check-filename
Name of the file(s) used in access health validation. The default --check-filename
is RCLONE_TEST
. One or more files having this filename must exist, synchronized between your source and destination filesets, in order for --check-access
to succeed. See --check-access for additional details.
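+For example (the filename here is only an illustration), to use a marker file name other than the default:
+rclone bisync Path1 Path2 --check-access --check-filename .rclone-check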
---max-delete
+--compare
+As of v1.66
, bisync fully supports comparing based on any combination of size, modtime, and checksum (lifting the prior restriction on backends without modtime support.)
+By default (without the --compare
flag), bisync inherits the same comparison options as sync
(that is: size
and modtime
by default, unless modified with flags such as --checksum
or --size-only
.)
+If the --compare
flag is set, it will override these defaults. This can be useful if you wish to compare based on combinations not currently supported in sync
, such as comparing all three of size
AND modtime
AND checksum
simultaneously (or just modtime
AND checksum
).
+--compare
takes a comma-separated list, with the currently supported values being size
, modtime
, and checksum
. For example, if you want to compare size and checksum, but not modtime, you would do:
+--compare size,checksum
+Or if you want to compare all three:
+--compare size,modtime,checksum
+--compare
overrides any conflicting flags. For example, if you set the conflicting flags --compare checksum --size-only
, --size-only
will be ignored, and bisync will compare checksum and not size. To avoid confusion, it is recommended to use either --compare
or the normal sync
flags, but not both.
+If --compare
includes checksum
and both remotes support checksums but have no hash types in common with each other, checksums will be considered only for comparisons within the same side (to determine what has changed since the prior sync), but not for comparisons against the opposite side. If one side supports checksums and the other does not, checksums will only be considered on the side that supports them.
+When comparing with checksum
and/or size
without modtime
, bisync cannot determine whether a file is newer
or older
-- only whether it is changed
or unchanged
. (If it is changed
on both sides, bisync still does the standard equality-check to avoid declaring a sync conflict unless it absolutely has to.)
+It is recommended to do a --resync
when changing --compare
settings, as otherwise your prior listing files may not contain the attributes you wish to compare (for example, they will not have stored checksums if you were not previously comparing checksums.)
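+For example (paths are placeholders), to switch an existing pair of paths to checksum-aware comparison:
+rclone bisync Path1 Path2 --compare size,modtime,checksum --resync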
+--ignore-listing-checksum
+When --checksum
or --compare checksum
is set, bisync will retrieve (or generate) checksums (for backends that support them) when creating the listings for both paths, and store the checksums in the listing files. --ignore-listing-checksum
will disable this behavior, which may speed things up considerably, especially on backends (such as local) where hashes must be computed on the fly instead of retrieved. Please note the following:
+
+- As of
v1.66
, --ignore-listing-checksum
is now automatically set when neither --checksum
nor --compare checksum
are in use (as the checksums would not be used for anything.)
+--ignore-listing-checksum
is NOT the same as --ignore-checksum
, and you may wish to use one or the other, or both. In a nutshell: --ignore-listing-checksum
controls whether checksums are considered when scanning for diffs, while --ignore-checksum
controls whether checksums are considered during the copy/sync operations that follow, if there ARE diffs.
+- Unless
--ignore-listing-checksum
is passed, bisync currently computes hashes for one path even when there's no common hash with the other path (for example, a crypt remote.) This can still be beneficial, as the hashes will still be used to detect changes within the same side (if --checksum
or --compare checksum
is set), even if they can't be used to compare against the opposite side.
+- If you wish to ignore listing checksums only on remotes where they are slow to compute, consider using
--no-slow-hash
(or --slow-hash-sync-only
) instead of --ignore-listing-checksum
.
+- If
--ignore-listing-checksum
is used simultaneously with --compare checksum
(or --checksum
), checksums will be ignored for bisync deltas, but still considered during the sync operations that follow (if deltas are detected based on modtime and/or size.)
+
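+For example (paths are placeholders), to keep checksum verification during the copy stage but skip it when scanning for deltas:
+rclone bisync Path1 Path2 --compare size,modtime,checksum --ignore-listing-checksum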
+--no-slow-hash
+On some remotes (notably local
), checksums can dramatically slow down a bisync run, because hashes cannot be stored and need to be computed in real-time when they are requested. On other remotes (such as drive
), they add practically no time at all. The --no-slow-hash
flag will automatically skip checksums on remotes where they are slow, while still comparing them on others (assuming --compare
includes checksum
.) This can be useful when one of your bisync paths is slow but you still want to check checksums on the other, for a more robust sync.
+--slow-hash-sync-only
+Same as --no-slow-hash
, except slow hashes are still considered during sync calls. They are still NOT considered for determining deltas, nor are they included in listings. They are also skipped during --resync
. The main use case for this flag is when you have a large number of files, but relatively few of them change from run to run -- so you don't want to check your entire tree every time (it would take too long), but you still want to consider checksums for the smaller group of files for which a modtime
or size
change was detected. Keep in mind that this speed savings comes with a safety trade-off: if a file's content were to change without a change to its modtime
or size
, bisync would not detect it, and it would not be synced.
+--slow-hash-sync-only
is only useful if both remotes share a common hash type (if they don't, bisync will automatically fall back to --no-slow-hash
.) Both --no-slow-hash
and --slow-hash-sync-only
have no effect without --compare checksum
(or --checksum
).
+--download-hash
+If --download-hash
is set, bisync will use best efforts to obtain an MD5 checksum by downloading and computing on-the-fly, when checksums are not otherwise available (for example, a remote that doesn't support them.) Note that since rclone has to download the entire file, this may dramatically slow down your bisync runs, and is also likely to use a lot of data, so it is probably not practical for bisync paths with a large total file size. However, it can be a good option for syncing small-but-important files with maximum accuracy (for example, a source code repo on a crypt
remote.) An additional advantage over methods like cryptcheck
is that the original file is not required for comparison (for example, --download-hash
can be used to bisync two different crypt remotes with different passwords.)
+When --download-hash
is set, bisync still looks for more efficient checksums first, and falls back to downloading only when none are found. It takes priority over conflicting flags such as --no-slow-hash
. --download-hash
is not suitable for Google Docs and other files of unknown size, as their checksums would change from run to run (due to small variances in the internals of the generated export file.) Therefore, bisync automatically skips --download-hash
for files with a size less than 0.
+See also: Hasher
backend, cryptcheck
command, rclone check --download
option, md5sum
command
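+For example (remote names are placeholders), comparing two crypt remotes that share no native hash type:
+rclone bisync secret1:docs secret2:docs --compare size,modtime,checksum --download-hash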
+--max-delete
As a safety check, if greater than the --max-delete
percent of files were deleted on either the Path1 or Path2 filesystem, then bisync will abort with a warning message, without making any changes. The default --max-delete
is 50%
. One way to trigger this limit is to rename a directory that contains more than half of your files. This will appear to bisync as a bunch of deleted files and a bunch of new files. This safety check is intended to block bisync from deleting all of the files on both filesystems due to a temporary network access issue, or if the user had inadvertently deleted the files on one side or the other. To force the sync, either set a different delete percentage limit, e.g. --max-delete 75
(allows up to 75% deletion), or use --force
to bypass the check.
Also see the all files changed check.
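+For example (paths are placeholders), to let a deliberate large reorganisation proceed:
+rclone bisync Path1 Path2 --max-delete 75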
---filters-file
+--filters-file
By using rclone filter features you can exclude file types or directory sub-trees from the sync. See the bisync filters section and generic --filter-from documentation. An example filters file contains filters for non-allowed files for synching with Dropbox.
If you make changes to your filters file then bisync requires a run with --resync
. This is a safety feature, which prevents existing files on the Path1 and/or Path2 side from seeming to disappear from view (since they are excluded in the new listings), which would fool bisync into seeing them as deleted (as compared to the prior run listings), and then bisync would proceed to delete them for real.
To block this from happening, bisync calculates an MD5 hash of the filters file and stores the hash in a .md5
file in the same place as your filters file. On the next run with --filters-file
set, bisync re-calculates the MD5 hash of the current filters file and compares it to the hash stored in the .md5
file. If they don't match, the run aborts with a critical error and thus forces you to do a --resync
, likely avoiding a disaster.
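+For example (paths are placeholders), after editing the filters file, rerun with --resync as described above:
+rclone bisync Path1 Path2 --filters-file /path/to/filters.txt --resync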
---check-sync
+--conflict-resolve CHOICE
+In bisync, a "conflict" is a file that is new or changed on both sides (relative to the prior run) AND is not currently identical on both sides. --conflict-resolve
controls how bisync handles such a scenario. The currently supported options are:
+
+none
- (the default) - do not attempt to pick a winner, keep and rename both files according to --conflict-loser
and --conflict-suffix
settings. For example, with the default settings, file.txt
on Path1 is renamed file.txt.conflict1
and file.txt
on Path2 is renamed file.txt.conflict2
. Both are copied to the opposite path during the run, so both sides end up with a copy of both files. (As none
is the default, it is not necessary to specify --conflict-resolve none
-- you can just omit the flag.)
+newer
- the newer file (by modtime
) is considered the winner and is copied without renaming. The older file (the "loser") is handled according to --conflict-loser
and --conflict-suffix
settings (either renamed or deleted.) For example, if file.txt
on Path1 is newer than file.txt
on Path2, the result on both sides (with other default settings) will be file.txt
(winner from Path1) and file.txt.conflict1
(loser from Path2).
+older
- same as newer
, except the older file is considered the winner, and the newer file is considered the loser.
+larger
- the larger file (by size
) is considered the winner (regardless of modtime
, if any).
+smaller
- the smaller file (by size
) is considered the winner (regardless of modtime
, if any).
+path1
- the version from Path1 is unconditionally considered the winner (regardless of modtime
and size
, if any). This can be useful if one side is usually more trusted or up-to-date than the other.
+path2
- same as path1
, except the path2 version is considered the winner.
+
+For all of the above options, note the following: - If either of the underlying remotes lacks support for the chosen method, it will be ignored and fall back to none
. (For example, if --conflict-resolve newer
is set, but one of the paths uses a remote that doesn't support modtime
.) - If a winner can't be determined because the chosen method's attribute is missing or equal, it will be ignored and fall back to none
. (For example, if --conflict-resolve newer
is set, but the Path1 and Path2 modtimes are identical, even if the sizes may differ.) - If the file's content is currently identical on both sides, it is not considered a "conflict", even if new or changed on both sides since the prior sync. (For example, if you made a change on one side and then synced it to the other side by other means.) Therefore, none of the conflict resolution flags apply in this scenario. - The conflict resolution flags do not apply during a --resync
, as there is no "prior run" to speak of (but see --resync-mode
for similar options.)
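+For example (paths are placeholders), to let the newer version win conflicts automatically:
+rclone bisync Path1 Path2 --conflict-resolve newer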
+--conflict-loser CHOICE
+--conflict-loser
determines what happens to the "loser" of a sync conflict (when --conflict-resolve
determines a winner) or to both files (when there is no winner.) The currently supported options are:
+
+num
- (the default) - auto-number the conflicts by automatically appending the next available number to the --conflict-suffix
, in chronological order. For example, with the default settings, the first conflict for file.txt
will be renamed file.txt.conflict1
. If file.txt.conflict1
already exists, file.txt.conflict2
will be used instead (etc., up to a maximum of 9223372036854775807 conflicts.)
+pathname
- rename the conflicts according to which side they came from, which was the default behavior prior to v1.66
. For example, with --conflict-suffix path
, file.txt
from Path1 will be renamed file.txt.path1
, and file.txt
from Path2 will be renamed file.txt.path2
. If two non-identical suffixes are provided (ex. --conflict-suffix cloud,local
), the trailing digit is omitted. Importantly, note that with pathname
, there is no auto-numbering beyond 2
, so if file.txt.path2
somehow already exists, it will be overwritten. Using a dynamic date variable in your --conflict-suffix
(see below) is one possible way to avoid this. Note also that conflicts-of-conflicts are possible, if the original conflict is not manually resolved -- for example, if for some reason you edited file.txt.path1
on both sides, and those edits were different, the result would be file.txt.path1.path1
and file.txt.path1.path2
(in addition to file.txt.path2
.)
+delete
- keep the winner only and delete the loser, instead of renaming it. If a winner cannot be determined (see --conflict-resolve
for details on how this could happen), delete
is ignored and the default num
is used instead (i.e. both versions are kept and renamed, and neither is deleted.) delete
is inherently the most destructive option, so use it only with care.
+
+For all of the above options, note that if a winner cannot be determined (see --conflict-resolve
for details on how this could happen), or if --conflict-resolve
is not in use, both files will be renamed.
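+For example (paths are placeholders), to keep only the winning version of each conflict and discard the loser:
+rclone bisync Path1 Path2 --conflict-resolve newer --conflict-loser delete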
+--conflict-suffix STRING[,STRING]
+--conflict-suffix
controls the suffix that is appended when bisync renames a --conflict-loser
(default: conflict
). --conflict-suffix
will accept either one string or two comma-separated strings to assign different suffixes to Path1 vs. Path2. This may be helpful later in identifying the source of the conflict. (For example, --conflict-suffix dropboxconflict,laptopconflict
)
+With --conflict-loser num
, a number is always appended to the suffix. With --conflict-loser pathname
, a number is appended only when one suffix is specified (or when two identical suffixes are specified.) i.e. with --conflict-loser pathname
, all of the following would produce exactly the same result:
+--conflict-suffix path
+--conflict-suffix path,path
+--conflict-suffix path1,path2
+Suffixes may be as short as 1 character. By default, the suffix is appended after any other extensions (ex. file.jpg.conflict1
), however, this can be changed with the --suffix-keep-extension
flag (i.e. to instead result in file.conflict1.jpg
).
+--conflict-suffix
supports several dynamic date variables when enclosed in curly braces as globs. This can be helpful to track the date and/or time that each conflict was handled by bisync. For example:
+--conflict-suffix {DateOnly}-conflict
+// result: myfile.txt.2006-01-02-conflict1
+All of the formats described here and here are supported, but take care to ensure that your chosen format does not use any characters that are illegal on your remotes (for example, macOS does not allow colons in filenames, and slashes are also best avoided as they are often interpreted as directory separators.) To address this particular issue, an additional {MacFriendlyTime}
(or just {mac}
) option is supported, which results in 2006-01-02 0304PM
.
+Note that --conflict-suffix
is entirely separate from rclone's main --suffix
flag. This is intentional, as users may wish to use both flags simultaneously, if also using --backup-dir
.
+Finally, note that the default in bisync prior to v1.66
was to rename conflicts with ..path1
and ..path2
(with two periods, and path
instead of conflict
.) Bisync now defaults to a single dot instead of a double dot, but additional dots can be added by including them in the specified suffix string. For example, for behavior equivalent to the previous default, use:
+[--conflict-resolve none] --conflict-loser pathname --conflict-suffix .path
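+For example (paths are placeholders), to tag each renamed loser with the date the conflict was handled:
+rclone bisync Path1 Path2 --conflict-loser num --conflict-suffix {DateOnly}-conflict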
+--check-sync
Enabled by default, the check-sync function checks that all of the same files exist in both the Path1 and Path2 history listings. This check-sync integrity check is performed at the end of the sync run by default. Any untrapped failing copy/deletes between the two paths might result in differences between the two listings and in the untracked file content differences between the two paths. A resync run would correct the error.
Note that the default-enabled integrity check locally executes a load of both the final Path1 and Path2 listings, and thus adds to the run time of a sync. Using --check-sync=false
will disable it and may significantly reduce the sync run times for very large numbers of files.
The check may be run manually with --check-sync=only
. It runs only the integrity check and terminates without actually synching.
-See also: Concurrent modifications
---ignore-listing-checksum
-By default, bisync will retrieve (or generate) checksums (for backends that support them) when creating the listings for both paths, and store the checksums in the listing files. --ignore-listing-checksum
will disable this behavior, which may speed things up considerably, especially on backends (such as local) where hashes must be computed on the fly instead of retrieved. Please note the following:
-
-- While checksums are (by default) generated and stored in the listing files, they are NOT currently used for determining diffs (deltas). It is anticipated that full checksum support will be added in a future version.
---ignore-listing-checksum
is NOT the same as --ignore-checksum
, and you may wish to use one or the other, or both. In a nutshell: --ignore-listing-checksum
controls whether checksums are considered when scanning for diffs, while --ignore-checksum
controls whether checksums are considered during the copy/sync operations that follow, if there ARE diffs.
-- Unless
--ignore-listing-checksum
is passed, bisync currently computes hashes for one path even when there's no common hash with the other path (for example, a crypt remote.)
-- If both paths support checksums and have a common hash, AND
--ignore-listing-checksum
was not specified when creating the listings, --check-sync=only
can be used to compare Path1 vs. Path2 checksums (as of the time the previous listings were created.) However, --check-sync=only
will NOT include checksums if the previous listings were generated on a run using --ignore-listing-checksum
. For a more robust integrity check of the current state, consider using check
(or cryptcheck
, if at least one path is a crypt
remote.)
-
---resilient
+Note that currently, --check-sync
only checks listing snapshots and NOT the actual files on the remotes. Note also that the listing snapshots will not know about any changes that happened during or after the latest bisync run, as those will be discovered on the next run. Therefore, while listings should always match each other at the end of a bisync run, it is expected that they will not match the underlying remotes, nor will the remotes match each other, if there were changes during or after the run. This is normal, and any differences will be detected and synced on the next run.
+For a robust integrity check of the current state of the remotes (as opposed to just their listing snapshots), consider using check
(or cryptcheck
, if at least one path is a crypt
remote) instead of --check-sync
, keeping in mind that differences are expected if files changed during or after your last bisync run.
+For example, a possible sequence could look like this:
+
+- Normally scheduled bisync run:
+
+rclone bisync Path1 Path2 -MPc --check-access --max-delete 10 --filters-file /path/to/filters.txt -v --no-cleanup --ignore-listing-checksum --disable ListR --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient
+
+- Periodic independent integrity check (perhaps scheduled nightly or weekly):
+
+rclone check -MvPc Path1 Path2 --filter-from /path/to/filters.txt
+
+- If diffs are found, you have some choices to correct them. If one side is more up-to-date and you want to make the other side match it, you could run:
+
+rclone sync Path1 Path2 --filter-from /path/to/filters.txt --create-empty-src-dirs -MPc -v
+(or switch Path1 and Path2 to make Path2 the source-of-truth)
+Or, if neither side is totally up-to-date, you could run a --resync
to bring them back into agreement (but remember that this could cause deleted files to re-appear.)
+Note also that rclone check
does not currently include empty directories, so if you want to know if any empty directories are out of sync, consider alternatively running the above rclone sync
command with --dry-run
added.
+See also: Concurrent modifications, --resilient
+--resilient
Caution: this is an experimental feature. Use at your own risk!
By default, most errors or interruptions will cause bisync to abort and require --resync
to recover. This is a safety feature, to prevent bisync from running again until a user checks things out. However, in some cases, bisync can go too far and enforce a lockout when one isn't actually necessary, like for certain less-serious errors that might resolve themselves on the next run. When --resilient
is specified, bisync tries its best to recover and self-correct, and only requires --resync
as a last resort when a human's involvement is absolutely necessary. The intended use case is for running bisync as a background process (such as via scheduled cron).
When using --resilient
mode, bisync will still report the error and abort, however it will not lock out future runs -- allowing the possibility of retrying at the next normally scheduled time, without requiring a --resync
first. Examples of such retryable errors include access test failures, missing listing files, and filter change detections. These safety features will still prevent the current run from proceeding -- the difference is that if conditions have improved by the time of the next run, that next run will be allowed to proceed. Certain more serious errors will still enforce a --resync
lockout, even in --resilient
mode, to prevent data loss.
-Behavior of --resilient
may change in a future version.
+Behavior of --resilient
may change in a future version. (See also: --recover
, --max-lock
, Graceful Shutdown)
+--recover
+If --recover
is set, in the event of a sudden interruption or other un-graceful shutdown, bisync will attempt to automatically recover on the next run, instead of requiring --resync
. Bisync is able to recover robustly by keeping one "backup" listing at all times, representing the state of both paths after the last known successful sync. Bisync can then compare the current state with this snapshot to determine which changes it needs to retry. Changes that were synced after this snapshot (during the run that was later interrupted) will appear to bisync as if they are "new or changed on both sides", but in most cases this is not a problem, as bisync will simply do its usual "equality check" and learn that no action needs to be taken on these files, since they are already identical on both sides.
+In the rare event that a file is synced successfully during a run that later aborts, and then that same file changes AGAIN before the next run, bisync will think it is a sync conflict, and handle it accordingly. (From bisync's perspective, the file has changed on both sides since the last trusted sync, and the files on either side are not currently identical.) Therefore, --recover
carries with it a slightly increased chance of having conflicts -- though in practice this is pretty rare, as the conditions required to cause it are quite specific. This risk can be reduced by using bisync's "Graceful Shutdown" mode (triggered by sending SIGINT
or Ctrl+C
), when you have the choice, instead of forcing a sudden termination.
+--recover
and --resilient
are similar, but distinct -- the main difference is that --resilient
is about retrying, while --recover
is about recovering. Most users will probably want both. --resilient
allows retrying when bisync has chosen to abort itself due to safety features such as failing --check-access
or detecting a filter change. --resilient
does not cover external interruptions such as a user shutting down their computer in the middle of a sync -- that is what --recover
is for.
+--max-lock
+Bisync uses lock files as a safety feature to prevent interference from other bisync runs while it is running. Bisync normally removes these lock files at the end of a run, but if bisync is abruptly interrupted, these files will be left behind. By default, they will lock out all future runs, until the user has a chance to manually check things out and remove the lock. As an alternative, --max-lock
can be used to make them automatically expire after a certain period of time, so that future runs are not locked out forever, and auto-recovery is possible. --max-lock
can be any duration 2m
or greater (or 0
to disable). If set, lock files older than this will be considered "expired", and future runs will be allowed to disregard them and proceed. (Note that the --max-lock
duration must be set by the process that left the lock file -- not the later one interpreting it.)
+If set, bisync will also "renew" these lock files every --max-lock minus one minute
throughout a run, for extra safety. (For example, with --max-lock 5m
, bisync would renew the lock file (for another 5 minutes) every 4 minutes until the run has completed.) In other words, it should not be possible for a lock file to pass its expiration time while the process that created it is still running -- and you can therefore be reasonably sure that any expired lock file you may find was left there by an interrupted run, not one that is still running and just taking a while.
+If --max-lock
is 0
or not set, the default is that lock files will never expire, and will block future runs (of these same two bisync paths) indefinitely.
+For maximum resilience from disruptions, consider setting a relatively short duration like --max-lock 2m
along with --resilient
and --recover
, and a relatively frequent cron schedule. The result will be a very robust "set-it-and-forget-it" bisync run that can automatically bounce back from almost any interruption it might encounter, without requiring the user to get involved and run a --resync
. (See also: Graceful Shutdown mode)
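+For example, a minimal "set-it-and-forget-it" invocation along these lines (paths and durations are illustrative, not prescriptive) might be:
+rclone bisync Path1 Path2 --resilient --recover --max-lock 2m -MPc -v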
+--backup-dir1 and --backup-dir2
+As of v1.66
, --backup-dir
is supported in bisync. Because --backup-dir
must be a non-overlapping path on the same remote, Bisync has introduced new --backup-dir1
and --backup-dir2
flags to support separate backup-dirs for Path1
and Path2
(bisyncing between different remotes with --backup-dir
would not otherwise be possible.) --backup-dir1
and --backup-dir2
can use different remotes from each other, but --backup-dir1
must use the same remote as Path1
, and --backup-dir2
must use the same remote as Path2
. Each backup directory must not overlap its respective bisync Path without being excluded by a filter rule.
+The standard --backup-dir
will also work, if both paths use the same remote (but note that deleted files from both paths would be mixed together in the same dir). If either --backup-dir1
and --backup-dir2
are set, they will override --backup-dir
.
+Example:
+rclone bisync /Users/someuser/some/local/path/Bisync gdrive:Bisync --backup-dir1 /Users/someuser/some/local/path/BackupDir --backup-dir2 gdrive:BackupDir --suffix -2023-08-26 --suffix-keep-extension --check-access --max-delete 10 --filters-file /Users/someuser/some/local/path/bisync_filters.txt --no-cleanup --ignore-listing-checksum --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient -MvP --drive-skip-gdocs --fix-case
+In this example, if the user deletes a file in /Users/someuser/some/local/path/Bisync
, bisync will propagate the delete to the other side by moving the corresponding file from gdrive:Bisync
to gdrive:BackupDir
. If the user deletes a file from gdrive:Bisync
, bisync moves it from /Users/someuser/some/local/path/Bisync
to /Users/someuser/some/local/path/BackupDir
.
+In the event of a rename due to a sync conflict, the rename is not considered a delete, unless a previous conflict with the same name already exists and would get overwritten.
+See also: --suffix
, --suffix-keep-extension
Operation
Runtime flow details
bisync retains the listings of the Path1
and Path2
filesystems from the prior run. On each successive run it will:
@@ -11888,7 +12597,7 @@ rclone copy Path2 Path1 --create-empty-src-dirs
Safety measures
- Lock file prevents multiple simultaneous runs when a prior run is taking a while. This can be particularly useful if bisync is run by a cron scheduler.
-- Handle change conflicts non-destructively by creating
..path1
and ..path2
file versions.
+- Handle change conflicts non-destructively by creating
.conflict1
, .conflict2
, etc. file versions, according to --conflict-resolve
, --conflict-loser
, and --conflict-suffix
settings.
- File system access health check using
RCLONE_TEST
files (see the --check-access
flag).
- Abort on excessive deletes - protects against a failed listing being interpreted as all the files were deleted. See the
--max-delete
and --force
flags.
- If something evil happens, bisync goes into a safe state to block damage by later runs. (See Error Handling)
@@ -11986,14 +12695,14 @@ rclone copy Path2 Path1 --create-empty-src-dirs
Path1 new AND Path2 new |
File is new on Path1 AND new on Path2 (and Path1 version is NOT identical to Path2) |
-Files renamed to _Path1 and _Path2 |
-rclone copy _Path2 file to Path1, rclone copy _Path1 file to Path2 |
+Conflicts handled according to --conflict-resolve & --conflict-loser settings |
+default: rclone copy renamed Path2.conflict2 file to Path1, rclone copy renamed Path1.conflict1 file to Path2 |
Path2 newer AND Path1 changed |
File is newer on Path2 AND also changed (newer/older/size) on Path1 (and Path1 version is NOT identical to Path2) |
-Files renamed to _Path1 and _Path2 |
-rclone copy _Path2 file to Path1, rclone copy _Path1 file to Path2 |
+Conflicts handled according to --conflict-resolve & --conflict-loser settings |
+default: rclone copy renamed Path2.conflict2 file to Path1, rclone copy renamed Path1.conflict1 file to Path2 |
Path2 newer AND Path1 deleted |
@@ -12015,59 +12724,45 @@ rclone copy Path2 Path1 --create-empty-src-dirs
-As of rclone v1.64
, bisync is now better at detecting false positive sync conflicts, which would previously have resulted in unnecessary renames and duplicates. Now, when bisync comes to a file that it wants to rename (because it is new/changed on both sides), it first checks whether the Path1 and Path2 versions are currently identical (using the same underlying function as check
.) If bisync concludes that the files are identical, it will skip them and move on. Otherwise, it will create renamed ..Path1
and ..Path2
duplicates, as before. This behavior also improves the experience of renaming directories, as a --resync
is no longer required, so long as the same change has been made on both sides.
+As of rclone v1.64
, bisync is now better at detecting false positive sync conflicts, which would previously have resulted in unnecessary renames and duplicates. Now, when bisync comes to a file that it wants to rename (because it is new/changed on both sides), it first checks whether the Path1 and Path2 versions are currently identical (using the same underlying function as check
.) If bisync concludes that the files are identical, it will skip them and move on. Otherwise, it will create renamed duplicates, as before. This behavior also improves the experience of renaming directories, as a --resync
is no longer required, so long as the same change has been made on both sides.
All files changed check
If all prior existing files on either of the filesystems have changed (e.g. timestamps have changed due to changing the system's timezone) then bisync will abort without making any changes. Any new files are not considered for this check. You could use --force
to force the sync (whichever side has the changed timestamp files wins). Alternately, a --resync
may be used (Path1 versions will be pushed to Path2). Consider the situation carefully and perhaps use --dry-run
before you commit to the changes.
Modification times
-Bisync relies on file timestamps to identify changed files and will refuse to operate if backend lacks the modification time support.
-If you or your application should change the content of a file without changing the modification time then bisync will not notice the change, and thus will not copy it to the other side.
-Note that on some cloud storage systems it is not possible to have file timestamps that match precisely between the local and other filesystems.
-Bisync's approach to this problem is by tracking the changes on each side separately over time with a local database of files in that side then applying the resulting changes on the other side.
+By default, bisync compares files by modification time and size. If you or your application should change the content of a file without changing the modification time and size, then bisync will not notice the change, and thus will not copy it to the other side. As an alternative, consider comparing by checksum (if your remotes support it). See --compare
for details.
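+For example, a sketch of a checksum-based comparison (assuming both remotes support a common hash) could be:
+rclone bisync Path1 Path2 --compare size,modtime,checksum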
Error handling
Certain bisync critical errors, such as file copy/move failing, will result in a bisync lockout of following runs. The lockout is asserted because the sync status and history of the Path1 and Path2 filesystems cannot be trusted, so it is safer to block any further changes until someone checks things out. The recovery is to do a --resync
again.
It is recommended to use --resync --dry-run --verbose
initially and carefully review what changes will be made before running the --resync
without --dry-run
.
Most of these events come up due to an error status from an internal call. On such a critical error the {...}.path1.lst
and {...}.path2.lst
listing files are renamed to extension .lst-err
, which blocks any future bisync runs (since the normal .lst
files are not found). Bisync keeps them under bisync
subdirectory of the rclone cache directory, typically at ${HOME}/.cache/rclone/bisync/
on Linux.
Some errors are considered temporary and re-running the bisync is not blocked. The critical return blocks further bisync runs.
-See also: --resilient
+See also: --resilient
, --recover
, --max-lock
, Graceful Shutdown
Lock file
-When bisync is running, a lock file is created in the bisync working directory, typically at ~/.cache/rclone/bisync/PATH1..PATH2.lck
on Linux. If bisync should crash or hang, the lock file will remain in place and block any further runs of bisync for the same paths. Delete the lock file as part of debugging the situation. The lock file effectively blocks follow-on (e.g., scheduled by cron) runs when the prior invocation is taking a long time. The lock file contains PID of the blocking process, which may help in debug.
+When bisync is running, a lock file is created in the bisync working directory, typically at ~/.cache/rclone/bisync/PATH1..PATH2.lck
on Linux. If bisync should crash or hang, the lock file will remain in place and block any further runs of bisync for the same paths. Delete the lock file as part of debugging the situation. The lock file effectively blocks follow-on (e.g., scheduled by cron) runs when the prior invocation is taking a long time. The lock file contains the PID of the blocking process, which may help in debugging. Lock files can be set to automatically expire after a certain amount of time, using the --max-lock
flag.
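+For example, on Linux a stale lock left by an interrupted run could be removed with something like (path illustrative):
+rm ~/.cache/rclone/bisync/PATH1..PATH2.lck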
Note that while concurrent bisync runs are allowed, be very cautious that there is no overlap in the trees being synched between concurrent runs, lest there be replicated files, deleted files and general mayhem.
Return codes
rclone bisync returns the following codes to the calling program:
- 0 on a successful run,
- 1 for a non-critical failing run (a rerun may be successful),
- 2 for a critically aborted run (requires a --resync to recover).
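A minimal wrapper sketch (paths and logging are illustrative) could branch on the exit code, for example:
rclone bisync Path1 Path2 --resilient --recover
status=$?
if [ "$status" -eq 2 ]; then
  echo "bisync critical abort: a --resync is required" >> ~/bisync.log
fi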
-Limitations
+Graceful Shutdown
+Bisync has a "Graceful Shutdown" mode which is activated by sending SIGINT
or pressing Ctrl+C
during a run. Once triggered, bisync will use best efforts to exit cleanly before the timer runs out. If bisync is in the middle of transferring files, it will attempt to cleanly empty its queue by finishing what it has started but not taking more. If it cannot do so within 30 seconds, it will cancel the in-progress transfers at that point and then give itself a maximum of 60 seconds to wrap up, save its state for next time, and exit. With the -vP
flags you will see constant status updates and a final confirmation of whether or not the graceful shutdown was successful.
+At any point during the "Graceful Shutdown" sequence, a second SIGINT
or Ctrl+C
will trigger an immediate, un-graceful exit, which will leave things in a messier state. Usually a robust recovery will still be possible if using --recover
mode, otherwise you will need to do a --resync
.
+If you plan to use Graceful Shutdown mode, it is recommended to use --resilient
and --recover
, and it is important to NOT use --inplace
, otherwise you risk leaving partially-written files on one side, which may be confused for real files on the next run. Note also that in the event of an abrupt interruption, a lock file will be left behind to block concurrent runs. You will need to delete it before you can proceed with the next run (or wait for it to expire on its own, if using --max-lock
.)
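+For example, a graceful shutdown of a scheduled run could be requested from another terminal with something like (process-matching pattern is illustrative):
+pkill -INT -f "rclone bisync"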
+Limitations
Supported backends
-Bisync is considered BETA and has been tested with the following backends: - Local filesystem - Google Drive - Dropbox - OneDrive - S3 - SFTP - Yandex Disk
+Bisync is considered BETA and has been tested with the following backends: - Local filesystem - Google Drive - Dropbox - OneDrive - S3 - SFTP - Yandex Disk - Crypt
It has not been fully tested with other services yet. If it works, or sorta works, please let us know and we'll update the list. Run the test suite to check for proper operation as described below.
-First release of rclone bisync
requires that underlying backend supports the modification time feature and will refuse to run otherwise. This limitation will be lifted in a future rclone bisync
release.
+The first release of rclone bisync
required both underlying backends to support modification times, and refused to run otherwise. This limitation has been lifted as of v1.66
, as bisync now supports comparing checksum and/or size instead of (or in addition to) modtime. See --compare
for details.
Concurrent modifications
-When using Local, FTP or SFTP remotes rclone does not create temporary files at the destination when copying, and thus if the connection is lost the created file may be corrupt, which will likely propagate back to the original path on the next sync, resulting in data loss. This will be solved in a future release, there is no workaround at the moment.
-Files that change during a bisync run may result in data loss. This has been seen in a highly dynamic environment, where the filesystem is getting hammered by running processes during the sync. The currently recommended solution is to sync at quiet times or filter out unnecessary directories and files.
-As an alternative approach, consider using --check-sync=false
(and possibly --resilient
) to make bisync more forgiving of filesystems that change during the sync. Be advised that this may cause bisync to miss events that occur during a bisync run, so it is a good idea to supplement this with a periodic independent integrity check, and corrective sync if diffs are found. For example, a possible sequence could look like this:
-
-- Normally scheduled bisync run:
-
-
rclone bisync Path1 Path2 -MPc --check-access --max-delete 10 --filters-file /path/to/filters.txt -v --check-sync=false --no-cleanup --ignore-listing-checksum --disable ListR --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient
-
-- Periodic independent integrity check (perhaps scheduled nightly or weekly):
-
-rclone check -MvPc Path1 Path2 --filter-from /path/to/filters.txt
-
-- If diffs are found, you have some choices to correct them. If one side is more up-to-date and you want to make the other side match it, you could run:
-
-rclone sync Path1 Path2 --filter-from /path/to/filters.txt --create-empty-src-dirs -MPc -v
-(or switch Path1 and Path2 to make Path2 the source-of-truth)
-Or, if neither side is totally up-to-date, you could run a --resync
to bring them back into agreement (but remember that this could cause deleted files to re-appear.)
-*Note also that rclone check
does not currently include empty directories, so if you want to know if any empty directories are out of sync, consider alternatively running the above rclone sync
command with --dry-run
added.
+When using Local, FTP or SFTP remotes with --inplace
, rclone does not create temporary files at the destination when copying, and thus if the connection is lost the created file may be corrupt, which will likely propagate back to the original path on the next sync, resulting in data loss. It is therefore recommended to omit --inplace
.
+Files that change during a bisync run may result in data loss. Prior to rclone v1.66
, this was commonly seen in highly dynamic environments, where the filesystem was getting hammered by running processes during the sync. As of rclone v1.66
, bisync was redesigned to use a "snapshot" model, greatly reducing the risks from changes during a sync. Changes that are not detected during the current sync will now be detected during the following sync, and will no longer cause the entire run to throw a critical error. There is additionally a mechanism to mark files as needing to be internally rechecked next time, for added safety. It should therefore no longer be necessary to sync only at quiet times -- however, note that an error can still occur if a file happens to change at the exact moment it's being read/written by bisync (same as would happen in rclone sync
.) (See also: --ignore-checksum
, --local-no-check-updated
)
Empty directories
By default, new/deleted empty directories on one path are not propagated to the other side. This is because bisync (and rclone) natively works on files, not directories. However, this can be changed with the --create-empty-src-dirs
flag, which works in much the same way as in sync
and copy
. When used, empty directories created or deleted on one side will also be created or deleted on the other side. The following should be noted:
- --create-empty-src-dirs is not compatible with --remove-empty-dirs. Use only one or the other (or neither).
- It is not recommended to switch back and forth between --create-empty-src-dirs and the default (no --create-empty-src-dirs) without running --resync. This is because it may appear as though all directories (not just the empty ones) were created/deleted, when actually you've just toggled between making them visible/invisible to bisync. It looks scarier than it is, but it's still probably best to stick to one or the other, and use --resync when you need to switch.
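For example, to propagate empty directories in both directions (paths illustrative):
rclone bisync Path1 Path2 --create-empty-src-dirs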
Renamed directories
-Renaming a folder on the Path1 side results in deleting all files on the Path2 side and then copying all files again from Path1 to Path2. Bisync sees this as all files in the old directory name as deleted and all files in the new directory name as new. Currently, the most effective and efficient method of renaming a directory is to rename it to the same name on both sides. (As of rclone v1.64
, a --resync
is no longer required after doing so, as bisync will automatically detect that Path1 and Path2 are in agreement.)
+By default, renaming a folder on the Path1 side results in deleting all files on the Path2 side and then copying all files again from Path1 to Path2. Bisync sees this as all files in the old directory name as deleted and all files in the new directory name as new.
+A recommended solution is to use --track-renames
, which is now supported in bisync as of rclone v1.66
. Note that --track-renames
is not available during --resync
, as --resync
does not delete anything (--track-renames
only supports sync
, not copy
.)
+Otherwise, the most effective and efficient method of renaming a directory is to rename it to the same name on both sides. (As of rclone v1.64
, a --resync
is no longer required after doing so, as bisync will automatically detect that Path1 and Path2 are in agreement.)
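+For example (a sketch; note that --track-renames generally needs a hash in common between the two sides, or an alternative --track-renames-strategy):
+rclone bisync Path1 Path2 --track-renames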
--fast-list
used by default
Unlike most other rclone commands, bisync uses --fast-list
by default, for backends that support it. In many cases this is desirable, however, there are some scenarios in which bisync could be faster without --fast-list
, and there is also a known issue concerning Google Drive users with many empty directories. For now, the recommended way to avoid using --fast-list
is to add --disable ListR
to all bisync commands. The default behavior may change in a future version.
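For example:
rclone bisync Path1 Path2 --disable ListR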
-Overridden Configs
-When rclone detects an overridden config, it adds a suffix like {ABCDE}
on the fly to the internal name of the remote. Bisync follows suit by including this suffix in its listing filenames. However, this suffix does not necessarily persist from run to run, especially if different flags are provided. So if next time the suffix assigned is {FGHIJ}
, bisync will get confused, because it's looking for a listing file with {FGHIJ}
, when the file it wants has {ABCDE}
. As a result, it throws Bisync critical error: cannot find prior Path1 or Path2 listings, likely due to critical error on prior run
and refuses to run again until the user runs a --resync
(unless using --resilient
). The best workaround at the moment is to set any backend-specific flags in the config file instead of specifying them with command flags. (You can still override them as needed for other rclone commands.)
-Case sensitivity
-Synching with case-insensitive filesystems, such as Windows or Box
, can result in file name conflicts. This will be fixed in a future release. The near-term workaround is to make sure that files on both sides don't have spelling case differences (Smile.jpg
vs. smile.jpg
).
+Case (and unicode) sensitivity
+As of v1.66
, case and unicode form differences no longer cause critical errors, and normalization (when comparing between filesystems) is handled according to the same flags and defaults as rclone sync
. See the following options (all of which are supported by bisync) to control this behavior more granularly:
- --fix-case
- --ignore-case-sync
- --no-unicode-normalization
- --local-unicode-normalization
and --local-case-sensitive
(caution: these are normally not what you want.)
+Note that in the (probably rare) event that --fix-case
is used AND a file is new/changed on both sides AND the checksums match AND the filename case does not match, the Path1 filename is considered the winner, for the purposes of --fix-case
(Path2 will be renamed to match it).
Windows support
Bisync has been tested on Windows 8.1, Windows 10 Pro 64-bit and on Windows GitHub runners.
Drive letters are allowed, including drive letters mapped to network drives (rclone bisync J:\localsync GDrive:
). If a drive letter is omitted, the shell's current drive is the default. Drive letters are a single character followed by :
, so cloud names must be more than one character long.
@@ -12220,12 +12915,15 @@ rclone copy Path2 Path1 --create-empty-src-dirs
2021/05/12 00:49:40 ERROR : Bisync aborted. Must run --resync to recover.
Denied downloads of "infected" or "abusive" files
Google Drive has a filter for certain file types (.exe
, .apk
, et cetera) that by default cannot be copied from Google Drive to the local filesystem. If you are having problems, run with --verbose
to see specifically which files are generating complaints. If the error is This file has been identified as malware or spam and cannot be downloaded
, consider using the flag --drive-acknowledge-abuse.
-Google Doc files
-Google docs exist as virtual files on Google Drive and cannot be transferred to other filesystems natively. While it is possible to export a Google doc to a normal file (with .xlsx
extension, for example), it is not possible to import a normal file back into a Google document.
-Bisync's handling of Google Doc files is to flag them in the run log output for user's attention and ignore them for any file transfers, deletes, or syncs. They will show up with a length of -1
in the listings. This bisync run is otherwise successful:
-2021/05/11 08:23:15 INFO : Synching Path1 "/path/to/local/tree/base/" with Path2 "GDrive:"
-2021/05/11 08:23:15 INFO : ...path2.lst-new: Ignoring incorrect line: "- -1 - - 2018-07-29T08:49:30.136000000+0000 GoogleDoc.docx"
-2021/05/11 08:23:15 INFO : Bisync successful
+Google Docs (and other files of unknown size)
+As of v1.66
, Google Docs (including Google Sheets, Slides, etc.) are now supported in bisync, subject to the same options, defaults, and limitations as in rclone sync
. When bisyncing drive with non-drive backends, the drive -> non-drive direction is controlled by --drive-export-formats
(default "docx,xlsx,pptx,svg"
) and the non-drive -> drive direction is controlled by --drive-import-formats
(default none.)
+For example, with the default export/import formats, a Google Sheet on the drive side will be synced to an .xlsx
file on the non-drive side. In the reverse direction, .xlsx
files with filenames that match an existing Google Sheet will be synced to that Google Sheet, while .xlsx
files that do NOT match an existing Google Sheet will be copied to drive as normal .xlsx
files (without conversion to Sheets, although the Google Drive web browser UI may still give you the option to open it as one.)
+If --drive-import-formats
is set (it's not, by default), then all of the specified formats will be converted to Google Docs, if there is no existing Google Doc with a matching name. Caution: such conversion can be quite lossy, and in most cases it's probably not what you want!
+To bisync Google Docs as URL shortcut links (in a manner similar to "Drive for Desktop"), use: --drive-export-formats url
(or alternatives.)
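+For example, a sketch of such a setup (local path is illustrative) could be:
+rclone bisync /path/to/local gdrive:Bisync --drive-export-formats url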
+Note that these link files cannot be edited on the non-drive side -- you will get errors if you try to sync an edited link file back to drive. They CAN be deleted (it will result in deleting the corresponding Google Doc.) If you create a .url
file on the non-drive side that does not match an existing Google Doc, bisyncing it will just result in copying the literal .url
file over to drive (no Google Doc will be created.) So, as a general rule of thumb, think of them as read-only placeholders on the non-drive side, and make all your changes on the drive side.
+Likewise, even with other export-formats, it is best to only move/rename Google Docs on the drive side. This is because otherwise, bisync will interpret this as a file deleted and another created, and accordingly, it will delete the Google Doc and create a new file at the new path. (Whether or not that new file is a Google Doc depends on --drive-import-formats
.)
+Lastly, take note that all Google Docs on the drive side have a size of -1
and no checksum. Therefore, they cannot be reliably synced with the --checksum
or --size-only
flags. (To be exact: they will still get created/deleted, and bisync's delta engine will notice changes and queue them for syncing, but the underlying sync function will consider them identical and skip them.) To work around this, use the default (modtime and size) instead of --checksum
or --size-only
.
+To ignore Google Docs entirely, use --drive-skip-gdocs
.
Usage examples
Cron
Rclone does not yet have a built-in capability to monitor the local file system for changes and must be blindly run periodically. On Windows this can be done using a Task Scheduler, on Linux you can use Cron which is described below.
@@ -12440,6 +13138,29 @@ Options:
Bisync adopts the differential synchronization technique, which is based on keeping history of changes performed by both synchronizing sides. See the Dual Shadow Method section in Neil Fraser's article.
Also note a number of academic publications by Benjamin Pierce about Unison and synchronization in general.
Changelog
+v1.66
+
+- Copies and deletes are now handled in one operation instead of two
+--track-renames
and --backup-dir
are now supported
+- Partial uploads known issue on
local
/ftp
/sftp
has been resolved (unless using --inplace
)
+- Final listings are now generated from sync results, to avoid needing to re-list
+- Bisync is now much more resilient to changes that happen during a bisync run, and far less prone to critical errors / undetected changes
+- Bisync is now capable of rolling a file listing back in cases of uncertainty, essentially marking the file as needing to be rechecked next time.
+- A few basic terminal colors are now supported, controllable with
--color
(AUTO
|NEVER
|ALWAYS
)
+- Initial listing snapshots of Path1 and Path2 are now generated concurrently, using the same "march" infrastructure as
check
and sync
, for performance improvements and less risk of error.
+- Fixed handling of unicode normalization and case insensitivity, support for
--fix-case
, --ignore-case-sync
, --no-unicode-normalization
+--resync
is now much more efficient (especially for users of --create-empty-src-dirs
)
+- Google Docs (and other files of unknown size) are now supported (with the same options as in
sync
)
+- Equality checks before a sync conflict rename now fall back to
cryptcheck
(when possible) or --download
, instead of --size-only
, when check
is not available.
+- Bisync no longer fails to find the correct listing file when configs are overridden with backend-specific flags.
+- Bisync now fully supports comparing based on any combination of size, modtime, and checksum, lifting the prior restriction on backends without modtime support.
+- Bisync now supports a "Graceful Shutdown" mode to cleanly cancel a run early without requiring
--resync
.
+- New
--recover
flag allows robust recovery in the event of interruptions, without requiring --resync
.
+- A new
--max-lock
setting allows lock files to automatically renew and expire, for better automatic recovery when a run is interrupted.
+- Bisync now supports auto-resolving sync conflicts and customizing rename behavior with new
--conflict-resolve
, --conflict-loser
, and --conflict-suffix
flags.
+- A new
--resync-mode
flag allows more control over which version of a file gets kept during a --resync
.
+- Bisync now supports
--retries
and --retries-sleep
(when --resilient
is set.)
+
v1.64
- Fixed an issue causing dry runs to inadvertently commit filter changes
@@ -12724,7 +13445,16 @@ y/e/d> y
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot
-Limitations
+--fichier-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_FICHIER_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
rclone about
is not supported by the 1Fichier backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs
(most free space) as a member of an rclone union remote.
See List of backends that do not support rclone about and rclone about
Alias
@@ -12796,212 +13526,17 @@ e/n/d/r/c/s/q> q
Type: string
Required: true
-Amazon Drive
-Amazon Drive, formerly known as Amazon Cloud Drive, is a cloud storage service run by Amazon for consumers.
-Status
-Important: rclone supports Amazon Drive only if you have your own set of API keys. Unfortunately the Amazon Drive developer program is now closed to new entries so if you don't already have your own set of keys you will not be able to use rclone with Amazon Drive.
-For the history on why rclone no longer has a set of Amazon Drive API keys see the forum.
-If you happen to know anyone who works at Amazon then please ask them to re-instate rclone into the Amazon Drive developer program - thanks!
-Configuration
-The initial setup for Amazon Drive involves getting a token from Amazon which you need to do in your browser. rclone config
walks you through it.
-The configuration process for Amazon Drive may involve using an oauth proxy. This is used to keep the Amazon credentials out of the source code. The proxy runs in Google's very secure App Engine environment and doesn't store any credentials which pass through it.
-Since rclone doesn't currently have its own Amazon Drive credentials so you will either need to have your own client_id
and client_secret
with Amazon Drive, or use a third-party oauth proxy in which case you will need to enter client_id
, client_secret
, auth_url
and token_url
.
-Note also if you are not using Amazon's auth_url
and token_url
, (ie you filled in something for those) then if setting up on a remote machine you can only use the copying the config method of configuration - rclone authorize
will not work.
-Here is an example of how to make a remote called remote
. First run:
- rclone config
-This will guide you through an interactive setup process:
-No remotes found, make a new one?
-n) New remote
-r) Rename remote
-c) Copy remote
-s) Set configuration password
-q) Quit config
-n/r/c/s/q> n
-name> remote
-Type of storage to configure.
-Choose a number from below, or type in your own value
-[snip]
-XX / Amazon Drive
- \ "amazon cloud drive"
-[snip]
-Storage> amazon cloud drive
-Amazon Application Client Id - required.
-client_id> your client ID goes here
-Amazon Application Client Secret - required.
-client_secret> your client secret goes here
-Auth server URL - leave blank to use Amazon's.
-auth_url> Optional auth URL
-Token server url - leave blank to use Amazon's.
-token_url> Optional token URL
-Remote config
-Make sure your Redirect URL is set to "http://127.0.0.1:53682/" in your custom config.
-Use web browser to automatically authenticate rclone with remote?
- * Say Y if the machine running rclone has a web browser you can use
- * Say N if running rclone on a (remote) machine without web browser access
-If not sure try Y. If Y failed, try N.
-y) Yes
-n) No
-y/n> y
-If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
-Log in and authorize rclone for access
-Waiting for code...
-Got code
---------------------
-[remote]
-client_id = your client ID goes here
-client_secret = your client secret goes here
-auth_url = Optional auth URL
-token_url = Optional token URL
-token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","refresh_token":"xxxxxxxxxxxxxxxxxx","expiry":"2015-09-06T16:07:39.658438471+01:00"}
---------------------
-y) Yes this is OK
-e) Edit this remote
-d) Delete this remote
-y/e/d> y
-See the remote setup docs for how to set it up on a machine with no Internet browser available.
-Note that rclone runs a webserver on your local machine to collect the token as returned from Amazon. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on http://127.0.0.1:53682/
and this it may require you to unblock it temporarily if you are running a host firewall.
-Once configured you can then use rclone
like this,
-List directories in top level of your Amazon Drive
-rclone lsd remote:
-List all the files in your Amazon Drive
-rclone ls remote:
-To copy a local directory to an Amazon Drive directory called backup
-rclone copy /home/source remote:backup
-Modification times and hashes
-Amazon Drive doesn't allow modification times to be changed via the API so these won't be accurate or used for syncing.
-It does support the MD5 hash algorithm, so for a more accurate sync, you can use the --checksum
flag.
-Restricted filename characters
-
-
-
-
-
-
-NUL |
-0x00 |
-␀ |
-
-
-/ |
-0x2F |
-/ |
-
-
-
-Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
-Deleting files
-Any files you delete with rclone will end up in the trash. Amazon don't provide an API to permanently delete files, nor to empty the trash, so you will have to do that with one of Amazon's apps or via the Amazon Drive website. As of November 17, 2016, files are automatically deleted by Amazon from the trash after 30 days.
-
-Let's say you usually use amazon.co.uk
. When you authenticate with rclone it will take you to an amazon.com
page to log in. Your amazon.co.uk
email and password should work here just fine.
-Standard options
-Here are the Standard options specific to amazon cloud drive (Amazon Drive).
---acd-client-id
-OAuth Client Id.
-Leave blank normally.
-Properties:
-
-- Config: client_id
-- Env Var: RCLONE_ACD_CLIENT_ID
-- Type: string
-- Required: false
-
---acd-client-secret
-OAuth Client Secret.
-Leave blank normally.
-Properties:
-
-- Config: client_secret
-- Env Var: RCLONE_ACD_CLIENT_SECRET
-- Type: string
-- Required: false
-
Advanced options
-Here are the Advanced options specific to amazon cloud drive (Amazon Drive).
---acd-token
-OAuth Access Token as a JSON blob.
+Here are the Advanced options specific to alias (Alias for an existing remote).
+--alias-description
+Description of the remote
Properties:
-- Config: token
-- Env Var: RCLONE_ACD_TOKEN
+- Config: description
+- Env Var: RCLONE_ALIAS_DESCRIPTION
- Type: string
- Required: false
---acd-auth-url
-Auth server URL.
-Leave blank to use the provider defaults.
-Properties:
-
-- Config: auth_url
-- Env Var: RCLONE_ACD_AUTH_URL
-- Type: string
-- Required: false
-
---acd-token-url
-Token server url.
-Leave blank to use the provider defaults.
-Properties:
-
-- Config: token_url
-- Env Var: RCLONE_ACD_TOKEN_URL
-- Type: string
-- Required: false
-
---acd-checkpoint
-Checkpoint for internal polling (debug).
-Properties:
-
-- Config: checkpoint
-- Env Var: RCLONE_ACD_CHECKPOINT
-- Type: string
-- Required: false
-
---acd-upload-wait-per-gb
-Additional time per GiB to wait after a failed complete upload to see if it appears.
-Sometimes Amazon Drive gives an error when a file has been fully uploaded but the file appears anyway after a little while. This happens sometimes for files over 1 GiB in size and nearly every time for files bigger than 10 GiB. This parameter controls the time rclone waits for the file to appear.
-The default value for this parameter is 3 minutes per GiB, so by default it will wait 3 minutes for every GiB uploaded to see if the file appears.
-You can disable this feature by setting it to 0. This may cause conflict errors as rclone retries the failed upload but the file will most likely appear correctly eventually.
-These values were determined empirically by observing lots of uploads of big files for a range of file sizes.
-Upload with the "-v" flag to see more info about what rclone is doing in this situation.
-Properties:
-
-- Config: upload_wait_per_gb
-- Env Var: RCLONE_ACD_UPLOAD_WAIT_PER_GB
-- Type: Duration
-- Default: 3m0s
-
---acd-templink-threshold
-Files >= this size will be downloaded via their tempLink.
-Files this size or more will be downloaded via their "tempLink". This is to work around a problem with Amazon Drive which blocks downloads of files bigger than about 10 GiB. The default for this is 9 GiB which shouldn't need to be changed.
-To download files above this threshold, rclone requests a "tempLink" which downloads the file through a temporary URL directly from the underlying S3 storage.
-Properties:
-
-- Config: templink_threshold
-- Env Var: RCLONE_ACD_TEMPLINK_THRESHOLD
-- Type: SizeSuffix
-- Default: 9Gi
-
---acd-encoding
-The encoding for the backend.
-See the encoding section in the overview for more info.
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_ACD_ENCODING
-- Type: Encoding
-- Default: Slash,InvalidUtf8,Dot
-
-Limitations
-Note that Amazon Drive is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".
-Amazon Drive has rate limiting so you may notice errors in the sync (429 errors). rclone will automatically retry the sync up to 3 times by default (see --retries
flag) which should hopefully work around this problem.
-Amazon Drive has an internal limit of file sizes that can be uploaded to the service. This limit is not officially published, but all files larger than this will fail.
-At the time of writing (Jan 2016) is in the area of 50 GiB per file. This means that larger files are likely to fail.
-Unfortunately there is no way for rclone to see that this failure is because of file size, so it will retry the operation, as any other failure. To avoid this problem, use --max-size 50000M
option to limit the maximum size of uploaded files. Note that --max-size
does not split files into segments, it only ignores files over this size.
-rclone about
is not supported by the Amazon Drive backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs
(most free space) as a member of an rclone union remote.
-See List of backends that do not support rclone about and rclone about
Amazon S3 Storage Providers
The S3 backend can be used with a number of different providers:
@@ -13045,7 +13580,7 @@ y/e/d> y
rclone ls remote:bucket
Sync /home/local/directory
to the remote bucket, deleting any excess files in the bucket.
rclone sync --interactive /home/local/directory remote:bucket
-Configuration
+Configuration
Here is an example of making an s3 configuration for the AWS S3 provider. Most applies to the other providers as well, any differences are described below.
First run
rclone config
@@ -13059,7 +13594,7 @@ name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
[snip]
-XX / Amazon S3 Compliant Storage Providers including AWS, Ceph, ChinaMobile, ArvanCloud, Dreamhost, IBM COS, Liara, Minio, and Tencent COS
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
[snip]
Storage> s3
@@ -13244,7 +13779,7 @@ y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d>
-Modification times and hashes
+Modification times and hashes
Modification times
The modified time is stored as metadata on the object as X-Amz-Meta-Mtime
as floating point since the epoch, accurate to 1 ns.
If the modification time needs to be updated rclone will attempt to perform a server side copy to update the modification if the object can be copied in a single part. In the case the object is larger than 5Gb or is in Glacier or Glacier Deep Archive storage the object will be uploaded rather than copied.
@@ -13340,7 +13875,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
If there are real files present with the same names as versions, then behaviour of --s3-versions
can be unpredictable.
Cleanup
If you run rclone cleanup s3:bucket
then it will remove all pending multipart uploads older than 24 hours. You can use the --interactive
/i
or --dry-run
flag to see exactly what it will do. If you want more control over the expiry date then run rclone backend cleanup s3:bucket -o max-age=1h
to expire all uploads older than one hour. You can use rclone backend list-multipart-uploads s3:bucket
to see the pending multipart uploads.
-Restricted filename characters
+Restricted filename characters
S3 allows any valid UTF-8 string as a key.
Invalid UTF-8 bytes will be replaced, as they can't be used in XML.
The following characters are replaced since these are problematic when dealing with the REST API:
@@ -13434,6 +13969,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
GetObject
PutObject
PutObjectACL
+CreateBucket
(unless using s3-no-check-bucket)
When using the lsd
subcommand, the ListAllMyBuckets
permission is required.
Example policy:
@@ -13468,6 +14004,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
- This is a policy that can be used when creating bucket. It assumes that
USER_NAME
has been created.
- The Resource entry must include both resource ARNs, as one implies the bucket and the other implies the bucket's objects.
+- When using s3-no-check-bucket and the bucket already exists, the
"arn:aws:s3:::BUCKET_NAME"
doesn't have to be included.
For reference, here's an Ansible script that will generate one or more buckets that will work with rclone sync
.
Key Management System (KMS)
@@ -13483,7 +14020,7 @@ $ rclone -q --s3-versions ls s3:cleanup-test
If you configure a default retention period on a bucket, requests to upload objects in such a bucket must include the Content-MD5 header.
As mentioned in the Modification times and hashes section, small files that are not uploaded as multipart, use a different tag, causing the upload to fail. A simple solution is to set the --s3-upload-cutoff 0
and force all the files to be uploaded as multipart.
-Standard options
+Standard options
Here are the Standard options specific to s3 (Amazon S3 Compliant Storage Providers including AWS, Alibaba, ArvanCloud, Ceph, ChinaMobile, Cloudflare, DigitalOcean, Dreamhost, GCS, HuaweiOBS, IBMCOS, IDrive, IONOS, LyveCloud, Leviia, Liara, Linode, Minio, Netease, Petabox, RackCorp, Rclone, Scaleway, SeaweedFS, StackPath, Storj, Synology, TencentCOS, Wasabi, Qiniu and others).
--s3-provider
Choose your S3 provider.
@@ -14315,8 +14852,8 @@ Windows: "%USERPROFILE%\.aws\credentials"
Required: false
--s3-upload-concurrency
-Concurrency for multipart uploads.
-This is the number of chunks of the same file that are uploaded concurrently.
+Concurrency for multipart uploads and copies.
+This is the number of chunks of the same file that are uploaded concurrently for multipart uploads and copies.
If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.
Properties:
@@ -14347,6 +14884,16 @@ Windows: "%USERPROFILE%\.aws\credentials"
- Type: bool
- Default: false
+--s3-use-dual-stack
+If true use AWS S3 dual-stack endpoint (IPv6 support).
+See AWS Docs on Dualstack Endpoints
+Properties:
+
+- Config: use_dual_stack
+- Env Var: RCLONE_S3_USE_DUAL_STACK
+- Type: bool
+- Default: false
+
--s3-use-accelerate-endpoint
If true use the AWS S3 accelerated endpoint.
See: AWS S3 Transfer acceleration
@@ -14546,6 +15093,18 @@ Windows: "%USERPROFILE%\.aws\credentials"
Type: Time
Default: off
+--s3-version-deleted
+Show deleted file markers when using versions.
+This shows deleted file markers in the listing when using versions. These will appear as 0 size files. The only operation which can be performed on them is deletion.
+Deleting a delete marker will reveal the previous version.
+Deleted files will always show with a timestamp.
+Properties:
+
+- Config: version_deleted
+- Env Var: RCLONE_S3_VERSION_DELETED
+- Type: bool
+- Default: false
+
--s3-decompress
If set this will decompress gzip encoded objects.
It is possible to upload objects to S3 with "Content-Encoding: gzip" set. Normally rclone will download these files as compressed objects.
@@ -14631,6 +15190,15 @@ Windows: "%USERPROFILE%\.aws\credentials"
Type: Tristate
Default: unset
+--s3-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_S3_DESCRIPTION
+- Type: string
+- Required: false
+
User metadata is stored as x-amz-meta- keys. S3 metadata keys are case insensitive and are always returned in lower case.
Here are the possible system metadata items for the s3 backend.
@@ -15067,10 +15635,10 @@ Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
-Storage> 5
+Storage> s3
Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
@@ -15185,18 +15753,11 @@ e/n/d/r/c/s/q> q
Select "s3" storage.
Choose a number from below, or type in your own value
- 1 / Alias for an existing remote
- \ "alias"
- 2 / Amazon Drive
- \ "amazon cloud drive"
- 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, Liara, ArvanCloud, Minio, IBM COS)
- \ "s3"
- 4 / Backblaze B2
- \ "b2"
[snip]
- 23 / HTTP
- \ "http"
-Storage> 3
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
+ \ "s3"
+[snip]
+Storage> s3
- Select IBM COS as the S3 Storage Provider.
@@ -15339,7 +15900,7 @@ Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
-XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -15434,7 +15995,7 @@ name> ionos-fra
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
-XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -15616,15 +16177,8 @@ n/s/q> n
Select s3
storage.
Choose a number from below, or type in your own value
- 1 / 1Fichier
- \ (fichier)
- 2 / Akamai NetStorage
- \ (netstorage)
- 3 / Alias for an existing remote
- \ (alias)
- 4 / Amazon Drive
- \ (amazon cloud drive)
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
+[snip]
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -15837,7 +16391,7 @@ name> remote
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
-XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -16064,7 +16618,7 @@ Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
[snip]
- 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
[snip]
Storage> s3
@@ -16166,7 +16720,7 @@ Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
...
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
...
Storage> s3
@@ -16414,15 +16968,8 @@ n/s/q> n
Select s3
storage.
Choose a number from below, or type in your own value
- 1 / 1Fichier
- \ (fichier)
- 2 / Akamai NetStorage
- \ (netstorage)
- 3 / Alias for an existing remote
- \ (alias)
- 4 / Amazon Drive
- \ (amazon cloud drive)
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
+[snip]
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -16609,7 +17156,7 @@ Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
- X / Amazon S3 Compliant Storage Providers including AWS, ...Linode, ...and others
+XX / Amazon S3 Compliant Storage Providers including AWS, ...Linode, ...and others
\ (s3)
[snip]
Storage> s3
@@ -16828,13 +17375,8 @@ n/s/q> n
Select s3
storage.
Choose a number from below, or type in your own value
-1 / 1Fichier
- \ "fichier"
- 2 / Alias for an existing remote
- \ "alias"
- 3 / Amazon Drive
- \ "amazon cloud drive"
- 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS
+[snip]
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
[snip]
Storage> s3
@@ -16928,7 +17470,7 @@ cos s3
For Netease NOS configure as per the configurator rclone config
setting the provider Netease
. This will automatically set force_path_style = false
which is necessary for it to run properly.
Petabox
Here is an example of making a Petabox configuration. First run:
-
+
This will guide you through an interactive setup process.
No remotes found, make a new one?
n) New remote
@@ -17162,7 +17704,7 @@ Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, GCS, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
Storage> s3
@@ -17729,9 +18271,12 @@ Properties:
#### --b2-download-auth-duration
-Time before the authorization token will expire in s or suffix ms|s|m|h|d.
+Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
+
+This is used in combination with "rclone link" for making files
+accessible to the public and sets the duration before the download
+authorization token will expire.
-The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.
Properties:
@@ -17807,6 +18352,17 @@ Properties:
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+#### --b2-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_B2_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the b2 backend.
@@ -18266,6 +18822,17 @@ Properties:
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
+#### --box-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_BOX_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -18898,6 +19465,17 @@ Properties:
- Type: Duration
- Default: 1s
+#### --cache-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_CACHE_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the cache backend.
@@ -19338,6 +19916,17 @@ Properties:
- If meta format is set to "none", rename transactions will always be used.
- This method is EXPERIMENTAL, don't use on production systems.
+#### --chunker-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_CHUNKER_DESCRIPTION
+- Type: string
+- Required: false
+
# Citrix ShareFile
@@ -19583,6 +20172,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot
+#### --sharefile-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SHAREFILE_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -20053,6 +20653,22 @@ Properties:
- Type: bool
- Default: false
+#### --crypt-strict-names
+
+If set, this will raise an error when crypt comes across a filename that can't be decrypted.
+
+(By default, rclone will just log a NOTICE and continue as normal.)
+This can happen if encrypted and unencrypted files are stored in the same
+directory (which is not recommended.) It may also indicate a more serious
+problem that should be investigated.
+
+Properties:
+
+- Config: strict_names
+- Env Var: RCLONE_CRYPT_STRICT_NAMES
+- Type: bool
+- Default: false
+
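+As a sketch (assuming an existing crypt remote called `secret:`), the option
+can be enabled for a single run on the command line or via its environment
+variable:
+
+    rclone lsf secret: --crypt-strict-names
+    RCLONE_CRYPT_STRICT_NAMES=true rclone lsf secret:
+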
#### --crypt-filename-encoding
How to encode the encrypted filename to text string.
@@ -20090,6 +20706,17 @@ Properties:
- Type: string
- Default: ".bin"
+#### --crypt-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_CRYPT_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -20258,7 +20885,7 @@ encoding is modified in two ways:
* we strip the padding character `=`
`base32` is used rather than the more efficient `base64` so rclone can be
-used on case insensitive remotes (e.g. Windows, Amazon Drive).
+used on case insensitive remotes (e.g. Windows, Box, Dropbox, Onedrive etc).
### Key derivation
@@ -20391,6 +21018,17 @@ Properties:
- Type: SizeSuffix
- Default: 20Mi
+#### --compress-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_COMPRESS_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -20496,6 +21134,21 @@ Properties:
- Type: SpaceSepList
- Default:
+### Advanced options
+
+Here are the Advanced options specific to combine (Combine several remotes into one).
+
+#### --combine-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_COMBINE_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -20929,6 +21582,17 @@ Properties:
- Type: Duration
- Default: 10m0s
+#### --dropbox-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_DROPBOX_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -21189,6 +21853,17 @@ Properties:
- Type: Encoding
- Default: Slash,Del,Ctl,InvalidUtf8,Dot
+#### --filefabric-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_FILEFABRIC_DESCRIPTION
+- Type: string
+- Required: false
+
# FTP
@@ -21585,6 +22260,17 @@ Properties:
- "Ctl,LeftPeriod,Slash"
- VsFTPd can't handle file names starting with dot
+#### --ftp-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_FTP_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -22227,6 +22913,17 @@ Properties:
- Type: Encoding
- Default: Slash,CrLf,InvalidUtf8,Dot
+#### --gcs-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_GCS_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -23507,10 +24204,23 @@ Properties:
- "true"
- Get GCP IAM credentials from the environment (env vars or IAM).
+#### --drive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_DRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
User metadata is stored in the properties field of the drive object.
+Metadata is supported on files and directories.
+
Here are the possible system metadata items for the drive backend.
| Name | Help | Type | Example | Read Only |
@@ -24247,6 +24957,18 @@ This will guide you through an interactive setup process:
- Config: batch_commit_timeout
- Env Var: RCLONE_GPHOTOS_BATCH_COMMIT_TIMEOUT
- Type: Duration
- Default: 10m0s

+#### --gphotos-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_GPHOTOS_DESCRIPTION
+- Type: string
+- Required: false
+
+## Limitations
@@ -24489,6 +25211,17 @@ Properties:
- Type: SizeSuffix
- Default: 0
+#### --hasher-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_HASHER_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -24780,6 +25513,17 @@ Properties:
- Type: Encoding
- Default: Slash,Colon,Del,Ctl,InvalidUtf8,Dot
+#### --hdfs-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_HDFS_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -25160,6 +25904,17 @@ Properties:
- Type: Encoding
- Default: Slash,Dot
+#### --hidrive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_HIDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -25363,6 +26118,17 @@ Properties:
- Type: bool
- Default: false
+#### --http-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_HTTP_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the http backend.
@@ -25550,6 +26316,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Dollar,Question,Hash,Percent,BackSlash,Del,Ctl,InvalidUtf8,Dot,SquareBracket
+#### --imagekit-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_IMAGEKIT_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -25768,6 +26545,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot
+#### --internetarchive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_INTERNETARCHIVE_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Metadata fields provided by Internet Archive.
@@ -26157,6 +26945,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot
+#### --jottacloud-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_JOTTACLOUD_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Jottacloud has limited support for metadata, currently an extended set of timestamps.
@@ -26344,6 +27143,17 @@ Properties:
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+#### --koofr-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_KOOFR_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -26418,6 +27228,21 @@ Properties:
- Type: string
- Required: true
+### Advanced options
+
+Here are the Advanced options specific to linkbox (Linkbox).
+
+#### --linkbox-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_LINKBOX_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -26776,6 +27601,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot
+#### --mailru-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_MAILRU_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -27019,6 +27855,17 @@ Properties:
- Type: Encoding
- Default: Slash,InvalidUtf8,Dot
+#### --mega-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_MEGA_DESCRIPTION
+- Type: string
+- Required: false
+
### Process `killed`
@@ -27083,6 +27930,21 @@ The memory backend replaces the [default restricted characters
set](https://rclone.org/overview/#restricted-characters).
+### Advanced options
+
+Here are the Advanced options specific to memory (In memory object storage system.).
+
+#### --memory-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_MEMORY_DESCRIPTION
+- Type: string
+- Required: false
+
# Akamai NetStorage
@@ -27280,6 +28142,17 @@ Properties:
- "https"
- HTTPS protocol
+#### --netstorage-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_NETSTORAGE_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the netstorage backend.
@@ -28114,6 +28987,35 @@ Properties:
- Type: bool
- Default: false
+#### --azureblob-delete-snapshots
+
+Set to specify how to deal with snapshots on blob deletion.
+
+Properties:
+
+- Config: delete_snapshots
+- Env Var: RCLONE_AZUREBLOB_DELETE_SNAPSHOTS
+- Type: string
+- Required: false
+- Choices:
+ - ""
+ - By default, the delete operation fails if a blob has snapshots
+ - "include"
+ - Specify 'include' to remove the root blob and all its snapshots
+ - "only"
+ - Specify 'only' to remove only the snapshots but keep the root blob.
+
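+For example (assuming a configured remote named `azblob:` and an existing
+blob path), a blob could be removed together with all of its snapshots like
+this:
+
+    rclone deletefile --azureblob-delete-snapshots include azblob:container/path/to/blob
+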
+#### --azureblob-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_AZUREBLOB_DESCRIPTION
+- Type: string
+- Required: false
+
### Custom upload headers
@@ -28783,6 +29685,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8,Dot
+#### --azurefiles-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_AZUREFILES_DESCRIPTION
+- Type: string
+- Required: false
+
### Custom upload headers
@@ -29372,7 +30285,7 @@ Properties:
If set rclone will use delta listing to implement recursive listings.
-If this flag is set the the onedrive backend will advertise `ListR`
+If this flag is set the onedrive backend will advertise `ListR`
support for recursive listings.
Setting this flag speeds up these things greatly:
@@ -29405,6 +30318,30 @@ Properties:
- Type: bool
- Default: false
+#### --onedrive-metadata-permissions
+
+Control whether permissions should be read or written in metadata.
+
+Reading permissions metadata from files can be done quickly, but it
+isn't always desirable to set the permissions from the metadata.
+
+
+Properties:
+
+- Config: metadata_permissions
+- Env Var: RCLONE_ONEDRIVE_METADATA_PERMISSIONS
+- Type: Bits
+- Default: off
+- Examples:
+ - "off"
+ - Do not read or write the value
+ - "read"
+ - Read the value only
+ - "write"
+ - Write the value only
+ - "read,write"
+ - Read and Write the value.
+
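+For example (a sketch assuming a configured remote named `onedrive:`), sharing
+permissions can be preserved when copying between two paths on the same
+OneDrive by reading them from the source and writing them to the destination:
+
+    rclone copy onedrive:src onedrive:dst -M --onedrive-metadata-permissions read,write
+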
#### --onedrive-encoding
The encoding for the backend.
@@ -29418,4068 +30355,3871 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot
+#### --onedrive-description
-
-## Limitations
-
-If you don't use rclone for 90 days the refresh token will
-expire. This will result in authorization problems. This is easy to
-fix by running the `rclone config reconnect remote:` command to get a
-new token and refresh token.
-
-### Naming
-
-Note that OneDrive is case insensitive so you can't have a
-file called "Hello.doc" and one called "hello.doc".
-
-There are quite a few characters that can't be in OneDrive file
-names. These can't occur on Windows platforms, but on non-Windows
-platforms they are common. Rclone will map these names to and from an
-identical looking unicode equivalent. For example if a file has a `?`
-in it will be mapped to `？` instead.
-
-### File sizes
-
-The largest allowed file size is 250 GiB for both OneDrive Personal and OneDrive for Business [(Updated 13 Jan 2021)](https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#individualfilesize).
-
-### Path length
-
-The entire path, including the file name, must contain fewer than 400 characters for OneDrive, OneDrive for Business and SharePoint Online. If you are encrypting file and folder names with rclone, you may want to pay attention to this limitation because the encrypted names are typically longer than the original ones.
-
-### Number of files
-
-OneDrive seems to be OK with at least 50,000 files in a folder, but at
-100,000 rclone will get errors listing the directory like `couldn’t
-list files: UnknownError:`. See
-[#2707](https://github.com/rclone/rclone/issues/2707) for more info.
-
-An official document about the limitations for different types of OneDrive can be found [here](https://support.office.com/en-us/article/invalid-file-names-and-file-types-in-onedrive-onedrive-for-business-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa).
-
-## Versions
-
-Every change in a file OneDrive causes the service to create a new
-version of the file. This counts against a users quota. For
-example changing the modification time of a file creates a second
-version, so the file apparently uses twice the space.
-
-For example the `copy` command is affected by this as rclone copies
-the file and then afterwards sets the modification time to match the
-source file which uses another version.
-
-You can use the `rclone cleanup` command (see below) to remove all old
-versions.
-
-Or you can set the `no_versions` parameter to `true` and rclone will
-remove versions after operations which create new versions. This takes
-extra transactions so only enable it if you need it.
-
-**Note** At the time of writing Onedrive Personal creates versions
-(but not for setting the modification time) but the API for removing
-them returns "API not found" so cleanup and `no_versions` should not
-be used on Onedrive Personal.
-
-### Disabling versioning
-
-Starting October 2018, users will no longer be able to
-disable versioning by default. This is because Microsoft has brought
-an
-[update](https://techcommunity.microsoft.com/t5/Microsoft-OneDrive-Blog/New-Updates-to-OneDrive-and-SharePoint-Team-Site-Versioning/ba-p/204390)
-to the mechanism. To change this new default setting, a PowerShell
-command is required to be run by a SharePoint admin. If you are an
-admin, you can run these commands in PowerShell to change that
-setting:
-
-1. `Install-Module -Name Microsoft.Online.SharePoint.PowerShell` (in case you haven't installed this already)
-2. `Import-Module Microsoft.Online.SharePoint.PowerShell -DisableNameChecking`
-3. `Connect-SPOService -Url https://YOURSITE-admin.sharepoint.com -Credential YOU@YOURSITE.COM` (replacing `YOURSITE`, `YOU`, `YOURSITE.COM` with the actual values; this will prompt for your credentials)
-4. `Set-SPOTenant -EnableMinimumVersionRequirement $False`
-5. `Disconnect-SPOService` (to disconnect from the server)
-
-*Below are the steps for normal users to disable versioning. If you don't see the "No Versioning" option, make sure the above requirements are met.*
-
-User [Weropol](https://github.com/Weropol) has found a method to disable
-versioning on OneDrive
-
-1. Open the settings menu by clicking on the gear symbol at the top of the OneDrive Business page.
-2. Click Site settings.
-3. Once on the Site settings page, navigate to Site Administration > Site libraries and lists.
-4. Click Customize "Documents".
-5. Click General Settings > Versioning Settings.
-6. Under Document Version History select the option No versioning.
-Note: This will disable the creation of new file versions, but will not remove any previous versions. Your documents are safe.
-7. Apply the changes by clicking OK.
-8. Use rclone to upload or modify files. (I also use the --no-update-modtime flag)
-9. Restore the versioning settings after using rclone. (Optional)
-
-## Cleanup
-
-OneDrive supports `rclone cleanup` which causes rclone to look through
-every file under the path supplied and delete all version but the
-current version. Because this involves traversing all the files, then
-querying each file for versions it can be quite slow. Rclone does
-`--checkers` tests in parallel. The command also supports `--interactive`/`i`
-or `--dry-run` which is a great way to see what it would do.
-
- rclone cleanup --interactive remote:path/subdir # interactively remove all old version for path/subdir
- rclone cleanup remote:path/subdir # unconditionally remove all old version for path/subdir
-
-**NB** Onedrive personal can't currently delete versions
-
-## Troubleshooting ##
-
-### Excessive throttling or blocked on SharePoint
-
-If you experience excessive throttling or is being blocked on SharePoint then it may help to set the user agent explicitly with a flag like this: `--user-agent "ISV|rclone.org|rclone/v1.55.1"`
-
-The specific details can be found in the Microsoft document: [Avoid getting throttled or blocked in SharePoint Online](https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#how-to-decorate-your-http-traffic-to-avoid-throttling)
-
-### Unexpected file size/hash differences on Sharepoint ####
-
-It is a
-[known](https://github.com/OneDrive/onedrive-api-docs/issues/935#issuecomment-441741631)
-issue that Sharepoint (not OneDrive or OneDrive for Business) silently modifies
-uploaded files, mainly Office files (.docx, .xlsx, etc.), causing file size and
-hash checks to fail. There are also other situations that will cause OneDrive to
-report inconsistent file sizes. To use rclone with such
-affected files on Sharepoint, you
-may disable these checks with the following command line arguments:
-
---ignore-checksum --ignore-size
-
-Alternatively, if you have write access to the OneDrive files, it may be possible
-to fix this problem for certain files, by attempting the steps below.
-Open the web interface for [OneDrive](https://onedrive.live.com) and find the
-affected files (which will be in the error messages/log for rclone). Simply click on
-each of these files, causing OneDrive to open them on the web. This will cause each
-file to be converted in place to a format that is functionally equivalent
-but which will no longer trigger the size discrepancy. Once all problematic files
-are converted you will no longer need the ignore options above.
-
-### Replacing/deleting existing files on Sharepoint gets "item not found" ####
-
-It is a [known](https://github.com/OneDrive/onedrive-api-docs/issues/1068) issue
-that Sharepoint (not OneDrive or OneDrive for Business) may return "item not
-found" errors when users try to replace or delete uploaded files; this seems to
-mainly affect Office files (.docx, .xlsx, etc.) and web files (.html, .aspx, etc.). As a workaround, you may use
-the `--backup-dir <BACKUP_DIR>` command line argument so rclone moves the
-files to be replaced/deleted into a given backup directory (instead of directly
-replacing/deleting them). For example, to instruct rclone to move the files into
-the directory `rclone-backup-dir` on backend `mysharepoint`, you may use:
-
---backup-dir mysharepoint:rclone-backup-dir
-
-### access\_denied (AADSTS65005) ####
-
-Error: access_denied Code: AADSTS65005 Description: Using application 'rclone' is currently not supported for your organization [YOUR_ORGANIZATION] because it is in an unmanaged state. An administrator needs to claim ownership of the company by DNS validation of [YOUR_ORGANIZATION] before the application rclone can be provisioned.
-
-This means that rclone can't use the OneDrive for Business API with your account. You can't do much about it, maybe write an email to your admins.
-
-However, there are other ways to interact with your OneDrive account. Have a look at the WebDAV backend: https://rclone.org/webdav/#sharepoint
-
-### invalid\_grant (AADSTS50076) ####
-
-Error: invalid_grant Code: AADSTS50076 Description: Due to a configuration change made by your administrator, or because you moved to a new location, you must use multi-factor authentication to access '...'.
-
-If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run `rclone config`, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: `Already have a token - refresh?`. For this question, answer `y` and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.
-
-### Invalid request when making public links ####
-
-On Sharepoint and OneDrive for Business, `rclone link` may return an "Invalid
-request" error. A possible cause is that the organisation admin didn't allow
-public links to be made for the organisation/sharepoint library. To fix the
-permissions as an admin, take a look at the docs:
-[1](https://docs.microsoft.com/en-us/sharepoint/turn-external-sharing-on-or-off),
-[2](https://support.microsoft.com/en-us/office/set-up-and-manage-access-requests-94b26e0b-2822-49d4-929a-8455698654b3).
-
-### Can not access `Shared` with me files
-
-Shared with me files is not supported by rclone [currently](https://github.com/rclone/rclone/issues/4062), but there is a workaround:
-
-1. Visit [https://onedrive.live.com](https://onedrive.live.com/)
-2. Right click a item in `Shared`, then click `Add shortcut to My files` in the context
- ![make_shortcut](https://user-images.githubusercontent.com/60313789/206118040-7e762b3b-aa61-41a1-8649-cc18889f3572.png "Screenshot (Shared with me)")
-3. The shortcut will appear in `My files`, you can access it with rclone, it behaves like a normal folder/file.
- ![in_my_files](https://i.imgur.com/0S8H3li.png "Screenshot (My Files)")
- ![rclone_mount](https://i.imgur.com/2Iq66sW.png "Screenshot (rclone mount)")
-
-### Live Photos uploaded from iOS (small video clips in .heic files)
-
-The iOS OneDrive app introduced [upload and storage](https://techcommunity.microsoft.com/t5/microsoft-onedrive-blog/live-photos-come-to-onedrive/ba-p/1953452)
-of [Live Photos](https://support.apple.com/en-gb/HT207310) in 2020.
-The usage and download of these uploaded Live Photos is unfortunately still work-in-progress
-and this introduces several issues when copying, synchronising and mounting – both in rclone and in the native OneDrive client on Windows.
-
-The root cause can easily be seen if you locate one of your Live Photos in the OneDrive web interface.
-Then download the photo from the web interface. You will then see that the size of downloaded .heic file is smaller than the size displayed in the web interface.
-The downloaded file is smaller because it only contains a single frame (still photo) extracted from the Live Photo (movie) stored in OneDrive.
-
-The different sizes will cause `rclone copy/sync` to repeatedly recopy unmodified photos something like this:
-
- DEBUG : 20230203_123826234_iOS.heic: Sizes differ (src 4470314 vs dst 1298667)
- DEBUG : 20230203_123826234_iOS.heic: sha1 = fc2edde7863b7a7c93ca6771498ac797f8460750 OK
- INFO : 20230203_123826234_iOS.heic: Copied (replaced existing)
-
-These recopies can be worked around by adding `--ignore-size`. Please note that this workaround only syncs the still-picture not the movie clip,
-and relies on modification dates being correctly updated on all files in all situations.
-
-The different sizes will also cause `rclone check` to report size errors something like this:
-
- ERROR : 20230203_123826234_iOS.heic: sizes differ
-
-These check errors can be suppressed by adding `--ignore-size`.
-
-The different sizes will also cause `rclone mount` to fail downloading with an error something like this:
-
- ERROR : 20230203_123826234_iOS.heic: ReadFileHandle.Read error: low level retry 1/10: unexpected EOF
-
-or like this when using `--cache-mode=full`:
-
- INFO : 20230203_123826234_iOS.heic: vfs cache: downloader: error count now 1: vfs reader: failed to write to cache file: 416 Requested Range Not Satisfiable:
- ERROR : 20230203_123826234_iOS.heic: vfs cache: failed to download: vfs reader: failed to write to cache file: 416 Requested Range Not Satisfiable:
-
-# OpenDrive
-
-Paths are specified as `remote:path`
-
-Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
-
-## Configuration
-
-Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
-
-    n) New remote
-    d) Delete remote
-    q) Quit config
-    e/n/d/q> n
-    name> remote
-    Type of storage to configure.
-    Choose a number from below, or type in your own value
-    [snip]
-    XX / OpenDrive
-       \ "opendrive"
-    [snip]
-    Storage> opendrive
-    Username
-    username>
-    Password
-    y) Yes type in my own password
-    g) Generate random password
-    y/g> y
-    Enter the password:
-    password:
-    Confirm the password:
-    password:
-    --------------------
-    [remote]
-    username =
-    password = *** ENCRYPTED ***
-    --------------------
-    y) Yes this is OK
-    e) Edit this remote
-    d) Delete this remote
-    y/e/d> y
-
-
-List directories in top level of your OpenDrive
-
- rclone lsd remote:
-
-List all the files in your OpenDrive
-
- rclone ls remote:
-
-To copy a local directory to an OpenDrive directory called backup
-
- rclone copy /home/source remote:backup
-
-### Modification times and hashes
-
-OpenDrive allows modification times to be set on objects accurate to 1
-second. These will be used to detect whether objects need syncing or
-not.
-
-The MD5 hash algorithm is supported.
-
-### Restricted filename characters
-
-| Character | Value | Replacement |
-| --------- |:-----:|:-----------:|
-| NUL | 0x00 | ␀ |
-| / | 0x2F | ／ |
-| " | 0x22 | ＂ |
-| * | 0x2A | ＊ |
-| : | 0x3A | ： |
-| < | 0x3C | ＜ |
-| > | 0x3E | ＞ |
-| ? | 0x3F | ？ |
-| \ | 0x5C | ＼ |
-| \| | 0x7C | ｜ |
-
-File names can also not begin or end with the following characters.
-These only get replaced if they are the first or last character in the name:
-
-| Character | Value | Replacement |
-| --------- |:-----:|:-----------:|
-| SP | 0x20 | ␠ |
-| HT | 0x09 | ␉ |
-| LF | 0x0A | ␊ |
-| VT | 0x0B | ␋ |
-| CR | 0x0D | ␍ |
-
-
-Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
-as they can't be used in JSON strings.
-
-
-### Standard options
-
-Here are the Standard options specific to opendrive (OpenDrive).
-
-#### --opendrive-username
-
-Username.
+Description of the remote
Properties:
-- Config: username
-- Env Var: RCLONE_OPENDRIVE_USERNAME
-- Type: string
-- Required: true
-
-#### --opendrive-password
-
-Password.
-
-**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
-Properties:
-
-- Config: password
-- Env Var: RCLONE_OPENDRIVE_PASSWORD
-- Type: string
-- Required: true
-
-### Advanced options
-
-Here are the Advanced options specific to opendrive (OpenDrive).
-
-#### --opendrive-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_OPENDRIVE_ENCODING
-- Type: Encoding
-- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot
-
-#### --opendrive-chunk-size
-
-Files will be uploaded in chunks this size.
-
-Note that these chunks are buffered in memory so increasing them will
-increase memory use.
-
-Properties:
-
-- Config: chunk_size
-- Env Var: RCLONE_OPENDRIVE_CHUNK_SIZE
-- Type: SizeSuffix
-- Default: 10Mi
-
-
-
-## Limitations
-
-Note that OpenDrive is case insensitive so you can't have a
-file called "Hello.doc" and one called "hello.doc".
-
-There are quite a few characters that can't be in OpenDrive file
-names. These can't occur on Windows platforms, but on non-Windows
-platforms they are common. Rclone will map these names to and from an
-identical looking unicode equivalent. For example if a file has a `?`
-in it will be mapped to `？` instead.
-
-`rclone about` is not supported by the OpenDrive backend. Backends without
-this capability cannot determine free space for an rclone mount or
-use policy `mfs` (most free space) as a member of an rclone union
-remote.
-
-See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
-
-# Oracle Object Storage
-- [Oracle Object Storage Overview](https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/objectstorageoverview.htm)
-- [Oracle Object Storage FAQ](https://www.oracle.com/cloud/storage/object-storage/faq/)
-- [Oracle Object Storage Limits](https://docs.oracle.com/en-us/iaas/Content/Resources/Assets/whitepapers/oci-object-storage-best-practices.pdf)
-
-Paths are specified as `remote:bucket` (or `remote:` for the `lsd` command.) You may put subdirectories in
-too, e.g. `remote:bucket/path/to/dir`.
-
-Sample command to transfer local artifacts to remote:bucket in oracle object storage:
-
-`rclone -vvv --progress --stats-one-line --max-stats-groups 10 --log-format date,time,UTC,longfile --fast-list --buffer-size 256Mi --oos-no-check-bucket --oos-upload-cutoff 10Mi --multi-thread-cutoff 16Mi --multi-thread-streams 3000 --transfers 3000 --checkers 64 --retries 2 --oos-chunk-size 10Mi --oos-upload-concurrency 10000 --oos-attempt-resume-upload --oos-leave-parts-on-error sync ./artifacts remote:bucket -vv`
-
-## Configuration
-
-Here is an example of making an oracle object storage configuration. `rclone config` walks you
-through it.
-
-Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
-
-
-    n) New remote
-    d) Delete remote
-    r) Rename remote
-    c) Copy remote
-    s) Set configuration password
-    q) Quit config
-    e/n/d/r/c/s/q> n
-
-    Enter name for new remote.
-    name> remote
-
-    Option Storage.
-    Type of storage to configure.
-    Choose a number from below, or type in your own value.
-    [snip]
-    XX / Oracle Cloud Infrastructure Object Storage
-       \ (oracleobjectstorage)
-    Storage> oracleobjectstorage
-
-    Option provider.
-    Choose your Auth Provider
-    Choose a number from below, or type in your own string value.
-    Press Enter for the default (env_auth).
-     1 / automatically pickup the credentials from runtime(env), first one to provide auth wins
-       \ (env_auth)
-       / use an OCI user and an API key for authentication.
-     2 | you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
-       | https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
-       \ (user_principal_auth)
-       / use instance principals to authorize an instance to make API calls.
-     3 | each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
-       | https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
-       \ (instance_principal_auth)
-     4 / use resource principals to make API calls
-       \ (resource_principal_auth)
-     5 / no credentials needed, this is typically for reading public buckets
-       \ (no_auth)
-    provider> 2
-
-    Option namespace.
-    Object storage namespace
-    Enter a value.
-    namespace> idbamagbg734
-
-    Option compartment.
-    Object storage compartment OCID
-    Enter a value.
-    compartment> ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
-
-    Option region.
-    Object storage Region
-    Enter a value.
-    region> us-ashburn-1
-
-    Option endpoint.
-    Endpoint for Object storage API.
-    Leave blank to use the default endpoint for the region.
-    Enter a value. Press Enter to leave empty.
-    endpoint>
-
-    Option config_file.
-    Full Path to OCI config file
-    Choose a number from below, or type in your own string value.
-    Press Enter for the default (~/.oci/config).
-     1 / oci configuration file location
-       \ (~/.oci/config)
-    config_file> /etc/oci/dev.conf
-
-    Option config_profile.
-    Profile name inside OCI config file
-    Choose a number from below, or type in your own string value.
-    Press Enter for the default (Default).
-     1 / Use the default profile
-       \ (Default)
-    config_profile> Test
-
-    Edit advanced config?
-    y) Yes
-    n) No (default)
-    y/n> n
-
-    Configuration complete.
-    Options:
-    - type: oracleobjectstorage
-    - namespace: idbamagbg734
-    - compartment: ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
-    - region: us-ashburn-1
-    - provider: user_principal_auth
-    - config_file: /etc/oci/dev.conf
-    - config_profile: Test
-    Keep this "remote" remote?
-    y) Yes this is OK (default)
-    e) Edit this remote
-    d) Delete this remote
-    y/e/d> y
-
-See all buckets
-
- rclone lsd remote:
-
-Create a new bucket
-
- rclone mkdir remote:bucket
-
-List the contents of a bucket
-
- rclone ls remote:bucket
- rclone ls remote:bucket --max-depth 1
-
-## Authentication Providers
-
-OCI has various authentication methods. To learn more about authentication methods please refer [oci authentication
-methods](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm)
-These choices can be specified in the rclone config file.
-
-Rclone supports the following OCI authentication provider.
-
- User Principal
- Instance Principal
- Resource Principal
- No authentication
-
-### User Principal
-
-Sample rclone config file for Authentication Provider User Principal:
-
- [oos]
- type = oracleobjectstorage
- namespace = id<redacted>34
- compartment = ocid1.compartment.oc1..aa<redacted>ba
- region = us-ashburn-1
- provider = user_principal_auth
- config_file = /home/opc/.oci/config
- config_profile = Default
-
-Advantages:
-- One can use this method from any server within OCI or on-premises or from other cloud provider.
-
-Considerations:
-- you need to configure user’s privileges / policy to allow access to object storage
-- Overhead of managing users and keys.
-- If the user is deleted, the config file will no longer work and may cause automation regressions that use the user's credentials.
-
-### Instance Principal
-
-An OCI compute instance can be authorized to use rclone by using it's identity and certificates as an instance principal.
-With this approach no credentials have to be stored and managed.
-
-Sample rclone configuration file for Authentication Provider Instance Principal:
-
- [opc@rclone ~]$ cat ~/.config/rclone/rclone.conf
- [oos]
- type = oracleobjectstorage
- namespace = id<redacted>fn
- compartment = ocid1.compartment.oc1..aa<redacted>k7a
- region = us-ashburn-1
- provider = instance_principal_auth
-
-Advantages:
-
-- With instance principals, you don't need to configure user credentials and transfer/ save it to disk in your compute
- instances or rotate the credentials.
-- You don’t need to deal with users and keys.
-- Greatly helps in automation as you don't have to manage access keys, user private keys, storing them in vault,
- using kms etc.
-
-Considerations:
-
-- You need to configure a dynamic group having this instance as member and add policy to read object storage to that
- dynamic group.
-- Everyone who has access to this machine can execute the CLI commands.
-- It is applicable for oci compute instances only. It cannot be used on external instance or resources.
-
-### Resource Principal
-
-Resource principal auth is very similar to instance principal auth but used for resources that are not
-compute instances such as [serverless functions](https://docs.oracle.com/en-us/iaas/Content/Functions/Concepts/functionsoverview.htm).
-To use resource principal ensure Rclone process is started with these environment variables set in its process.
-
- export OCI_RESOURCE_PRINCIPAL_VERSION=2.2
- export OCI_RESOURCE_PRINCIPAL_REGION=us-ashburn-1
- export OCI_RESOURCE_PRINCIPAL_PRIVATE_PEM=/usr/share/model-server/key.pem
- export OCI_RESOURCE_PRINCIPAL_RPST=/usr/share/model-server/security_token
-
-Sample rclone configuration file for Authentication Provider Resource Principal:
-
- [oos]
- type = oracleobjectstorage
- namespace = id<redacted>34
- compartment = ocid1.compartment.oc1..aa<redacted>ba
- region = us-ashburn-1
- provider = resource_principal_auth
-
-### No authentication
-
-Public buckets do not require any authentication mechanism to read objects.
-Sample rclone configuration file for No authentication:
-
- [oos]
- type = oracleobjectstorage
- namespace = id<redacted>34
- compartment = ocid1.compartment.oc1..aa<redacted>ba
- region = us-ashburn-1
- provider = no_auth
-
-### Modification times and hashes
-
-The modification time is stored as metadata on the object as
-`opc-meta-mtime` as floating point since the epoch, accurate to 1 ns.
-
-If the modification time needs to be updated rclone will attempt to perform a server
-side copy to update the modification if the object can be copied in a single part.
-In the case the object is larger than 5Gb, the object will be uploaded rather than copied.
-
-Note that reading this from the object takes an additional `HEAD` request as the metadata
-isn't returned in object listings.
-
-The MD5 hash algorithm is supported.
-
-### Multipart uploads
-
-rclone supports multipart uploads with OOS which means that it can
-upload files bigger than 5 GiB.
-
-Note that files uploaded *both* with multipart upload *and* through
-crypt remotes do not have MD5 sums.
-
-rclone switches from single part uploads to multipart uploads at the
-point specified by `--oos-upload-cutoff`. This can be a maximum of 5 GiB
-and a minimum of 0 (ie always upload multipart files).
-
-The chunk sizes used in the multipart upload are specified by
-`--oos-chunk-size` and the number of chunks uploaded concurrently is
-specified by `--oos-upload-concurrency`.
-
-Multipart uploads will use `--transfers` * `--oos-upload-concurrency` *
-`--oos-chunk-size` extra memory. Single part uploads to not use extra
-memory.
-
-Single part transfers can be faster than multipart transfers or slower
-depending on your latency from oos - the more latency, the more likely
-single part transfers will be faster.
-
-Increasing `--oos-upload-concurrency` will increase throughput (8 would
-be a sensible value) and increasing `--oos-chunk-size` also increases
-throughput (16M would be sensible). Increasing either of these will
-use more memory. The default values are high enough to gain most of
-the possible performance without using too much memory.
-
-
-### Standard options
-
-Here are the Standard options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
-
-#### --oos-provider
-
-Choose your Auth Provider
-
-Properties:
-
-- Config: provider
-- Env Var: RCLONE_OOS_PROVIDER
-- Type: string
-- Default: "env_auth"
-- Examples:
- - "env_auth"
- - automatically pickup the credentials from runtime(env), first one to provide auth wins
- - "user_principal_auth"
- - use an OCI user and an API key for authentication.
- - you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
- - https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
- - "instance_principal_auth"
- - use instance principals to authorize an instance to make API calls.
- - each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
- - https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
- - "resource_principal_auth"
- - use resource principals to make API calls
- - "no_auth"
- - no credentials needed, this is typically for reading public buckets
-
-#### --oos-namespace
-
-Object storage namespace
-
-Properties:
-
-- Config: namespace
-- Env Var: RCLONE_OOS_NAMESPACE
-- Type: string
-- Required: true
-
-#### --oos-compartment
-
-Object storage compartment OCID
-
-Properties:
-
-- Config: compartment
-- Env Var: RCLONE_OOS_COMPARTMENT
-- Provider: !no_auth
-- Type: string
-- Required: true
-
-#### --oos-region
-
-Object storage Region
-
-Properties:
-
-- Config: region
-- Env Var: RCLONE_OOS_REGION
-- Type: string
-- Required: true
-
-#### --oos-endpoint
-
-Endpoint for Object storage API.
-
-Leave blank to use the default endpoint for the region.
-
-Properties:
-
-- Config: endpoint
-- Env Var: RCLONE_OOS_ENDPOINT
+- Config: description
+- Env Var: RCLONE_ONEDRIVE_DESCRIPTION
- Type: string
- Required: false
-#### --oos-config-file
-
-Path to OCI config file
-
-Properties:
-
-- Config: config_file
-- Env Var: RCLONE_OOS_CONFIG_FILE
-- Provider: user_principal_auth
-- Type: string
-- Default: "~/.oci/config"
-- Examples:
- - "~/.oci/config"
- - oci configuration file location
-
-#### --oos-config-profile
-
-Profile name inside the oci config file
-
-Properties:
-
-- Config: config_profile
-- Env Var: RCLONE_OOS_CONFIG_PROFILE
-- Provider: user_principal_auth
-- Type: string
-- Default: "Default"
-- Examples:
- - "Default"
- - Use the default profile
-
-### Advanced options
-
-Here are the Advanced options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
-
-#### --oos-storage-tier
-
-The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm
-
-Properties:
-
-- Config: storage_tier
-- Env Var: RCLONE_OOS_STORAGE_TIER
-- Type: string
-- Default: "Standard"
-- Examples:
- - "Standard"
- - Standard storage tier, this is the default tier
- - "InfrequentAccess"
- - InfrequentAccess storage tier
- - "Archive"
- - Archive storage tier
-
-#### --oos-upload-cutoff
-
-Cutoff for switching to chunked upload.
-
-Any files larger than this will be uploaded in chunks of chunk_size.
-The minimum is 0 and the maximum is 5 GiB.
-
-Properties:
-
-- Config: upload_cutoff
-- Env Var: RCLONE_OOS_UPLOAD_CUTOFF
-- Type: SizeSuffix
-- Default: 200Mi
-
-#### --oos-chunk-size
-
-Chunk size to use for uploading.
-
-When uploading files larger than upload_cutoff or files with unknown
-size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded
-as multipart uploads using this chunk size.
-
-Note that "upload_concurrency" chunks of this size are buffered
-in memory per transfer.
-
-If you are transferring large files over high-speed links and you have
-enough memory, then increasing this will speed up the transfers.
-
-Rclone will automatically increase the chunk size when uploading a
-large file of known size to stay below the 10,000 chunks limit.
-
-Files of unknown size are uploaded with the configured
-chunk_size. Since the default chunk size is 5 MiB and there can be at
-most 10,000 chunks, this means that by default the maximum size of
-a file you can stream upload is 48 GiB. If you wish to stream upload
-larger files then you will need to increase chunk_size.
-
-Increasing the chunk size decreases the accuracy of the progress
-statistics displayed with "-P" flag.
-
-
-Properties:
-
-- Config: chunk_size
-- Env Var: RCLONE_OOS_CHUNK_SIZE
-- Type: SizeSuffix
-- Default: 5Mi
-
-#### --oos-max-upload-parts
-
-Maximum number of parts in a multipart upload.
-
-This option defines the maximum number of multipart chunks to use
-when doing a multipart upload.
-
-OCI has max parts limit of 10,000 chunks.
-
-Rclone will automatically increase the chunk size when uploading a
-large file of a known size to stay below this number of chunks limit.
-
-
-Properties:
-
-- Config: max_upload_parts
-- Env Var: RCLONE_OOS_MAX_UPLOAD_PARTS
-- Type: int
-- Default: 10000
-
-#### --oos-upload-concurrency
-
-Concurrency for multipart uploads.
-
-This is the number of chunks of the same file that are uploaded
-concurrently.
-
-If you are uploading small numbers of large files over high-speed links
-and these uploads do not fully utilize your bandwidth, then increasing
-this may help to speed up the transfers.
-
-Properties:
-
-- Config: upload_concurrency
-- Env Var: RCLONE_OOS_UPLOAD_CONCURRENCY
-- Type: int
-- Default: 10
-
-#### --oos-copy-cutoff
-
-Cutoff for switching to multipart copy.
-
-Any files larger than this that need to be server-side copied will be
-copied in chunks of this size.
-
-The minimum is 0 and the maximum is 5 GiB.
-
-Properties:
-
-- Config: copy_cutoff
-- Env Var: RCLONE_OOS_COPY_CUTOFF
-- Type: SizeSuffix
-- Default: 4.656Gi
-
-#### --oos-copy-timeout
-
-Timeout for copy.
-
-Copy is an asynchronous operation, specify timeout to wait for copy to succeed
-
-
-Properties:
-
-- Config: copy_timeout
-- Env Var: RCLONE_OOS_COPY_TIMEOUT
-- Type: Duration
-- Default: 1m0s
-
-#### --oos-disable-checksum
-
-Don't store MD5 checksum with object metadata.
-
-Normally rclone will calculate the MD5 checksum of the input before
-uploading it so it can add it to metadata on the object. This is great
-for data integrity checking but can cause long delays for large files
-to start uploading.
-
-Properties:
-
-- Config: disable_checksum
-- Env Var: RCLONE_OOS_DISABLE_CHECKSUM
-- Type: bool
-- Default: false
-
-#### --oos-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_OOS_ENCODING
-- Type: Encoding
-- Default: Slash,InvalidUtf8,Dot
-
-#### --oos-leave-parts-on-error
-
-If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.
-
-It should be set to true for resuming uploads across different sessions.
-
-WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
-additional costs if not cleaned up.
-
-
-Properties:
-
-- Config: leave_parts_on_error
-- Env Var: RCLONE_OOS_LEAVE_PARTS_ON_ERROR
-- Type: bool
-- Default: false
-
-#### --oos-attempt-resume-upload
-
-If true attempt to resume previously started multipart upload for the object.
-This will be helpful to speed up multipart transfers by resuming uploads from past session.
-
-WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is
-aborted and a new multipart upload is started with the new chunk size.
-
-The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully.
-
-
-Properties:
-
-- Config: attempt_resume_upload
-- Env Var: RCLONE_OOS_ATTEMPT_RESUME_UPLOAD
-- Type: bool
-- Default: false
-
-#### --oos-no-check-bucket
-
-If set, don't attempt to check the bucket exists or create it.
-
-This can be useful when trying to minimise the number of transactions
-rclone does if you know the bucket exists already.
-
-It can also be needed if the user you are using does not have bucket
-creation permissions.
-
-
-Properties:
-
-- Config: no_check_bucket
-- Env Var: RCLONE_OOS_NO_CHECK_BUCKET
-- Type: bool
-- Default: false
-
-#### --oos-sse-customer-key-file
-
-To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
-with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.'
-
-Properties:
-
-- Config: sse_customer_key_file
-- Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY_FILE
-- Type: string
-- Required: false
-- Examples:
- - ""
- - None
-
-#### --oos-sse-customer-key
-
-To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
-encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is
-needed. For more information, see Using Your Own Keys for Server-Side Encryption
-(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)
-
-Properties:
-
-- Config: sse_customer_key
-- Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY
-- Type: string
-- Required: false
-- Examples:
- - ""
- - None
-
-#### --oos-sse-customer-key-sha256
-
-If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption
-key. This value is used to check the integrity of the encryption key. see Using Your Own Keys for
-Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).
-
-Properties:
-
-- Config: sse_customer_key_sha256
-- Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY_SHA256
-- Type: string
-- Required: false
-- Examples:
- - ""
- - None
-
-#### --oos-sse-kms-key-id
-
-if using your own master key in vault, this header specifies the
-OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
-the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
-Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
-
-Properties:
-
-- Config: sse_kms_key_id
-- Env Var: RCLONE_OOS_SSE_KMS_KEY_ID
-- Type: string
-- Required: false
-- Examples:
- - ""
- - None
-
-#### --oos-sse-customer-algorithm
-
-If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm.
-Object Storage supports "AES256" as the encryption algorithm. For more information, see
-Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).
-
-Properties:
-
-- Config: sse_customer_algorithm
-- Env Var: RCLONE_OOS_SSE_CUSTOMER_ALGORITHM
-- Type: string
-- Required: false
-- Examples:
- - ""
- - None
- - "AES256"
- - AES256
-
-## Backend commands
-
-Here are the commands specific to the oracleobjectstorage backend.
-
-Run them with
-
- rclone backend COMMAND remote:
-
-The help below will explain what arguments each command takes.
-
-See the [backend](https://rclone.org/commands/rclone_backend/) command for more
-info on how to pass options and arguments.
-
-These can be run on a running backend using the rc command
-[backend/command](https://rclone.org/rc/#backend-command).
-
-### rename
-
-change the name of an object
-
- rclone backend rename remote: [options] [<arguments>+]
-
-This command can be used to rename a object.
-
-Usage Examples:
-
- rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
-
-
-### list-multipart-uploads
-
-List the unfinished multipart uploads
-
- rclone backend list-multipart-uploads remote: [options] [<arguments>+]
-
-This command lists the unfinished multipart uploads in JSON format.
-
- rclone backend list-multipart-uploads oos:bucket/path/to/object
-
-It returns a dictionary of buckets with values as lists of unfinished
-multipart uploads.
-
-You can call it with no bucket in which case it lists all bucket, with
-a bucket or with a bucket and path.
-
+### Metadata
+
+OneDrive supports System Metadata (not User Metadata, as of this writing) for
+both files and directories. Much of the metadata is read-only, and there are some
+differences between OneDrive Personal and Business (see table below for
+details).
+
+Permissions are also supported, if `--onedrive-metadata-permissions` is set. The
+accepted values for `--onedrive-metadata-permissions` are `read`, `write`,
+`read,write`, and `off` (the default). `write` supports adding new permissions,
+updating the "role" of existing permissions, and removing permissions. Updating
+and removing require the Permission ID to be known, so it is recommended to use
+`read,write` instead of `write` if you wish to update/remove permissions.
+
+Permissions are read/written in JSON format using the same schema as the
+[OneDrive API](https://learn.microsoft.com/en-us/onedrive/developer/rest-api/resources/permission?view=odsp-graph-online),
+which differs slightly between OneDrive Personal and Business.
+
+Example for OneDrive Personal:
+```json
+[
{
- "test-bucket": [
- {
- "namespace": "test-namespace",
- "bucket": "test-bucket",
- "object": "600m.bin",
- "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
- "timeCreated": "2022-07-29T06:21:16.595Z",
- "storageTier": "Standard"
- }
- ]
+ "id": "1234567890ABC!123",
+ "grantedTo": {
+ "user": {
+ "id": "ryan@contoso.com"
+ },
+ "application": {},
+ "device": {}
+ },
+ "invitation": {
+ "email": "ryan@contoso.com"
+ },
+ "link": {
+ "webUrl": "https://1drv.ms/t/s!1234567890ABC"
+ },
+ "roles": [
+ "read"
+ ],
+ "shareId": "s!1234567890ABC"
+ }
+]
+```
+
+Example for OneDrive Business:
+
+```json
+[
+ {
+ "id": "48d31887-5fad-4d73-a9f5-3c356e68a038",
+ "grantedToIdentities": [
+ {
+ "user": {
+ "displayName": "ryan@contoso.com"
+ },
+ "application": {},
+ "device": {}
+ }
+ ],
+ "link": {
+ "type": "view",
+ "scope": "users",
+ "webUrl": "https://contoso.sharepoint.com/:w:/t/design/a577ghg9hgh737613bmbjf839026561fmzhsr85ng9f3hjck2t5s"
+ },
+ "roles": [
+ "read"
+ ],
+ "shareId": "u!LKj1lkdlals90j1nlkascl"
+ },
+ {
+ "id": "5D33DD65C6932946",
+ "grantedTo": {
+ "user": {
+ "displayName": "John Doe",
+ "id": "efee1b77-fb3b-4f65-99d6-274c11914d12"
+ },
+ "application": {},
+ "device": {}
+ },
+ "roles": [
+ "owner"
+ ],
+ "shareId": "FWxc1lasfdbEAGM5fI7B67aB5ZMPDMmQ11U"
+ }
+]
+```
+
+To write permissions, pass in a "permissions" metadata key using this same
+format. The `--metadata-mapper` tool can be very helpful for this.
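+
+A sketch of how this might look on the command line, assuming a hypothetical
+`fix-perms.py` script that reads the metadata mapper's JSON on stdin, adjusts
+the "permissions" key, and prints the result on stdout:
+
+    rclone copy /local/dir onedrive:dir -M --onedrive-metadata-permissions write --metadata-mapper ./fix-perms.py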
+
+When adding permissions, an email address can be provided in the `User.ID` or
+`DisplayName` properties of `grantedTo` or `grantedToIdentities`. Alternatively,
+an ObjectID can be provided in `User.ID`. At least one valid recipient must be
+provided in order to add a permission for a user. Creating a Public Link is
+also supported, if `Link.Scope` is set to `"anonymous"`.
+
+Example request to add a "read" permission:
+
+```json
+[
+ {
+ "id": "",
+ "grantedTo": {
+ "user": {},
+ "application": {},
+ "device": {}
+ },
+ "grantedToIdentities": [
+ {
+ "user": {
+ "id": "ryan@contoso.com"
+ },
+ "application": {},
+ "device": {}
+ }
+ ],
+ "roles": [
+ "read"
+ ]
+ }
+]
+```
+
+Note that adding a permission can fail if a conflicting permission already
+exists for the file/folder.
+
+To update an existing permission, include both the Permission ID and the new
+`roles` to be assigned. `roles` is the only property that can be changed.
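+
+For instance, a minimal update request might look like this (the Permission ID
+is hypothetical), changing an existing permission's role to "write":
+
+```json
+[
+  {
+    "id": "48d31887-5fad-4d73-a9f5-3c356e68a038",
+    "roles": [
+      "write"
+    ]
+  }
+]
+```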
+
+To remove permissions, pass in a blob containing only the permissions you wish
+to keep (which can be empty, to remove all.)
+
+Note that both reading and writing permissions require extra API calls, so if
+you don't need to read or write permissions it is recommended to omit
+`--onedrive-metadata-permissions`.
+
+Metadata and permissions are supported for Folders (directories) as well as
+Files. Note that setting the `mtime` or `btime` on a Folder requires one extra
+API call on OneDrive Business only.
+
+OneDrive does not currently support User Metadata. When writing metadata, only
+writeable system properties will be written -- any read-only or unrecognized
+keys passed in will be ignored.
+
+TIP: to see the metadata and permissions for any file or folder, run:
+
+    rclone lsjson remote:path --stat -M --onedrive-metadata-permissions read
+
+Here are the possible system metadata items for the onedrive backend.
+
+| Name | Help | Type | Example | Read Only |
+|------|------|------|---------|-----------|
+| btime | Time of file birth (creation) with S accuracy (mS for OneDrive Personal). | RFC 3339 | 2006-01-02T15:04:05Z | N |
+| content-type | The MIME type of the file. | string | text/plain | Y |
+| created-by-display-name | Display name of the user that created the item. | string | John Doe | Y |
+| created-by-id | ID of the user that created the item. | string | 48d31887-5fad-4d73-a9f5-3c356e68a038 | Y |
+| description | A short description of the file. Max 1024 characters. Only supported for OneDrive Personal. | string | Contract for signing | N |
+| id | The unique identifier of the item within OneDrive. | string | 01BYE5RZ6QN3ZWBTUFOFD3GSPGOHDJD36K | Y |
+| last-modified-by-display-name | Display name of the user that last modified the item. | string | John Doe | Y |
+| last-modified-by-id | ID of the user that last modified the item. | string | 48d31887-5fad-4d73-a9f5-3c356e68a038 | Y |
+| malware-detected | Whether OneDrive has detected that the item contains malware. | boolean | true | Y |
+| mtime | Time of last modification with S accuracy (mS for OneDrive Personal). | RFC 3339 | 2006-01-02T15:04:05Z | N |
+| package-type | If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. | string | oneNote | Y |
+| permissions | Permissions in a JSON dump of OneDrive format. Enable with --onedrive-metadata-permissions. Properties: id, grantedTo, grantedToIdentities, invitation, inheritedFrom, link, roles, shareId | JSON | {} | N |
+| shared-by-id | ID of the user that shared the item (if shared). | string | 48d31887-5fad-4d73-a9f5-3c356e68a038 | Y |
+| shared-owner-id | ID of the owner of the shared item (if shared). | string | 48d31887-5fad-4d73-a9f5-3c356e68a038 | Y |
+| shared-scope | If shared, indicates the scope of how the item is shared: anonymous, organization, or users. | string | users | Y |
+| shared-time | Time when the item was shared, with S accuracy (mS for OneDrive Personal). | RFC 3339 | 2006-01-02T15:04:05Z | Y |
+| utime | Time of upload with S accuracy (mS for OneDrive Personal). | RFC 3339 | 2006-01-02T15:04:05Z | Y |
+
+See the metadata docs for more info.
+Limitations
+If you don't use rclone for 90 days the refresh token will expire. This will result in authorization problems. This is easy to fix by running the rclone config reconnect remote:
command to get a new token and refresh token.
+Naming
+Note that OneDrive is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".
+There are quite a few characters that can't be in OneDrive file names. These can't occur on Windows platforms, but on non-Windows platforms they are common. Rclone will map these names to and from an identical looking unicode equivalent. For example if a file has a ? in it, it will be mapped to ？ (a full-width question mark) instead.
+File sizes
+The largest allowed file size is 250 GiB for both OneDrive Personal and OneDrive for Business (Updated 13 Jan 2021).
+Path length
+The entire path, including the file name, must contain fewer than 400 characters for OneDrive, OneDrive for Business and SharePoint Online. If you are encrypting file and folder names with rclone, you may want to pay attention to this limitation because the encrypted names are typically longer than the original ones.
+Number of files
+OneDrive seems to be OK with at least 50,000 files in a folder, but at 100,000 rclone will get errors listing the directory like couldn’t list files: UnknownError:
. See #2707 for more info.
+An official document about the limitations for different types of OneDrive can be found here.
+Versions
+Every change in a file in OneDrive causes the service to create a new version of the file. This counts against a user's quota. For example changing the modification time of a file creates a second version, so the file apparently uses twice the space.
+For example the copy
command is affected by this as rclone copies the file and then afterwards sets the modification time to match the source file which uses another version.
+You can use the rclone cleanup
command (see below) to remove all old versions.
+Or you can set the no_versions
parameter to true
and rclone will remove versions after operations which create new versions. This takes extra transactions so only enable it if you need it.
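+For example, either of the following enables that behaviour (a sketch; remote is a placeholder for your OneDrive remote name):
+rclone config update remote no_versions true
+rclone copy /path/to/src remote:dst --onedrive-no-versions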
+Note: At the time of writing OneDrive Personal creates versions (but not for setting the modification time), but the API for removing them returns "API not found", so cleanup and no_versions should not be used on OneDrive Personal.
+Disabling versioning
+Starting October 2018, users will no longer be able to disable versioning by default. This is because Microsoft has brought an update to the mechanism. To change this new default setting, a PowerShell command is required to be run by a SharePoint admin. If you are an admin, you can run these commands in PowerShell to change that setting:
+
+- Install-Module -Name Microsoft.Online.SharePoint.PowerShell (in case you haven't installed this already)
+- Import-Module Microsoft.Online.SharePoint.PowerShell -DisableNameChecking
+- Connect-SPOService -Url https://YOURSITE-admin.sharepoint.com -Credential YOU@YOURSITE.COM (replacing YOURSITE, YOU, YOURSITE.COM with the actual values; this will prompt for your credentials)
+- Set-SPOTenant -EnableMinimumVersionRequirement $False
+- Disconnect-SPOService (to disconnect from the server)
+
+Below are the steps for normal users to disable versioning. If you don't see the "No Versioning" option, make sure the above requirements are met.
+User Weropol has found a method to disable versioning on OneDrive
+
+- Open the settings menu by clicking on the gear symbol at the top of the OneDrive Business page.
+- Click Site settings.
+- Once on the Site settings page, navigate to Site Administration > Site libraries and lists.
+- Click Customize "Documents".
+- Click General Settings > Versioning Settings.
+- Under Document Version History select the option No versioning. Note: This will disable the creation of new file versions, but will not remove any previous versions. Your documents are safe.
+- Apply the changes by clicking OK.
+- Use rclone to upload or modify files. (I also use the --no-update-modtime flag)
+- Restore the versioning settings after using rclone. (Optional)
+
+Cleanup
+OneDrive supports rclone cleanup which causes rclone to look through every file under the path supplied and delete all versions but the current version. Because this involves traversing all the files, then querying each file for versions, it can be quite slow. Rclone does --checkers tests in parallel. The command also supports --interactive/-i or --dry-run which is a great way to see what it would do.
+rclone cleanup --interactive remote:path/subdir # interactively remove all old versions for path/subdir
+rclone cleanup remote:path/subdir # unconditionally remove all old versions for path/subdir
+NB OneDrive Personal can't currently delete versions
+Troubleshooting
+Excessive throttling or blocked on SharePoint
+If you experience excessive throttling or are being blocked on SharePoint then it may help to set the user agent explicitly with a flag like this: --user-agent "ISV|rclone.org|rclone/v1.55.1"
+The specific details can be found in the Microsoft document: Avoid getting throttled or blocked in SharePoint Online
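+For example, a full sync invocation with the user agent set might look like this (paths are placeholders):
+rclone sync /local/path remote:path --user-agent "ISV|rclone.org|rclone/v1.55.1"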
+Unexpected file size/hash differences on Sharepoint
+It is a known issue that Sharepoint (not OneDrive or OneDrive for Business) silently modifies uploaded files, mainly Office files (.docx, .xlsx, etc.), causing file size and hash checks to fail. There are also other situations that will cause OneDrive to report inconsistent file sizes. To use rclone with such affected files on Sharepoint, you may disable these checks with the following command line arguments:
+--ignore-checksum --ignore-size
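+For instance, a sync that disables both checks could be sketched as (paths are placeholders):
+rclone sync /local/docs remote:docs --ignore-checksum --ignore-size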
+Alternatively, if you have write access to the OneDrive files, it may be possible to fix this problem for certain files, by attempting the steps below. Open the web interface for OneDrive and find the affected files (which will be in the error messages/log for rclone). Simply click on each of these files, causing OneDrive to open them on the web. This will cause each file to be converted in place to a format that is functionally equivalent but which will no longer trigger the size discrepancy. Once all problematic files are converted you will no longer need the ignore options above.
+Replacing/deleting existing files on Sharepoint gets "item not found"
+It is a known issue that Sharepoint (not OneDrive or OneDrive for Business) may return "item not found" errors when users try to replace or delete uploaded files; this seems to mainly affect Office files (.docx, .xlsx, etc.) and web files (.html, .aspx, etc.). As a workaround, you may use the --backup-dir <BACKUP_DIR>
command line argument so rclone moves the files to be replaced/deleted into a given backup directory (instead of directly replacing/deleting them). For example, to instruct rclone to move the files into the directory rclone-backup-dir
on backend mysharepoint
, you may use:
+--backup-dir mysharepoint:rclone-backup-dir
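+A full command using this workaround might look like the following sketch (the local path is a placeholder):
+rclone sync /local/folder mysharepoint:folder --backup-dir mysharepoint:rclone-backup-dir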
+access_denied (AADSTS65005)
+Error: access_denied
+Code: AADSTS65005
+Description: Using application 'rclone' is currently not supported for your organization [YOUR_ORGANIZATION] because it is in an unmanaged state. An administrator needs to claim ownership of the company by DNS validation of [YOUR_ORGANIZATION] before the application rclone can be provisioned.
+This means that rclone can't use the OneDrive for Business API with your account. You can't do much about it, maybe write an email to your admins.
+However, there are other ways to interact with your OneDrive account. Have a look at the WebDAV backend: https://rclone.org/webdav/#sharepoint
+invalid_grant (AADSTS50076)
+Error: invalid_grant
+Code: AADSTS50076
+Description: Due to a configuration change made by your administrator, or because you moved to a new location, you must use multi-factor authentication to access '...'.
+If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run rclone config
, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: Already have a token - refresh?
. For this question, answer y
and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.
+Invalid request when making public links
+On Sharepoint and OneDrive for Business, rclone link
may return an "Invalid request" error. A possible cause is that the organisation admin didn't allow public links to be made for the organisation/sharepoint library. To fix the permissions as an admin, take a look at the docs: 1, 2.
+Can not access Shared
with me files
+Shared with me files is not supported by rclone currently, but there is a workaround:
+
+- Visit https://onedrive.live.com
+- Right click an item in Shared, then click Add shortcut to My files in the context menu.
+- The shortcut will appear in My files; you can access it with rclone and it behaves like a normal folder/file.
+
+Live Photos uploaded from iOS (small video clips in .heic files)
+The iOS OneDrive app introduced upload and storage of Live Photos in 2020. The usage and download of these uploaded Live Photos is unfortunately still work-in-progress and this introduces several issues when copying, synchronising and mounting – both in rclone and in the native OneDrive client on Windows.
+The root cause can easily be seen if you locate one of your Live Photos in the OneDrive web interface. Then download the photo from the web interface. You will then see that the size of downloaded .heic file is smaller than the size displayed in the web interface. The downloaded file is smaller because it only contains a single frame (still photo) extracted from the Live Photo (movie) stored in OneDrive.
+The different sizes will cause rclone copy/sync
to repeatedly recopy unmodified photos something like this:
+DEBUG : 20230203_123826234_iOS.heic: Sizes differ (src 4470314 vs dst 1298667)
+DEBUG : 20230203_123826234_iOS.heic: sha1 = fc2edde7863b7a7c93ca6771498ac797f8460750 OK
+INFO : 20230203_123826234_iOS.heic: Copied (replaced existing)
+These recopies can be worked around by adding --ignore-size
. Please note that this workaround only syncs the still-picture not the movie clip, and relies on modification dates being correctly updated on all files in all situations.
+The different sizes will also cause rclone check
to report size errors something like this:
+ERROR : 20230203_123826234_iOS.heic: sizes differ
+These check errors can be suppressed by adding --ignore-size
.
+The different sizes will also cause rclone mount
to fail downloading with an error something like this:
+ERROR : 20230203_123826234_iOS.heic: ReadFileHandle.Read error: low level retry 1/10: unexpected EOF
+or like this when using --cache-mode=full
:
+INFO : 20230203_123826234_iOS.heic: vfs cache: downloader: error count now 1: vfs reader: failed to write to cache file: 416 Requested Range Not Satisfiable:
+ERROR : 20230203_123826234_iOS.heic: vfs cache: failed to download: vfs reader: failed to write to cache file: 416 Requested Range Not Satisfiable:
+OpenDrive
+Paths are specified as remote:path
+Paths may be as deep as required, e.g. remote:directory/subdirectory
.
+Configuration
+Here is an example of how to make a remote called remote
. First run:
+ rclone config
+This will guide you through an interactive setup process:
+n) New remote
+d) Delete remote
+q) Quit config
+e/n/d/q> n
+name> remote
+Type of storage to configure.
+Choose a number from below, or type in your own value
+[snip]
+XX / OpenDrive
+ \ "opendrive"
+[snip]
+Storage> opendrive
+Username
+username>
+Password
+y) Yes type in my own password
+g) Generate random password
+y/g> y
+Enter the password:
+password:
+Confirm the password:
+password:
+--------------------
+[remote]
+username =
+password = *** ENCRYPTED ***
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+List directories in top level of your OpenDrive
+rclone lsd remote:
+List all the files in your OpenDrive
+rclone ls remote:
+To copy a local directory to an OpenDrive directory called backup
+rclone copy /home/source remote:backup
+Modification times and hashes
+OpenDrive allows modification times to be set on objects accurate to 1 second. These will be used to detect whether objects need syncing or not.
+The MD5 hash algorithm is supported.
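+Since MD5 is supported, a transfer can be verified end to end, for example:
+rclone check /home/source remote:backup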
+Restricted filename characters
+
+| Character | Value | Replacement |
+| --------- |:-----:|:-----------:|
+| NUL       | 0x00  | ␀           |
+| /         | 0x2F  | ／          |
+| "         | 0x22  | ＂          |
+| *         | 0x2A  | ＊          |
+| :         | 0x3A  | ：          |
+| <         | 0x3C  | ＜          |
+| >         | 0x3E  | ＞          |
+| ?         | 0x3F  | ？          |
+| \         | 0x5C  | ＼          |
+| \|        | 0x7C  | ｜          |
+
+File names can also not begin or end with the following characters. These only get replaced if they are the first or last character in the name:
+
+| Character | Value | Replacement |
+| --------- |:-----:|:-----------:|
+| SP        | 0x20  | ␠           |
+| HT        | 0x09  | ␉           |
+| LF        | 0x0A  | ␊           |
+| VT        | 0x0B  | ␋           |
+| CR        | 0x0D  | ␍           |
+
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
+Standard options
+Here are the Standard options specific to opendrive (OpenDrive).
+--opendrive-username
+Username.
+Properties:
+
+- Config: username
+- Env Var: RCLONE_OPENDRIVE_USERNAME
+- Type: string
+- Required: true
+
+--opendrive-password
+Password.
+NB Input to this must be obscured - see rclone obscure.
+Properties:
+
+- Config: password
+- Env Var: RCLONE_OPENDRIVE_PASSWORD
+- Type: string
+- Required: true
+
+Advanced options
+Here are the Advanced options specific to opendrive (OpenDrive).
+--opendrive-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_OPENDRIVE_ENCODING
+- Type: Encoding
+- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot
+
+--opendrive-chunk-size
+Files will be uploaded in chunks this size.
+Note that these chunks are buffered in memory so increasing them will increase memory use.
+Properties:
+
+- Config: chunk_size
+- Env Var: RCLONE_OPENDRIVE_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 10Mi
+
+--opendrive-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_OPENDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+Note that OpenDrive is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".
+There are quite a few characters that can't be in OpenDrive file names. These can't occur on Windows platforms, but on non-Windows platforms they are common. Rclone will map these names to and from an identical looking unicode equivalent. For example if a file has a ? in it, it will be mapped to ？ (a full-width question mark) instead.
+rclone about
is not supported by the OpenDrive backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs
(most free space) as a member of an rclone union remote.
+See List of backends that do not support rclone about and rclone about
+Oracle Object Storage
+
+Paths are specified as remote:bucket
(or remote:
for the lsd
command.) You may put subdirectories in too, e.g. remote:bucket/path/to/dir
.
+Sample command to transfer local artifacts to remote:bucket in Oracle Object Storage:
+rclone -vvv --progress --stats-one-line --max-stats-groups 10 --log-format date,time,UTC,longfile --fast-list --buffer-size 256Mi --oos-no-check-bucket --oos-upload-cutoff 10Mi --multi-thread-cutoff 16Mi --multi-thread-streams 3000 --transfers 3000 --checkers 64 --retries 2 --oos-chunk-size 10Mi --oos-upload-concurrency 10000 --oos-attempt-resume-upload --oos-leave-parts-on-error sync ./artifacts remote:bucket -vv
+Configuration
+Here is an example of making an oracle object storage configuration. rclone config
walks you through it.
+Here is an example of how to make a remote called remote
. First run:
+ rclone config
+This will guide you through an interactive setup process:
+n) New remote
+d) Delete remote
+r) Rename remote
+c) Copy remote
+s) Set configuration password
+q) Quit config
+e/n/d/r/c/s/q> n
+Enter name for new remote.
+name> remote
-### cleanup
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+[snip]
+XX / Oracle Cloud Infrastructure Object Storage
+ \ (oracleobjectstorage)
+Storage> oracleobjectstorage
-Remove unfinished multipart uploads.
+Option provider.
+Choose your Auth Provider
+Choose a number from below, or type in your own string value.
+Press Enter for the default (env_auth).
+ 1 / automatically pickup the credentials from runtime(env), first one to provide auth wins
+ \ (env_auth)
+ / use an OCI user and an API key for authentication.
+ 2 | you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
+ | https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
+ \ (user_principal_auth)
+ / use instance principals to authorize an instance to make API calls.
+ 3 | each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
+ | https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
+ \ (instance_principal_auth)
+ / use workload identity to grant Kubernetes pods policy-driven access to Oracle Cloud
+ 4 | Infrastructure (OCI) resources using OCI Identity and Access Management (IAM).
+ | https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm
+ \ (workload_identity_auth)
+ 5 / use resource principals to make API calls
+ \ (resource_principal_auth)
+ 6 / no credentials needed, this is typically for reading public buckets
+ \ (no_auth)
+provider> 2
- rclone backend cleanup remote: [options] [<arguments>+]
+Option namespace.
+Object storage namespace
+Enter a value.
+namespace> idbamagbg734
-This command removes unfinished multipart uploads of age greater than
-max-age which defaults to 24 hours.
+Option compartment.
+Object storage compartment OCID
+Enter a value.
+compartment> ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
-Note that you can use --interactive/-i or --dry-run with this command to see what
-it would do.
+Option region.
+Object storage Region
+Enter a value.
+region> us-ashburn-1
- rclone backend cleanup oos:bucket/path/to/object
- rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+Option endpoint.
+Endpoint for Object storage API.
+Leave blank to use the default endpoint for the region.
+Enter a value. Press Enter to leave empty.
+endpoint>
-Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
+Option config_file.
+Full Path to OCI config file
+Choose a number from below, or type in your own string value.
+Press Enter for the default (~/.oci/config).
+ 1 / oci configuration file location
+ \ (~/.oci/config)
+config_file> /etc/oci/dev.conf
+Option config_profile.
+Profile name inside OCI config file
+Choose a number from below, or type in your own string value.
+Press Enter for the default (Default).
+ 1 / Use the default profile
+ \ (Default)
+config_profile> Test
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n> n
+
+Configuration complete.
Options:
+- type: oracleobjectstorage
+- namespace: idbamagbg734
+- compartment: ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
+- region: us-ashburn-1
+- provider: user_principal_auth
+- config_file: /etc/oci/dev.conf
+- config_profile: Test
+Keep this "remote" remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
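+The same remote can also be created non-interactively with rclone config create, passing the key value pairs from the walkthrough above (shown here only as a sketch):
+rclone config create remote oracleobjectstorage provider user_principal_auth namespace idbamagbg734 compartment ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba region us-ashburn-1 config_file /etc/oci/dev.conf config_profile Test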
+See all buckets
+rclone lsd remote:
+Create a new bucket
+rclone mkdir remote:bucket
+List the contents of a bucket
+rclone ls remote:bucket
+rclone ls remote:bucket --max-depth 1
+Authentication Providers
+OCI has various authentication methods. To learn more about authentication methods please refer to the OCI authentication methods documentation. These choices can be specified in the rclone config file.
+Rclone supports the following OCI authentication providers:
+
+- User Principal
+- Instance Principal
+- Resource Principal
+- Workload Identity
+- No authentication
+
+User Principal
+Sample rclone config file for Authentication Provider User Principal:
+[oos]
+type = oracleobjectstorage
+namespace = id<redacted>34
+compartment = ocid1.compartment.oc1..aa<redacted>ba
+region = us-ashburn-1
+provider = user_principal_auth
+config_file = /home/opc/.oci/config
+config_profile = Default
+Advantages:
+
+- One can use this method from any server within OCI or on-premises or from another cloud provider.
+
+Considerations:
+
+- You need to configure the user's privileges / policy to allow access to object storage.
+- There is an overhead of managing users and keys.
+- If the user is deleted, the config file will no longer work and may cause automation regressions that use the user's credentials.
+Instance Principal
+An OCI compute instance can be authorized to use rclone by using its identity and certificates as an instance principal. With this approach no credentials have to be stored and managed.
+Sample rclone configuration file for Authentication Provider Instance Principal:
+[opc@rclone ~]$ cat ~/.config/rclone/rclone.conf
+[oos]
+type = oracleobjectstorage
+namespace = id<redacted>fn
+compartment = ocid1.compartment.oc1..aa<redacted>k7a
+region = us-ashburn-1
+provider = instance_principal_auth
+Advantages:
+
+- With instance principals, you don't need to configure user credentials, transfer/save them to disk in your compute instances, or rotate the credentials.
+- You don’t need to deal with users and keys.
+- Greatly helps in automation as you don't have to manage access keys, user private keys, storing them in vault, using kms etc.
+
+Considerations:
+
+- You need to configure a dynamic group having this instance as a member and add a policy allowing that dynamic group to read object storage.
+- Everyone who has access to this machine can execute the CLI commands.
+- It is applicable to OCI compute instances only. It cannot be used on external instances or resources.
+
+Resource Principal
+Resource principal auth is very similar to instance principal auth but used for resources that are not compute instances, such as serverless functions. To use resource principal, ensure the Rclone process is started with these environment variables set in its process.
+export OCI_RESOURCE_PRINCIPAL_VERSION=2.2
+export OCI_RESOURCE_PRINCIPAL_REGION=us-ashburn-1
+export OCI_RESOURCE_PRINCIPAL_PRIVATE_PEM=/usr/share/model-server/key.pem
+export OCI_RESOURCE_PRINCIPAL_RPST=/usr/share/model-server/security_token
+Sample rclone configuration file for Authentication Provider Resource Principal:
+[oos]
+type = oracleobjectstorage
+namespace = id<redacted>34
+compartment = ocid1.compartment.oc1..aa<redacted>ba
+region = us-ashburn-1
+provider = resource_principal_auth
+Workload Identity
+Workload Identity auth may be used when running Rclone from a Kubernetes pod on a Container Engine for Kubernetes (OKE) cluster. For more details on configuring Workload Identity, see Granting Workloads Access to OCI Resources. To use workload identity, ensure Rclone is started with these environment variables set in its process.
+export OCI_RESOURCE_PRINCIPAL_VERSION=2.2
+export OCI_RESOURCE_PRINCIPAL_REGION=us-ashburn-1
+No authentication
+Public buckets do not require any authentication mechanism to read objects. Sample rclone configuration file for No authentication:
+[oos]
+type = oracleobjectstorage
+namespace = id<redacted>34
+compartment = ocid1.compartment.oc1..aa<redacted>ba
+region = us-ashburn-1
+provider = no_auth
+Modification times and hashes
+The modification time is stored as metadata on the object as opc-meta-mtime
as floating point since the epoch, accurate to 1 ns.
+If the modification time needs to be updated rclone will attempt to perform a server-side copy to update the modification time if the object can be copied in a single part. In the case the object is larger than 5 GiB, the object will be uploaded rather than copied.
+Note that reading this from the object takes an additional HEAD
request as the metadata isn't returned in object listings.
+The MD5 hash algorithm is supported.
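+For example, listing objects together with their modification times (which may trigger the extra HEAD requests mentioned above):
+rclone lsl remote:bucket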
+Multipart uploads
+rclone supports multipart uploads with OOS which means that it can upload files bigger than 5 GiB.
+Note that files uploaded both with multipart upload and through crypt remotes do not have MD5 sums.
+rclone switches from single part uploads to multipart uploads at the point specified by --oos-upload-cutoff
. This can be a maximum of 5 GiB and a minimum of 0 (ie always upload multipart files).
+The chunk sizes used in the multipart upload are specified by --oos-chunk-size
and the number of chunks uploaded concurrently is specified by --oos-upload-concurrency
.
+Multipart uploads will use --transfers * --oos-upload-concurrency * --oos-chunk-size extra memory. Single part uploads do not use extra memory.
+Single part transfers can be faster than multipart transfers or slower depending on your latency from oos - the more latency, the more likely single part transfers will be faster.
+Increasing --oos-upload-concurrency
will increase throughput (8 would be a sensible value) and increasing --oos-chunk-size
also increases throughput (16M would be sensible). Increasing either of these will use more memory. The default values are high enough to gain most of the possible performance without using too much memory.
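+Putting these together, a tuned copy of large files might be sketched as follows (the values are illustrative, not recommendations from the docs):
+rclone copy /data/big remote:bucket/big --oos-upload-cutoff 100Mi --oos-chunk-size 16Mi --oos-upload-concurrency 8 --transfers 4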
+Standard options
+Here are the Standard options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
+--oos-provider
+Choose your Auth Provider
+Properties:
+
+- Config: provider
+- Env Var: RCLONE_OOS_PROVIDER
+- Type: string
+- Default: "env_auth"
+- Examples:
+
+- "env_auth"
+
+- automatically pickup the credentials from runtime(env), first one to provide auth wins
+
+- "user_principal_auth"
+
+- use an OCI user and an API key for authentication.
+- you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
+- https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
+
+- "instance_principal_auth"
+
+- use instance principals to authorize an instance to make API calls.
+- each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
+- https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
+
+- "workload_identity_auth"
+
+- use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM).
+- https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm
+
+- "resource_principal_auth"
+
+- use resource principals to make API calls
+
+- "no_auth"
+
+- no credentials needed, this is typically for reading public buckets
+
+
+
+--oos-namespace
+Object storage namespace
+Properties:
+
+- Config: namespace
+- Env Var: RCLONE_OOS_NAMESPACE
+- Type: string
+- Required: true
+
+--oos-compartment
+Object storage compartment OCID
+Properties:
+
+- Config: compartment
+- Env Var: RCLONE_OOS_COMPARTMENT
+- Provider: !no_auth
+- Type: string
+- Required: true
+
+--oos-region
+Object storage Region
+Properties:
+
+- Config: region
+- Env Var: RCLONE_OOS_REGION
+- Type: string
+- Required: true
+
+--oos-endpoint
+Endpoint for Object storage API.
+Leave blank to use the default endpoint for the region.
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_OOS_ENDPOINT
+- Type: string
+- Required: false
+
+--oos-config-file
+Path to OCI config file
+Properties:
+
+- Config: config_file
+- Env Var: RCLONE_OOS_CONFIG_FILE
+- Provider: user_principal_auth
+- Type: string
+- Default: "~/.oci/config"
+- Examples:
+
+- "~/.oci/config"
+
+- oci configuration file location
+
+
+
+--oos-config-profile
+Profile name inside the oci config file
+Properties:
+
+- Config: config_profile
+- Env Var: RCLONE_OOS_CONFIG_PROFILE
+- Provider: user_principal_auth
+- Type: string
+- Default: "Default"
+- Examples:
+
+- "Default"
+
+- Use the default profile
+
+
+
+Advanced options
+Here are the Advanced options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
+--oos-storage-tier
+The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm
+Properties:
+
+- Config: storage_tier
+- Env Var: RCLONE_OOS_STORAGE_TIER
+- Type: string
+- Default: "Standard"
+- Examples:
+
+- "Standard"
+
+- Standard storage tier, this is the default tier
+
+- "InfrequentAccess"
+
+- InfrequentAccess storage tier
+
+- "Archive"
+
+
+
+--oos-upload-cutoff
+Cutoff for switching to chunked upload.
+Any files larger than this will be uploaded in chunks of chunk_size. The minimum is 0 and the maximum is 5 GiB.
+Properties:
+
+- Config: upload_cutoff
+- Env Var: RCLONE_OOS_UPLOAD_CUTOFF
+- Type: SizeSuffix
+- Default: 200Mi
+
+--oos-chunk-size
+Chunk size to use for uploading.
+When uploading files larger than upload_cutoff or files with unknown size (e.g. from "rclone rcat" or uploaded with "rclone mount") they will be uploaded as multipart uploads using this chunk size.
+Note that "upload_concurrency" chunks of this size are buffered in memory per transfer.
+If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers.
+Rclone will automatically increase the chunk size when uploading a large file of known size to stay below the 10,000 chunks limit.
+Files of unknown size are uploaded with the configured chunk_size. Since the default chunk size is 5 MiB and there can be at most 10,000 chunks, this means that by default the maximum size of a file you can stream upload is 48 GiB. If you wish to stream upload larger files then you will need to increase chunk_size.
+Increasing the chunk size decreases the accuracy of the progress statistics displayed with "-P" flag.
+Properties:
+
+- Config: chunk_size
+- Env Var: RCLONE_OOS_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 5Mi
+
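+As an illustration of the streaming limit described above, uploading a file of unknown size with a larger chunk size raises the maximum streamable size (a sketch; the chunk size is illustrative):
+cat big.bin | rclone rcat remote:bucket/path/big.bin --oos-chunk-size 64Mi
+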
+--oos-max-upload-parts
+Maximum number of parts in a multipart upload.
+This option defines the maximum number of multipart chunks to use when doing a multipart upload.
+OCI has a max parts limit of 10,000 chunks.
+Rclone will automatically increase the chunk size when uploading a large file of a known size to stay below this number of chunks limit.
+Properties:
+
+- Config: max_upload_parts
+- Env Var: RCLONE_OOS_MAX_UPLOAD_PARTS
+- Type: int
+- Default: 10000
+
+--oos-upload-concurrency
+Concurrency for multipart uploads.
+This is the number of chunks of the same file that are uploaded concurrently.
+If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.
+Properties:
+
+- Config: upload_concurrency
+- Env Var: RCLONE_OOS_UPLOAD_CONCURRENCY
+- Type: int
+- Default: 10
+
+--oos-copy-cutoff
+Cutoff for switching to multipart copy.
+Any files larger than this that need to be server-side copied will be copied in chunks of this size.
+The minimum is 0 and the maximum is 5 GiB.
+Properties:
+
+- Config: copy_cutoff
+- Env Var: RCLONE_OOS_COPY_CUTOFF
+- Type: SizeSuffix
+- Default: 4.656Gi
+
+--oos-copy-timeout
+Timeout for copy.
+Copy is an asynchronous operation, specify timeout to wait for copy to succeed
+Properties:
+
+- Config: copy_timeout
+- Env Var: RCLONE_OOS_COPY_TIMEOUT
+- Type: Duration
+- Default: 1m0s
+
+--oos-disable-checksum
+Don't store MD5 checksum with object metadata.
+Normally rclone will calculate the MD5 checksum of the input before uploading it so it can add it to metadata on the object. This is great for data integrity checking but can cause long delays for large files to start uploading.
+Properties:
+
+- Config: disable_checksum
+- Env Var: RCLONE_OOS_DISABLE_CHECKSUM
+- Type: bool
+- Default: false
+
+--oos-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_OOS_ENCODING
+- Type: Encoding
+- Default: Slash,InvalidUtf8,Dot
+
+--oos-leave-parts-on-error
+If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.
+It should be set to true for resuming uploads across different sessions.
+WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add additional costs if not cleaned up.
+Properties:
+
+- Config: leave_parts_on_error
+- Env Var: RCLONE_OOS_LEAVE_PARTS_ON_ERROR
+- Type: bool
+- Default: false
+
+--oos-attempt-resume-upload
+If true attempt to resume previously started multipart upload for the object. This will be helpful to speed up multipart transfers by resuming uploads from a past session (see the sketch after the properties list below).
+WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is aborted and a new multipart upload is started with the new chunk size.
+The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully.
+Properties:
+
+- Config: attempt_resume_upload
+- Env Var: RCLONE_OOS_ATTEMPT_RESUME_UPLOAD
+- Type: bool
+- Default: false
+
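+A sketch of a transfer that can be resumed in a later session, with both flags enabled (paths are placeholders):
+rclone copy /data/large remote:bucket/large --oos-attempt-resume-upload --oos-leave-parts-on-error
+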
+--oos-no-check-bucket
+If set, don't attempt to check the bucket exists or create it.
+This can be useful when trying to minimise the number of transactions rclone does if you know the bucket exists already.
+It can also be needed if the user you are using does not have bucket creation permissions.
+Properties:
+
+- Config: no_check_bucket
+- Env Var: RCLONE_OOS_NO_CHECK_BUCKET
+- Type: bool
+- Default: false
+
+--oos-sse-customer-key-file
+To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
+Properties:
+
+- Config: sse_customer_key_file
+- Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY_FILE
+- Type: string
+- Required: false
+- Examples:
+
+
+--oos-sse-customer-key
+To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed. For more information, see Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)
+Properties:
+
+- Config: sse_customer_key
+- Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY
+- Type: string
+- Required: false
+- Examples:
+
+
+--oos-sse-customer-key-sha256
+If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption key. This value is used to check the integrity of the encryption key. see Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).
+Properties:
+
+- Config: sse_customer_key_sha256
+- Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY_SHA256
+- Type: string
+- Required: false
+- Examples:
+
+
+--oos-sse-kms-key-id
+if using your own master key in vault, this header specifies the OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
+Properties:
+
+- Config: sse_kms_key_id
+- Env Var: RCLONE_OOS_SSE_KMS_KEY_ID
+- Type: string
+- Required: false
+- Examples:
+
+
+--oos-sse-customer-algorithm
+If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm. Object Storage supports "AES256" as the encryption algorithm. For more information, see Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).
+Properties:
+
+- Config: sse_customer_algorithm
+- Env Var: RCLONE_OOS_SSE_CUSTOMER_ALGORITHM
+- Type: string
+- Required: false
+- Examples:
+
+
+--oos-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_OOS_DESCRIPTION
+- Type: string
+- Required: false
+
+Backend commands
+Here are the commands specific to the oracleobjectstorage backend.
+Run them with
+rclone backend COMMAND remote:
+The help below will explain what arguments each command takes.
+See the backend command for more info on how to pass options and arguments.
+These can be run on a running backend using the rc command backend/command.
+rename
+change the name of an object
+rclone backend rename remote: [options] [<arguments>+]
+This command can be used to rename an object.
+Usage Examples:
+rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
+list-multipart-uploads
+List the unfinished multipart uploads
+rclone backend list-multipart-uploads remote: [options] [<arguments>+]
+This command lists the unfinished multipart uploads in JSON format.
+rclone backend list-multipart-uploads oos:bucket/path/to/object
+It returns a dictionary of buckets with values as lists of unfinished multipart uploads.
+You can call it with no bucket in which case it lists all buckets, with a bucket, or with a bucket and path.
+{
+ "test-bucket": [
+ {
+ "namespace": "test-namespace",
+ "bucket": "test-bucket",
+ "object": "600m.bin",
+ "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
+ "timeCreated": "2022-07-29T06:21:16.595Z",
+ "storageTier": "Standard"
+ }
+    ]
+}
+cleanup
+Remove unfinished multipart uploads.
+rclone backend cleanup remote: [options] [<arguments>+]
+This command removes unfinished multipart uploads of age greater than max-age which defaults to 24 hours.
+Note that you can use --interactive/-i or --dry-run with this command to see what it would do.
+rclone backend cleanup oos:bucket/path/to/object
+rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
+Options:
+
+- "max-age": Max age of upload to delete
+
+restore
+Restore objects from Archive to Standard storage
+rclone backend restore remote: [options] [<arguments>+]
+This command can be used to restore one or more objects from Archive to Standard storage.
+Usage Examples:
-- "max-age": Max age of upload to delete
+rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
+rclone backend restore oos:bucket -o hours=HOURS
+This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
+rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
+All the objects shown will be marked for restore, then
+rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
+It returns a list of status dictionaries with Object Name and Status
+keys. The Status will be "RESTORED" if it was successful or an error message
+if not.
-
-## Tutorials
-### [Mounting Buckets](https://rclone.org/oracleobjectstorage/tutorial_mount/)
-
-# QingStor
-
-Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
-command.) You may put subdirectories in too, e.g. `remote:bucket/path/to/dir`.
-
-## Configuration
-
-Here is an example of making an QingStor configuration. First run
-
- rclone config
-
-This will guide you through an interactive setup process.
-
-No remotes found, make a new one? n) New remote r) Rename remote c) Copy remote s) Set configuration password q) Quit config n/r/c/s/q> n name> remote Type of storage to configure. Choose a number from below, or type in your own value [snip] XX / QingStor Object Storage "qingstor" [snip] Storage> qingstor Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank. Choose a number from below, or type in your own value 1 / Enter QingStor credentials in the next step "false" 2 / Get QingStor credentials from the environment (env vars or IAM) "true" env_auth> 1 QingStor Access Key ID - leave blank for anonymous access or runtime credentials. access_key_id> access_key QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials. secret_access_key> secret_key Enter an endpoint URL to connection QingStor API. Leave blank will use the default value "https://qingstor.com:443" endpoint> Zone connect to. Default is "pek3a". Choose a number from below, or type in your own value / The Beijing (China) Three Zone 1 | Needs location constraint pek3a. "pek3a" / The Shanghai (China) First Zone 2 | Needs location constraint sh1a. "sh1a" zone> 1 Number of connection retry. Leave blank will use the default value "3". connection_retries> Remote config -------------------- [remote] env_auth = false access_key_id = access_key secret_access_key = secret_key endpoint = zone = pek3a connection_retries = -------------------- y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y
-
-This remote is called `remote` and can now be used like this
-
-See all buckets
-
- rclone lsd remote:
-
-Make a new bucket
-
- rclone mkdir remote:bucket
-
-List the contents of a bucket
-
- rclone ls remote:bucket
-
-Sync `/home/local/directory` to the remote bucket, deleting any excess
-files in the bucket.
-
- rclone sync --interactive /home/local/directory remote:bucket
-
-### --fast-list
-
-This remote supports `--fast-list` which allows you to use fewer
-transactions in exchange for more memory. See the [rclone
-docs](https://rclone.org/docs/#fast-list) for more details.
-
-### Multipart uploads
-
-rclone supports multipart uploads with QingStor which means that it can
-upload files bigger than 5 GiB. Note that files uploaded with multipart
-upload don't have an MD5SUM.
-
-Note that incomplete multipart uploads older than 24 hours can be
-removed with `rclone cleanup remote:bucket` just for one bucket
-`rclone cleanup remote:` for all buckets. QingStor does not ever
-remove incomplete multipart uploads so it may be necessary to run this
-from time to time.
-
-### Buckets and Zone
-
-With QingStor you can list buckets (`rclone lsd`) using any zone,
-but you can only access the content of a bucket from the zone it was
-created in. If you attempt to access a bucket from the wrong zone,
-you will get an error, `incorrect zone, the bucket is not in 'XXX'
-zone`.
-
-### Authentication
-
-There are two ways to supply `rclone` with a set of QingStor
-credentials. In order of precedence:
-
- - Directly in the rclone configuration file (as configured by `rclone config`)
- - set `access_key_id` and `secret_access_key`
- - Runtime configuration:
- - set `env_auth` to `true` in the config file
- - Exporting the following environment variables before running `rclone`
- - Access Key ID: `QS_ACCESS_KEY_ID` or `QS_ACCESS_KEY`
- - Secret Access Key: `QS_SECRET_ACCESS_KEY` or `QS_SECRET_KEY`
-
-### Restricted filename characters
-
-The control characters 0x00-0x1F and / are replaced as in the [default
-restricted characters set](https://rclone.org/overview/#restricted-characters). Note
-that 0x7F is not replaced.
-
-Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
-as they can't be used in JSON strings.
-
-
-### Standard options
-
-Here are the Standard options specific to qingstor (QingCloud Object Storage).
-
-#### --qingstor-env-auth
-
-Get QingStor credentials from runtime.
-
-Only applies if access_key_id and secret_access_key is blank.
-
-Properties:
-
-- Config: env_auth
-- Env Var: RCLONE_QINGSTOR_ENV_AUTH
-- Type: bool
-- Default: false
-- Examples:
- - "false"
- - Enter QingStor credentials in the next step.
- - "true"
- - Get QingStor credentials from the environment (env vars or IAM).
-
-#### --qingstor-access-key-id
-
-QingStor Access Key ID.
-
-Leave blank for anonymous access or runtime credentials.
-
-Properties:
-
-- Config: access_key_id
-- Env Var: RCLONE_QINGSTOR_ACCESS_KEY_ID
-- Type: string
-- Required: false
-
-#### --qingstor-secret-access-key
-
-QingStor Secret Access Key (password).
-
-Leave blank for anonymous access or runtime credentials.
-
-Properties:
-
-- Config: secret_access_key
-- Env Var: RCLONE_QINGSTOR_SECRET_ACCESS_KEY
-- Type: string
-- Required: false
-
-#### --qingstor-endpoint
-
+[
+    {
+        "Object": "test.txt",
+        "Status": "RESTORED"
+    },
+    {
+        "Object": "test/file4.txt",
+        "Status": "RESTORED"
+    }
+]
+Options:
+
+- "hours": The number of hours for which this object will be restored. Default is 24 hrs.
+
+Tutorials
+Mounting Buckets
+
+QingStor
+Paths are specified as remote:bucket
(or remote:
for the lsd
command.) You may put subdirectories in too, e.g. remote:bucket/path/to/dir
.
+Configuration
+Here is an example of making a QingStor configuration. First run
+rclone config
+This will guide you through an interactive setup process.
+No remotes found, make a new one?
+n) New remote
+r) Rename remote
+c) Copy remote
+s) Set configuration password
+q) Quit config
+n/r/c/s/q> n
+name> remote
+Type of storage to configure.
+Choose a number from below, or type in your own value
+[snip]
+XX / QingStor Object Storage
+ \ "qingstor"
+[snip]
+Storage> qingstor
+Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
+Choose a number from below, or type in your own value
+ 1 / Enter QingStor credentials in the next step
+ \ "false"
+ 2 / Get QingStor credentials from the environment (env vars or IAM)
+ \ "true"
+env_auth> 1
+QingStor Access Key ID - leave blank for anonymous access or runtime credentials.
+access_key_id> access_key
+QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.
+secret_access_key> secret_key
Enter an endpoint URL to connection QingStor API.
+Leave blank will use the default value "https://qingstor.com:443"
+endpoint>
+Zone connect to. Default is "pek3a".
+Choose a number from below, or type in your own value
+ / The Beijing (China) Three Zone
+ 1 | Needs location constraint pek3a.
+ \ "pek3a"
+ / The Shanghai (China) First Zone
+ 2 | Needs location constraint sh1a.
+ \ "sh1a"
+zone> 1
+Number of connection retry.
+Leave blank will use the default value "3".
+connection_retries>
+Remote config
+--------------------
+[remote]
+env_auth = false
+access_key_id = access_key
+secret_access_key = secret_key
+endpoint =
+zone = pek3a
+connection_retries =
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+This remote is called remote
and can now be used like this
+See all buckets
+rclone lsd remote:
+Make a new bucket
+rclone mkdir remote:bucket
+List the contents of a bucket
+rclone ls remote:bucket
+Sync /home/local/directory
to the remote bucket, deleting any excess files in the bucket.
+rclone sync --interactive /home/local/directory remote:bucket
+--fast-list
+This remote supports --fast-list
which allows you to use fewer transactions in exchange for more memory. See the rclone docs for more details.
+Multipart uploads
+rclone supports multipart uploads with QingStor which means that it can upload files bigger than 5 GiB. Note that files uploaded with multipart upload don't have an MD5SUM.
+Note that incomplete multipart uploads older than 24 hours can be removed with rclone cleanup remote:bucket just for one bucket or rclone cleanup remote: for all buckets. QingStor does not ever remove incomplete multipart uploads so it may be necessary to run this from time to time.
+Buckets and Zone
+With QingStor you can list buckets (rclone lsd
) using any zone, but you can only access the content of a bucket from the zone it was created in. If you attempt to access a bucket from the wrong zone, you will get an error, incorrect zone, the bucket is not in 'XXX' zone
.
+Authentication
+There are two ways to supply rclone
with a set of QingStor credentials. In order of precedence:
+
+- Directly in the rclone configuration file (as configured by
rclone config
)
+
+- set
access_key_id
and secret_access_key
+
+- Runtime configuration:
+
+- set
env_auth
to true
in the config file
+- Exporting the following environment variables before running
rclone
+
+- Access Key ID:
QS_ACCESS_KEY_ID
or QS_ACCESS_KEY
+- Secret Access Key:
QS_SECRET_ACCESS_KEY
or QS_SECRET_KEY
+
+
+
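+For example, with env_auth = true in the config, credentials can be supplied at runtime via environment variables (values are placeholders):
+export QS_ACCESS_KEY_ID=your_access_key
+export QS_SECRET_ACCESS_KEY=your_secret_key
+rclone lsd remote:
+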
+Restricted filename characters
+The control characters 0x00-0x1F and / are replaced as in the default restricted characters set. Note that 0x7F is not replaced.
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
+Standard options
+Here are the Standard options specific to qingstor (QingCloud Object Storage).
+--qingstor-env-auth
+Get QingStor credentials from runtime.
+Only applies if access_key_id and secret_access_key is blank.
+Properties:
+
+- Config: env_auth
+- Env Var: RCLONE_QINGSTOR_ENV_AUTH
+- Type: bool
+- Default: false
+- Examples:
+
+- "false"
+
+- Enter QingStor credentials in the next step.
+
+- "true"
+
+- Get QingStor credentials from the environment (env vars or IAM).
+
+
+
+--qingstor-access-key-id
+QingStor Access Key ID.
+Leave blank for anonymous access or runtime credentials.
+Properties:
+
+- Config: access_key_id
+- Env Var: RCLONE_QINGSTOR_ACCESS_KEY_ID
+- Type: string
+- Required: false
+
+--qingstor-secret-access-key
+QingStor Secret Access Key (password).
+Leave blank for anonymous access or runtime credentials.
+Properties:
+
+- Config: secret_access_key
+- Env Var: RCLONE_QINGSTOR_SECRET_ACCESS_KEY
+- Type: string
+- Required: false
+
+--qingstor-endpoint
+Enter an endpoint URL to connection QingStor API.
+Leave blank will use the default value "https://qingstor.com:443".
+Properties:
+
+- Config: endpoint
+- Env Var: RCLONE_QINGSTOR_ENDPOINT
+- Type: string
+- Required: false
+
+--qingstor-zone
+Zone to connect to.
+Default is "pek3a".
+Properties:
+
+- Config: zone
+- Env Var: RCLONE_QINGSTOR_ZONE
+- Type: string
+- Required: false
+- Examples:
+
+- "pek3a"
+
+- The Beijing (China) Three Zone.
+- Needs location constraint pek3a.
+
+- "sh1a"
+
+- The Shanghai (China) First Zone.
+- Needs location constraint sh1a.
+
+- "gd2a"
+
+- The Guangdong (China) Second Zone.
+- Needs location constraint gd2a.
+
+
+
+Advanced options
+Here are the Advanced options specific to qingstor (QingCloud Object Storage).
+--qingstor-connection-retries
+Number of connection retries.
+Properties:
+
+- Config: connection_retries
+- Env Var: RCLONE_QINGSTOR_CONNECTION_RETRIES
+- Type: int
+- Default: 3
+
+--qingstor-upload-cutoff
+Cutoff for switching to chunked upload.
+Any files larger than this will be uploaded in chunks of chunk_size. The minimum is 0 and the maximum is 5 GiB.
+Properties:
+
+- Config: upload_cutoff
+- Env Var: RCLONE_QINGSTOR_UPLOAD_CUTOFF
+- Type: SizeSuffix
+- Default: 200Mi
+
+--qingstor-chunk-size
+Chunk size to use for uploading.
+When uploading files larger than upload_cutoff they will be uploaded as multipart uploads using this chunk size.
+Note that "--qingstor-upload-concurrency" chunks of this size are buffered in memory per transfer.
+If you are transferring large files over high-speed links and you have enough memory, then increasing this will speed up the transfers.
+Properties:
+
+- Config: chunk_size
+- Env Var: RCLONE_QINGSTOR_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 4Mi
+
+--qingstor-upload-concurrency
+Concurrency for multipart uploads.
+This is the number of chunks of the same file that are uploaded concurrently.
+NB if you set this to > 1 then the checksums of multipart uploads become corrupted (the uploads themselves are not corrupted though).
+If you are uploading small numbers of large files over high-speed links and these uploads do not fully utilize your bandwidth, then increasing this may help to speed up the transfers.
+Properties:
+
+- Config: upload_concurrency
+- Env Var: RCLONE_QINGSTOR_UPLOAD_CONCURRENCY
+- Type: int
+- Default: 1
+
+--qingstor-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_QINGSTOR_ENCODING
+- Type: Encoding
+- Default: Slash,Ctl,InvalidUtf8
+
+--qingstor-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_QINGSTOR_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+rclone about
is not supported by the qingstor backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs
(most free space) as a member of an rclone union remote.
+See List of backends that do not support rclone about and rclone about
+Quatrix
+Quatrix by Maytech is Quatrix Secure Compliant File Sharing | Maytech (https://www.maytech.net/products/quatrix-business).
+Paths are specified as remote:path
+Paths may be as deep as required, e.g., remote:directory/subdirectory
.
+The initial setup for Quatrix involves getting an API Key from Quatrix. You can get the API key in the user's profile at https://<account>/profile/api-keys
or with the help of the API - https://docs.maytech.net/quatrix/quatrix-api/api-explorer#/API-Key/post_api_key_create.
+See complete Swagger documentation for Quatrix - https://docs.maytech.net/quatrix/quatrix-api/api-explorer
+Configuration
+Here is an example of how to make a remote called remote
. First run:
+ rclone config
+This will guide you through an interactive setup process:
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Choose a number from below, or type in your own value
+[snip]
+XX / Quatrix by Maytech
+ \ "quatrix"
+[snip]
+Storage> quatrix
+API key for accessing Quatrix account.
+api_key> your_api_key
+Host name of Quatrix account.
+host> example.quatrix.it
-Leave blank will use the default value "https://qingstor.com:443".
+--------------------
+[remote]
+api_key = your_api_key
+host = example.quatrix.it
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+Once configured you can then use rclone
like this,
+List directories in top level of your Quatrix
+rclone lsd remote:
+List all the files in your Quatrix
+rclone ls remote:
+To copy a local directory to a Quatrix directory called backup
+rclone copy /home/source remote:backup
+API key validity
+API Key is created with no expiration date. It will be valid until you delete or deactivate it in your account. After disabling, the API Key can be re-enabled. If the API Key was deleted and a new key was created, you can update it in rclone config. The same happens if the hostname was changed.
+$ rclone config
+Current remotes:
-Properties:
+Name Type
+==== ====
+remote quatrix
-- Config: endpoint
-- Env Var: RCLONE_QINGSTOR_ENDPOINT
-- Type: string
-- Required: false
-
-#### --qingstor-zone
-
-Zone to connect to.
-
-Default is "pek3a".
-
-Properties:
-
-- Config: zone
-- Env Var: RCLONE_QINGSTOR_ZONE
-- Type: string
-- Required: false
-- Examples:
- - "pek3a"
- - The Beijing (China) Three Zone.
- - Needs location constraint pek3a.
- - "sh1a"
- - The Shanghai (China) First Zone.
- - Needs location constraint sh1a.
- - "gd2a"
- - The Guangdong (China) Second Zone.
- - Needs location constraint gd2a.
-
-### Advanced options
-
-Here are the Advanced options specific to qingstor (QingCloud Object Storage).
-
-#### --qingstor-connection-retries
-
-Number of connection retries.
-
-Properties:
-
-- Config: connection_retries
-- Env Var: RCLONE_QINGSTOR_CONNECTION_RETRIES
-- Type: int
-- Default: 3
-
-#### --qingstor-upload-cutoff
-
-Cutoff for switching to chunked upload.
-
-Any files larger than this will be uploaded in chunks of chunk_size.
-The minimum is 0 and the maximum is 5 GiB.
-
-Properties:
-
-- Config: upload_cutoff
-- Env Var: RCLONE_QINGSTOR_UPLOAD_CUTOFF
-- Type: SizeSuffix
-- Default: 200Mi
-
-#### --qingstor-chunk-size
-
-Chunk size to use for uploading.
-
-When uploading files larger than upload_cutoff they will be uploaded
-as multipart uploads using this chunk size.
-
-Note that "--qingstor-upload-concurrency" chunks of this size are buffered
-in memory per transfer.
-
-If you are transferring large files over high-speed links and you have
-enough memory, then increasing this will speed up the transfers.
-
-Properties:
-
-- Config: chunk_size
-- Env Var: RCLONE_QINGSTOR_CHUNK_SIZE
-- Type: SizeSuffix
-- Default: 4Mi
-
-#### --qingstor-upload-concurrency
-
-Concurrency for multipart uploads.
-
-This is the number of chunks of the same file that are uploaded
-concurrently.
-
-NB if you set this to > 1 then the checksums of multipart uploads
-become corrupted (the uploads themselves are not corrupted though).
-
-If you are uploading small numbers of large files over high-speed links
-and these uploads do not fully utilize your bandwidth, then increasing
-this may help to speed up the transfers.
-
-Properties:
-
-- Config: upload_concurrency
-- Env Var: RCLONE_QINGSTOR_UPLOAD_CONCURRENCY
-- Type: int
-- Default: 1
-
-#### --qingstor-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_QINGSTOR_ENCODING
-- Type: Encoding
-- Default: Slash,Ctl,InvalidUtf8
-
-
-
-## Limitations
-
-`rclone about` is not supported by the qingstor backend. Backends without
-this capability cannot determine free space for an rclone mount or
-use policy `mfs` (most free space) as a member of an rclone union
-remote.
-
-See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
-
-# Quatrix
-
-Quatrix by Maytech is [Quatrix Secure Compliant File Sharing | Maytech](https://www.maytech.net/products/quatrix-business).
-
-Paths are specified as `remote:path`
-
-Paths may be as deep as required, e.g., `remote:directory/subdirectory`.
-
-The initial setup for Quatrix involves getting an API Key from Quatrix. You can get the API key in the user's profile at `https://<account>/profile/api-keys`
-or with the help of the API - https://docs.maytech.net/quatrix/quatrix-api/api-explorer#/API-Key/post_api_key_create.
-
-See complete Swagger documentation for Quatrix - https://docs.maytech.net/quatrix/quatrix-api/api-explorer
-
-## Configuration
-
-Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> remote Type of storage to configure. Choose a number from below, or type in your own value [snip] XX / Quatrix by Maytech "quatrix" [snip] Storage> quatrix API key for accessing Quatrix account. api_key> your_api_key Host name of Quatrix account. host> example.quatrix.it
-
-
-
-
-
-
-
-
-
-y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y ``` |
-
-
-Once configured you can then use rclone like this, |
-
-
-List directories in top level of your Quatrix |
-
-
-rclone lsd remote: |
-
-
-List all the files in your Quatrix |
-
-
-rclone ls remote: |
-
-
-To copy a local directory to an Quatrix directory called backup |
-
-
-rclone copy /home/source remote:backup |
-
-
-### API key validity |
-
-
-API Key is created with no expiration date. It will be valid until you delete or deactivate it in your account. After disabling, the API Key can be enabled back. If the API Key was deleted and a new key was created, you can update it in rclone config. The same happens if the hostname was changed. |
-
-
-``` $ rclone config Current remotes: |
-
-
-Name Type ==== ==== remote quatrix |
-
-
-e) Edit existing remote n) New remote d) Delete remote r) Rename remote c) Copy remote s) Set configuration password q) Quit config e/n/d/r/c/s/q> e Choose a number from below, or type in an existing value 1 > remote remote> remote |
-
-
-
-[remote] type = quatrix host = some_host.quatrix.it api_key = your_api_key -------------------- Edit remote Option api_key. API key for accessing Quatrix account Enter a string value. Press Enter for the default (your_api_key) api_key> Option host. Host name of Quatrix account Enter a string value. Press Enter for the default (some_host.quatrix.it).
-
-
-
-
-
-
-
-
-
-y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y ``` |
-
-
-### Modification times and hashes |
-
-
-Quatrix allows modification times to be set on objects accurate to 1 microsecond. These will be used to detect whether objects need syncing or not. |
-
-
-Quatrix does not support hashes, so you cannot use the --checksum flag. |
-
-
-### Restricted filename characters |
-
-
-File names in Quatrix are case sensitive and have limitations like the maximum length of a filename is 255, and the minimum length is 1. A file name cannot be equal to . or .. nor contain / , \ or non-printable ascii. |
-
-
-### Transfers |
-
-
-For files above 50 MiB rclone will use a chunked transfer. Rclone will upload up to --transfers chunks at the same time (shared among all multipart uploads). Chunks are buffered in memory, and the minimal chunk size is 10_000_000 bytes by default, and it can be changed in the advanced configuration, so increasing --transfers will increase the memory use. The chunk size has a maximum size limit, which is set to 100_000_000 bytes by default and can be changed in the advanced configuration. The size of the uploaded chunk will dynamically change depending on the upload speed. The total memory use equals the number of transfers multiplied by the minimal chunk size. In case there's free memory allocated for the upload (which equals the difference of maximal_summary_chunk_size and minimal_chunk_size * transfers ), the chunk size may increase in case of high upload speed. As well as it can decrease in case of upload speed problems. If no free memory is available, all chunks will equal minimal_chunk_size . |
-
-
-### Deleting files |
-
-
-Files you delete with rclone will end up in Trash and be stored there for 30 days. Quatrix also provides an API to permanently delete files and an API to empty the Trash so that you can remove files permanently from your account. |
-
-
-### Standard options |
-
-
-Here are the Standard options specific to quatrix (Quatrix by Maytech). |
-
-
-#### --quatrix-api-key |
-
-
-API key for accessing Quatrix account |
-
-
-Properties: |
-
-
-- Config: api_key - Env Var: RCLONE_QUATRIX_API_KEY - Type: string - Required: true |
-
-
-#### --quatrix-host |
-
-
-Host name of Quatrix account |
-
-
-Properties: |
-
-
-- Config: host - Env Var: RCLONE_QUATRIX_HOST - Type: string - Required: true |
-
-
-### Advanced options |
-
-
-Here are the Advanced options specific to quatrix (Quatrix by Maytech). |
-
-
-#### --quatrix-encoding |
-
-
-The encoding for the backend. |
-
-
-See the encoding section in the overview for more info. |
-
-
-Properties: |
-
-
-- Config: encoding - Env Var: RCLONE_QUATRIX_ENCODING - Type: Encoding - Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot |
-
-
-#### --quatrix-effective-upload-time |
-
-
-Wanted upload time for one chunk |
-
-
-Properties: |
-
-
-- Config: effective_upload_time - Env Var: RCLONE_QUATRIX_EFFECTIVE_UPLOAD_TIME - Type: string - Default: "4s" |
-
-
-#### --quatrix-minimal-chunk-size |
-
-
-The minimal size for one chunk |
-
-
-Properties: |
-
-
-- Config: minimal_chunk_size - Env Var: RCLONE_QUATRIX_MINIMAL_CHUNK_SIZE - Type: SizeSuffix - Default: 9.537Mi |
-
-
-#### --quatrix-maximal-summary-chunk-size |
-
-
-The maximal summary for all chunks. It should not be less than 'transfers'*'minimal_chunk_size' |
-
-
-Properties: |
-
-
-- Config: maximal_summary_chunk_size - Env Var: RCLONE_QUATRIX_MAXIMAL_SUMMARY_CHUNK_SIZE - Type: SizeSuffix - Default: 95.367Mi |
-
-
-#### --quatrix-hard-delete |
-
-
-Delete files permanently rather than putting them into the trash. |
-
-
-Properties: |
-
-
-- Config: hard_delete - Env Var: RCLONE_QUATRIX_HARD_DELETE - Type: bool - Default: false |
-
-
-## Storage usage |
-
-
-The storage usage in Quatrix is restricted to the account during the purchase. You can restrict any user with a smaller storage limit. The account limit is applied if the user has no custom storage limit. Once you've reached the limit, the upload of files will fail. This can be fixed by freeing up the space or increasing the quota. |
-
-
-## Server-side operations |
-
-
-Quatrix supports server-side operations (copy and move). In case of conflict, files are overwritten during server-side operation. |
-
-
-# Sia |
-
-
-Sia (sia.tech) is a decentralized cloud storage platform based on the blockchain technology. With rclone you can use it like any other remote filesystem or mount Sia folders locally. The technology behind it involves a number of new concepts such as Siacoins and Wallet, Blockchain and Consensus, Renting and Hosting, and so on. If you are new to it, you'd better first familiarize yourself using their excellent support documentation. |
-
-
-## Introduction |
-
-
-Before you can use rclone with Sia, you will need to have a running copy of Sia-UI or siad (the Sia daemon) locally on your computer or on local network (e.g. a NAS). Please follow the Get started guide and install one. |
-
-
-rclone interacts with Sia network by talking to the Sia daemon via HTTP API which is usually available on port 9980. By default you will run the daemon locally on the same computer so it's safe to leave the API password blank (the API URL will be http://127.0.0.1:9980 making external access impossible). |
-
-
-However, if you want to access Sia daemon running on another node, for example due to memory constraints or because you want to share single daemon between several rclone and Sia-UI instances, you'll need to make a few more provisions: - Ensure you have Sia daemon installed directly or in a docker container because Sia-UI does not support this mode natively. - Run it on externally accessible port, for example provide --api-addr :9980 and --disable-api-security arguments on the daemon command line. - Enforce API password for the siad daemon via environment variable SIA_API_PASSWORD or text file named apipassword in the daemon directory. - Set rclone backend option api_password taking it from above locations. |
-
-
-Notes: 1. If your wallet is locked, rclone cannot unlock it automatically. You should either unlock it in advance by using Sia-UI or via command line siac wallet unlock . Alternatively you can make siad unlock your wallet automatically upon startup by running it with environment variable SIA_WALLET_PASSWORD . 2. If siad cannot find the SIA_API_PASSWORD variable or the apipassword file in the SIA_DIR directory, it will generate a random password and store in the text file named apipassword under YOUR_HOME/.sia/ directory on Unix or C:\Users\YOUR_HOME\AppData\Local\Sia\apipassword on Windows. Remember this when you configure password in rclone. 3. The only way to use siad without API password is to run it on localhost with command line argument --authorize-api=false , but this is insecure and strongly discouraged. |
-
-
-## Configuration |
-
-
-Here is an example of how to make a sia remote called mySia . First, run: |
-
-
-rclone config |
-
-
-This will guide you through an interactive setup process: |
-
-
-``` No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> mySia Type of storage to configure. Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value ... 29 / Sia Decentralized Cloud "sia" ... Storage> sia Sia daemon API URL, like http://sia.daemon.host:9980. Note that siad must run with --disable-api-security to open API port for other hosts (not recommended). Keep default if Sia daemon runs on localhost. Enter a string value. Press Enter for the default ("http://127.0.0.1:9980"). api_url> http://127.0.0.1:9980 Sia Daemon API Password. Can be found in the apipassword file located in HOME/.sia/ or in the daemon directory. y) Yes type in my own password g) Generate random password n) No leave this optional password blank (default) y/g/n> y Enter the password: password: Confirm the password: password: Edit advanced config? y) Yes n) No (default) y/n> n |
-
-
-
-[mySia] type = sia api_url = http://127.0.0.1:9980 api_password = *** ENCRYPTED *** -------------------- y) Yes this is OK (default) e) Edit this remote d) Delete this remote y/e/d> y
-
-Once configured, you can then use `rclone` like this:
-
-- List directories in top level of your Sia storage
-
-rclone lsd mySia:
-
-- List all the files in your Sia storage
-
-rclone ls mySia:
-
-- Upload a local directory to the Sia directory called _backup_
-
-rclone copy /home/source mySia:backup
-
-
-### Standard options
-
-Here are the Standard options specific to sia (Sia Decentralized Cloud).
-
-#### --sia-api-url
+e) Edit existing remote
+n) New remote
+d) Delete remote
+r) Rename remote
+c) Copy remote
+s) Set configuration password
+q) Quit config
+e/n/d/r/c/s/q> e
+Choose a number from below, or type in an existing value
+ 1 > remote
+remote> remote
+--------------------
+[remote]
+type = quatrix
+host = some_host.quatrix.it
+api_key = your_api_key
+--------------------
+Edit remote
+Option api_key.
+API key for accessing Quatrix account
+Enter a string value. Press Enter for the default (your_api_key)
+api_key>
+Option host.
+Host name of Quatrix account
+Enter a string value. Press Enter for the default (some_host.quatrix.it).
+--------------------
+[remote]
+type = quatrix
+host = some_host.quatrix.it
+api_key = your_api_key
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+Modification times and hashes
+Quatrix allows modification times to be set on objects accurate to 1 microsecond. These will be used to detect whether objects need syncing or not.
+Quatrix does not support hashes, so you cannot use the --checksum flag.
+Restricted filename characters
+File names in Quatrix are case sensitive. The maximum length of a filename is 255 characters and the minimum length is 1. A file name cannot be equal to . or .. nor contain /, \ or non-printable ascii.
+Transfers
+For files above 50 MiB rclone will use a chunked transfer. Rclone will upload up to --transfers chunks at the same time (shared among all multipart uploads). Chunks are buffered in memory. The minimal chunk size is 10_000_000 bytes by default and can be changed in the advanced configuration, so increasing --transfers will increase the memory use. The chunk size has a maximum limit, which is set to 100_000_000 bytes by default and can also be changed in the advanced configuration. The size of the uploaded chunk changes dynamically depending on the upload speed. The total memory use equals the number of transfers multiplied by the minimal chunk size. If there is free memory allocated for the upload (the difference between maximal_summary_chunk_size and minimal_chunk_size * transfers), the chunk size may increase when the upload speed is high, and it can decrease again if the upload speed drops. If no free memory is available, all chunks will equal minimal_chunk_size.
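+As an illustration of the arithmetic above (using the defaults quoted in this section): with --transfers 4 and the default minimal_chunk_size of 10_000_000 bytes, the baseline memory use is 4 * 10_000_000 = 40_000_000 bytes. With maximal_summary_chunk_size left at its default of 100_000_000 bytes there are 60_000_000 bytes of headroom, so chunks may grow while the upload is fast and shrink back towards minimal_chunk_size if it slows down, e.g.
+rclone copy --transfers 4 /home/source remote:backup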
+Deleting files
+Files you delete with rclone will end up in Trash and be stored there for 30 days. Quatrix also provides an API to permanently delete files and an API to empty the Trash so that you can remove files permanently from your account.
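+If you prefer to bypass the trash entirely you can use the hard_delete option described below, for example (a sketch, adjust the path to your setup):
+rclone delete --quatrix-hard-delete remote:path/to/dir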
+Standard options
+Here are the Standard options specific to quatrix (Quatrix by Maytech).
+--quatrix-api-key
+API key for accessing Quatrix account
+Properties:
+
+- Config: api_key
+- Env Var: RCLONE_QUATRIX_API_KEY
+- Type: string
+- Required: true
+
+--quatrix-host
+Host name of Quatrix account
+Properties:
+
+- Config: host
+- Env Var: RCLONE_QUATRIX_HOST
+- Type: string
+- Required: true
+
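+As the Env Var entries above suggest, these options can also be supplied through the environment instead of the config file, for example when using an on-the-fly remote (the values are placeholders):
+RCLONE_QUATRIX_HOST=example.quatrix.it RCLONE_QUATRIX_API_KEY=your_api_key rclone lsd :quatrix:
+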
+Advanced options
+Here are the Advanced options specific to quatrix (Quatrix by Maytech).
+--quatrix-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_QUATRIX_ENCODING
+- Type: Encoding
+- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+
+--quatrix-effective-upload-time
+Wanted upload time for one chunk
+Properties:
+
+- Config: effective_upload_time
+- Env Var: RCLONE_QUATRIX_EFFECTIVE_UPLOAD_TIME
+- Type: string
+- Default: "4s"
+
+--quatrix-minimal-chunk-size
+The minimal size for one chunk
+Properties:
+
+- Config: minimal_chunk_size
+- Env Var: RCLONE_QUATRIX_MINIMAL_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 9.537Mi
+
+--quatrix-maximal-summary-chunk-size
+The maximal summary for all chunks. It should not be less than 'transfers'*'minimal_chunk_size'
+Properties:
+
+- Config: maximal_summary_chunk_size
+- Env Var: RCLONE_QUATRIX_MAXIMAL_SUMMARY_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 95.367Mi
+
+--quatrix-hard-delete
+Delete files permanently rather than putting them into the trash
+Properties:
+
+- Config: hard_delete
+- Env Var: RCLONE_QUATRIX_HARD_DELETE
+- Type: bool
+- Default: false
+
+--quatrix-skip-project-folders
+Skip project folders in operations
+Properties:
+
+- Config: skip_project_folders
+- Env Var: RCLONE_QUATRIX_SKIP_PROJECT_FOLDERS
+- Type: bool
+- Default: false
+
+--quatrix-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_QUATRIX_DESCRIPTION
+- Type: string
+- Required: false
+
+Storage usage
+The storage usage limit in Quatrix is set for the account at purchase time. You can restrict any user with a smaller storage limit. The account limit is applied if the user has no custom storage limit. Once you've reached the limit, the upload of files will fail. This can be fixed by freeing up space or increasing the quota.
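+If your version of rclone reports quota information for this backend you can check the current usage and limit with rclone about, e.g.
+rclone about remote: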
+Server-side operations
+Quatrix supports server-side operations (copy and move). In case of conflict, files are overwritten during server-side operation.
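+For example, a copy between two paths on the same remote is carried out on the server rather than by downloading and re-uploading the data:
+rclone copy remote:source-dir remote:dest-dir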
+Sia
+Sia (sia.tech) is a decentralized cloud storage platform based on blockchain technology. With rclone you can use it like any other remote filesystem or mount Sia folders locally. The technology behind it involves a number of new concepts such as Siacoins and Wallet, Blockchain and Consensus, Renting and Hosting, and so on. If you are new to it, you should first familiarize yourself with their excellent support documentation.
+Introduction
+Before you can use rclone with Sia, you will need to have a running copy of Sia-UI or siad (the Sia daemon) locally on your computer or on your local network (e.g. a NAS). Please follow the Get started guide and install one.
+rclone interacts with the Sia network by talking to the Sia daemon via its HTTP API, which is usually available on port 9980. By default you will run the daemon locally on the same computer, so it's safe to leave the API password blank (the API URL will be http://127.0.0.1:9980, making external access impossible).
+However, if you want to access a Sia daemon running on another node, for example due to memory constraints or because you want to share a single daemon between several rclone and Sia-UI instances, you'll need to make a few more provisions (see the sketch after the notes below):
+
+- Ensure you have the Sia daemon installed directly or in a docker container because Sia-UI does not support this mode natively.
+- Run it on an externally accessible port, for example provide --api-addr :9980 and --disable-api-security arguments on the daemon command line.
+- Enforce an API password for the siad daemon via the environment variable SIA_API_PASSWORD or a text file named apipassword in the daemon directory.
+- Set the rclone backend option api_password, taking it from the above locations.
+
+Notes:
+
+1. If your wallet is locked, rclone cannot unlock it automatically. You should either unlock it in advance by using Sia-UI or via the command line siac wallet unlock. Alternatively you can make siad unlock your wallet automatically upon startup by running it with the environment variable SIA_WALLET_PASSWORD.
+2. If siad cannot find the SIA_API_PASSWORD variable or the apipassword file in the SIA_DIR directory, it will generate a random password and store it in the text file named apipassword under the YOUR_HOME/.sia/ directory on Unix or C:\Users\YOUR_HOME\AppData\Local\Sia\apipassword on Windows. Remember this when you configure the password in rclone.
+3. The only way to use siad without an API password is to run it on localhost with the command line argument --authorize-api=false, but this is insecure and strongly discouraged.
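+A minimal sketch of the remote-daemon setup described above (the host name and password are placeholders):
+
+# on the node running the Sia daemon
+SIA_API_PASSWORD=mysecret siad --api-addr :9980 --disable-api-security
+
+# rclone config on the client machine (api_password is stored obscured, see rclone obscure)
+[mySia]
+type = sia
+api_url = http://nas.local:9980
+api_password = <output of rclone obscure mysecret>
+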
+Configuration
+Here is an example of how to make a sia remote called mySia. First, run:
+ rclone config
+This will guide you through an interactive setup process:
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> mySia
+Type of storage to configure.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+...
+29 / Sia Decentralized Cloud
+ \ "sia"
+...
+Storage> sia
Sia daemon API URL, like http://sia.daemon.host:9980.
-
Note that siad must run with --disable-api-security to open API port for other hosts (not recommended).
Keep default if Sia daemon runs on localhost.
-
-Properties:
-
-- Config: api_url
-- Env Var: RCLONE_SIA_API_URL
-- Type: string
-- Default: "http://127.0.0.1:9980"
-
-#### --sia-api-password
-
+Enter a string value. Press Enter for the default ("http://127.0.0.1:9980").
+api_url> http://127.0.0.1:9980
Sia Daemon API Password.
-
Can be found in the apipassword file located in HOME/.sia/ or in the daemon directory.
-
-**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
-Properties:
-
-- Config: api_password
-- Env Var: RCLONE_SIA_API_PASSWORD
-- Type: string
-- Required: false
-
-### Advanced options
-
-Here are the Advanced options specific to sia (Sia Decentralized Cloud).
-
-#### --sia-user-agent
-
-Siad User Agent
-
-Sia daemon requires the 'Sia-Agent' user agent by default for security
-
-Properties:
-
-- Config: user_agent
-- Env Var: RCLONE_SIA_USER_AGENT
-- Type: string
-- Default: "Sia-Agent"
-
-#### --sia-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_SIA_ENCODING
-- Type: Encoding
-- Default: Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot
-
-
-
-## Limitations
-
-- Modification times not supported
-- Checksums not supported
-- `rclone about` not supported
-- rclone can work only with _Siad_ or _Sia-UI_ at the moment,
- the **SkyNet daemon is not supported yet.**
-- Sia does not allow control characters or symbols like question and pound
- signs in file names. rclone will transparently [encode](https://rclone.org/overview/#encoding)
- them for you, but you'd better be aware
-
-# Swift
-
-Swift refers to [OpenStack Object Storage](https://docs.openstack.org/swift/latest/).
-Commercial implementations of that being:
-
- * [Rackspace Cloud Files](https://www.rackspace.com/cloud/files/)
- * [Memset Memstore](https://www.memset.com/cloud/storage/)
- * [OVH Object Storage](https://www.ovh.co.uk/public-cloud/storage/object-storage/)
- * [Oracle Cloud Storage](https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html)
- * [Blomp Cloud Storage](https://www.blomp.com/cloud-storage/)
- * [IBM Bluemix Cloud ObjectStorage Swift](https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html)
-
-Paths are specified as `remote:container` (or `remote:` for the `lsd`
-command.) You may put subdirectories in too, e.g. `remote:container/path/to/dir`.
-
-## Configuration
-
-Here is an example of making a swift configuration. First run
-
- rclone config
-
-This will guide you through an interactive setup process.
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> remote Type of storage to configure. Choose a number from below, or type in your own value [snip] XX / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH) "swift" [snip] Storage> swift Get swift credentials from environment variables in standard OpenStack form. Choose a number from below, or type in your own value 1 / Enter swift credentials in the next step "false" 2 / Get swift credentials from environment vars. Leave other fields blank if using this. "true" env_auth> true User name to log in (OS_USERNAME). user> API key or password (OS_PASSWORD). key> Authentication URL for server (OS_AUTH_URL). Choose a number from below, or type in your own value 1 / Rackspace US "https://auth.api.rackspacecloud.com/v1.0" 2 / Rackspace UK "https://lon.auth.api.rackspacecloud.com/v1.0" 3 / Rackspace v2 "https://identity.api.rackspacecloud.com/v2.0" 4 / Memset Memstore UK "https://auth.storage.memset.com/v1.0" 5 / Memset Memstore UK v2 "https://auth.storage.memset.com/v2.0" 6 / OVH "https://auth.cloud.ovh.net/v3" 7 / Blomp Cloud Storage "https://authenticate.ain.net" auth> User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID). user_id> User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) domain> Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME) tenant> Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID) tenant_id> Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) tenant_domain> Region name - optional (OS_REGION_NAME) region> Storage URL - optional (OS_STORAGE_URL) storage_url> Auth Token from alternate authentication - optional (OS_AUTH_TOKEN) auth_token> AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION) auth_version> Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) Choose a number from below, or type in your own value 1 / Public (default, choose this if not sure) "public" 2 / Internal (use internal service net) "internal" 3 / Admin "admin" endpoint_type> Remote config -------------------- [test] env_auth = true user = key = auth = user_id = domain = tenant = tenant_id = tenant_domain = region = storage_url = auth_token = auth_version = endpoint_type = -------------------- y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y
-
-This remote is called `remote` and can now be used like this
-
-See all containers
-
- rclone lsd remote:
-
-Make a new container
-
- rclone mkdir remote:container
-
-List the contents of a container
-
- rclone ls remote:container
-
-Sync `/home/local/directory` to the remote container, deleting any
-excess files in the container.
-
- rclone sync --interactive /home/local/directory remote:container
-
-### Configuration from an OpenStack credentials file
-
-An OpenStack credentials file typically looks something something
-like this (without the comments)
-
-export OS_AUTH_URL=https://a.provider.net/v2.0 export OS_TENANT_ID=ffffffffffffffffffffffffffffffff export OS_TENANT_NAME="1234567890123456" export OS_USERNAME="123abc567xy" echo "Please enter your OpenStack Password: " read -sr OS_PASSWORD_INPUT export OS_PASSWORD=$OS_PASSWORD_INPUT export OS_REGION_NAME="SBG1" if [ -z "$OS_REGION_NAME" ]; then unset OS_REGION_NAME; fi
-
-The config file needs to look something like this where `$OS_USERNAME`
-represents the value of the `OS_USERNAME` variable - `123abc567xy` in
-the example above.
-
-[remote] type = swift user = $OS_USERNAME key = $OS_PASSWORD auth = $OS_AUTH_URL tenant = $OS_TENANT_NAME
-
-Note that you may (or may not) need to set `region` too - try without first.
-
-### Configuration from the environment
-
-If you prefer you can configure rclone to use swift using a standard
-set of OpenStack environment variables.
-
-When you run through the config, make sure you choose `true` for
-`env_auth` and leave everything else blank.
-
-rclone will then set any empty config parameters from the environment
-using standard OpenStack environment variables. There is [a list of
-the
-variables](https://godoc.org/github.com/ncw/swift#Connection.ApplyEnvironment)
-in the docs for the swift library.
-
-### Using an alternate authentication method
-
-If your OpenStack installation uses a non-standard authentication method
-that might not be yet supported by rclone or the underlying swift library,
-you can authenticate externally (e.g. calling manually the `openstack`
-commands to get a token). Then, you just need to pass the two
-configuration variables ``auth_token`` and ``storage_url``.
-If they are both provided, the other variables are ignored. rclone will
-not try to authenticate but instead assume it is already authenticated
-and use these two variables to access the OpenStack installation.
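-
-A sketch of what such a configuration might look like (both values obtained
-externally, e.g. from `openstack token issue` and your service catalogue):
-
-    [remote]
-    type = swift
-    auth_token = <token from your external authentication>
-    storage_url = https://storage.example.com/v1/AUTH_tenant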
-
-#### Using rclone without a config file
-
-You can use rclone with swift without a config file, if desired, like
-this:
-
-source openstack-credentials-file export RCLONE_CONFIG_MYREMOTE_TYPE=swift export RCLONE_CONFIG_MYREMOTE_ENV_AUTH=true rclone lsd myremote:
-
-### --fast-list
-
-This remote supports `--fast-list` which allows you to use fewer
-transactions in exchange for more memory. See the [rclone
-docs](https://rclone.org/docs/#fast-list) for more details.
-
-### --update and --use-server-modtime
-
-As noted below, the modified time is stored on metadata on the object. It is
-used by default for all operations that require checking the time a file was
-last updated. It allows rclone to treat the remote more like a true filesystem,
-but it is inefficient because it requires an extra API call to retrieve the
-metadata.
-
-For many operations, the time the object was last uploaded to the remote is
-sufficient to determine if it is "dirty". By using `--update` along with
-`--use-server-modtime`, you can avoid the extra API call and simply upload
-files whose local modtime is newer than the time it was last uploaded.
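-
-For example (a sketch; adjust the path and remote name to your setup):
-
-    rclone sync --update --use-server-modtime /home/local/directory remote:container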
-
-### Modification times and hashes
-
-The modified time is stored as metadata on the object as
-`X-Object-Meta-Mtime` as floating point since the epoch accurate to 1
-ns.
-
-This is a de facto standard (used in the official python-swiftclient
-amongst others) for storing the modification time for an object.
-
-The MD5 hash algorithm is supported.
-
-### Restricted filename characters
-
-| Character | Value | Replacement |
-| --------- |:-----:|:-----------:|
-| NUL | 0x00 | ␀ |
-| / | 0x2F | / |
-
-Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
-as they can't be used in JSON strings.
-
-
-### Standard options
-
-Here are the Standard options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
-
-#### --swift-env-auth
-
+y) Yes type in my own password
+g) Generate random password
+n) No leave this optional password blank (default)
+y/g/n> y
+Enter the password:
+password:
+Confirm the password:
+password:
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n> n
+--------------------
+[mySia]
+type = sia
+api_url = http://127.0.0.1:9980
+api_password = *** ENCRYPTED ***
+--------------------
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+Once configured, you can then use rclone like this:
+
+- List directories in top level of your Sia storage
+
+rclone lsd mySia:
+
+- List all the files in your Sia storage
+
+rclone ls mySia:
+
+- Upload a local directory to the Sia directory called backup
+
+rclone copy /home/source mySia:backup
+Standard options
+Here are the Standard options specific to sia (Sia Decentralized Cloud).
+--sia-api-url
+Sia daemon API URL, like http://sia.daemon.host:9980.
+Note that siad must run with --disable-api-security to open API port for other hosts (not recommended). Keep default if Sia daemon runs on localhost.
+Properties:
+
+- Config: api_url
+- Env Var: RCLONE_SIA_API_URL
+- Type: string
+- Default: "http://127.0.0.1:9980"
+
+--sia-api-password
+Sia Daemon API Password.
+Can be found in the apipassword file located in HOME/.sia/ or in the daemon directory.
+NB Input to this must be obscured - see rclone obscure.
+Properties:
+
+- Config: api_password
+- Env Var: RCLONE_SIA_API_PASSWORD
+- Type: string
+- Required: false
+
+Advanced options
+Here are the Advanced options specific to sia (Sia Decentralized Cloud).
+--sia-user-agent
+Siad User Agent
+Sia daemon requires the 'Sia-Agent' user agent by default for security
+Properties:
+
+- Config: user_agent
+- Env Var: RCLONE_SIA_USER_AGENT
+- Type: string
+- Default: "Sia-Agent"
+
+--sia-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_SIA_ENCODING
+- Type: Encoding
+- Default: Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot
+
+--sia-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SIA_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+
+- Modification times not supported
+- Checksums not supported
+- rclone about not supported
+- rclone can work only with Siad or Sia-UI at the moment; the SkyNet daemon is not supported yet.
+- Sia does not allow control characters or symbols like question and pound signs in file names. rclone will transparently encode them for you, but you should be aware of this.
+
+Swift
+Swift refers to OpenStack Object Storage. Commercial implementations of that being:
+
+- Rackspace Cloud Files
+- Memset Memstore
+- OVH Object Storage
+- Oracle Cloud Storage
+- Blomp Cloud Storage
+- IBM Bluemix Cloud ObjectStorage Swift
+
+Paths are specified as remote:container (or remote: for the lsd command.) You may put subdirectories in too, e.g. remote:container/path/to/dir.
+Configuration
+Here is an example of making a swift configuration. First run
+rclone config
+This will guide you through an interactive setup process.
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Choose a number from below, or type in your own value
+[snip]
+XX / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)
+ \ "swift"
+[snip]
+Storage> swift
Get swift credentials from environment variables in standard OpenStack form.
-
-Properties:
-
-- Config: env_auth
-- Env Var: RCLONE_SWIFT_ENV_AUTH
-- Type: bool
-- Default: false
-- Examples:
- - "false"
- - Enter swift credentials in the next step.
- - "true"
- - Get swift credentials from environment vars.
- - Leave other fields blank if using this.
-
-#### --swift-user
-
+Choose a number from below, or type in your own value
+ 1 / Enter swift credentials in the next step
+ \ "false"
+ 2 / Get swift credentials from environment vars. Leave other fields blank if using this.
+ \ "true"
+env_auth> true
User name to log in (OS_USERNAME).
-
-Properties:
-
-- Config: user
-- Env Var: RCLONE_SWIFT_USER
-- Type: string
-- Required: false
-
-#### --swift-key
-
+user>
API key or password (OS_PASSWORD).
-
-Properties:
-
-- Config: key
-- Env Var: RCLONE_SWIFT_KEY
-- Type: string
-- Required: false
-
-#### --swift-auth
-
+key>
Authentication URL for server (OS_AUTH_URL).
-
-Properties:
-
-- Config: auth
-- Env Var: RCLONE_SWIFT_AUTH
-- Type: string
-- Required: false
-- Examples:
- - "https://auth.api.rackspacecloud.com/v1.0"
- - Rackspace US
- - "https://lon.auth.api.rackspacecloud.com/v1.0"
- - Rackspace UK
- - "https://identity.api.rackspacecloud.com/v2.0"
- - Rackspace v2
- - "https://auth.storage.memset.com/v1.0"
- - Memset Memstore UK
- - "https://auth.storage.memset.com/v2.0"
- - Memset Memstore UK v2
- - "https://auth.cloud.ovh.net/v3"
- - OVH
- - "https://authenticate.ain.net"
- - Blomp Cloud Storage
-
-#### --swift-user-id
-
+Choose a number from below, or type in your own value
+ 1 / Rackspace US
+ \ "https://auth.api.rackspacecloud.com/v1.0"
+ 2 / Rackspace UK
+ \ "https://lon.auth.api.rackspacecloud.com/v1.0"
+ 3 / Rackspace v2
+ \ "https://identity.api.rackspacecloud.com/v2.0"
+ 4 / Memset Memstore UK
+ \ "https://auth.storage.memset.com/v1.0"
+ 5 / Memset Memstore UK v2
+ \ "https://auth.storage.memset.com/v2.0"
+ 6 / OVH
+ \ "https://auth.cloud.ovh.net/v3"
+ 7 / Blomp Cloud Storage
+ \ "https://authenticate.ain.net"
+auth>
User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
-
-Properties:
-
-- Config: user_id
-- Env Var: RCLONE_SWIFT_USER_ID
-- Type: string
-- Required: false
-
-#### --swift-domain
-
+user_id>
User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
-
-Properties:
-
-- Config: domain
-- Env Var: RCLONE_SWIFT_DOMAIN
-- Type: string
-- Required: false
-
-#### --swift-tenant
-
-Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).
-
-Properties:
-
-- Config: tenant
-- Env Var: RCLONE_SWIFT_TENANT
-- Type: string
-- Required: false
-
-#### --swift-tenant-id
-
-Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).
-
-Properties:
-
-- Config: tenant_id
-- Env Var: RCLONE_SWIFT_TENANT_ID
-- Type: string
-- Required: false
-
-#### --swift-tenant-domain
-
-Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).
-
-Properties:
-
-- Config: tenant_domain
-- Env Var: RCLONE_SWIFT_TENANT_DOMAIN
-- Type: string
-- Required: false
-
-#### --swift-region
-
-Region name - optional (OS_REGION_NAME).
-
-Properties:
-
-- Config: region
-- Env Var: RCLONE_SWIFT_REGION
-- Type: string
-- Required: false
-
-#### --swift-storage-url
-
-Storage URL - optional (OS_STORAGE_URL).
-
-Properties:
-
-- Config: storage_url
-- Env Var: RCLONE_SWIFT_STORAGE_URL
-- Type: string
-- Required: false
-
-#### --swift-auth-token
-
-Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).
-
-Properties:
-
-- Config: auth_token
-- Env Var: RCLONE_SWIFT_AUTH_TOKEN
-- Type: string
-- Required: false
-
-#### --swift-application-credential-id
-
-Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).
-
-Properties:
-
-- Config: application_credential_id
-- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_ID
-- Type: string
-- Required: false
-
-#### --swift-application-credential-name
-
-Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).
-
-Properties:
-
-- Config: application_credential_name
-- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_NAME
-- Type: string
-- Required: false
-
-#### --swift-application-credential-secret
-
-Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).
-
-Properties:
-
-- Config: application_credential_secret
-- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_SECRET
-- Type: string
-- Required: false
-
-#### --swift-auth-version
-
-AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).
-
-Properties:
-
-- Config: auth_version
-- Env Var: RCLONE_SWIFT_AUTH_VERSION
-- Type: int
-- Default: 0
-
-#### --swift-endpoint-type
-
-Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).
-
-Properties:
-
-- Config: endpoint_type
-- Env Var: RCLONE_SWIFT_ENDPOINT_TYPE
-- Type: string
-- Default: "public"
-- Examples:
- - "public"
- - Public (default, choose this if not sure)
- - "internal"
- - Internal (use internal service net)
- - "admin"
- - Admin
-
-#### --swift-storage-policy
-
-The storage policy to use when creating a new container.
-
-This applies the specified storage policy when creating a new
-container. The policy cannot be changed afterwards. The allowed
-configuration values and their meaning depend on your Swift storage
-provider.
-
-Properties:
-
-- Config: storage_policy
-- Env Var: RCLONE_SWIFT_STORAGE_POLICY
-- Type: string
-- Required: false
-- Examples:
- - ""
- - Default
- - "pcs"
- - OVH Public Cloud Storage
- - "pca"
- - OVH Public Cloud Archive
-
-### Advanced options
-
-Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
-
-#### --swift-leave-parts-on-error
-
-If true avoid calling abort upload on a failure.
-
-It should be set to true for resuming uploads across different sessions.
-
-Properties:
-
-- Config: leave_parts_on_error
-- Env Var: RCLONE_SWIFT_LEAVE_PARTS_ON_ERROR
-- Type: bool
-- Default: false
-
-#### --swift-chunk-size
-
-Above this size files will be chunked into a _segments container.
-
-Above this size files will be chunked into a _segments container. The
-default for this is 5 GiB which is its maximum value.
-
-Properties:
-
-- Config: chunk_size
-- Env Var: RCLONE_SWIFT_CHUNK_SIZE
-- Type: SizeSuffix
-- Default: 5Gi
-
-#### --swift-no-chunk
-
-Don't chunk files during streaming upload.
-
-When doing streaming uploads (e.g. using rcat or mount) setting this
-flag will cause the swift backend to not upload chunked files.
-
-This will limit the maximum upload size to 5 GiB. However non chunked
-files are easier to deal with and have an MD5SUM.
-
-Rclone will still chunk files bigger than chunk_size when doing normal
-copy operations.
-
-Properties:
-
-- Config: no_chunk
-- Env Var: RCLONE_SWIFT_NO_CHUNK
-- Type: bool
-- Default: false
-
-#### --swift-no-large-objects
-
-Disable support for static and dynamic large objects
-
-Swift cannot transparently store files bigger than 5 GiB. There are
-two schemes for doing that, static or dynamic large objects, and the
-API does not allow rclone to determine whether a file is a static or
-dynamic large object without doing a HEAD on the object. Since these
-need to be treated differently, this means rclone has to issue HEAD
-requests for objects for example when reading checksums.
-
-When `no_large_objects` is set, rclone will assume that there are no
-static or dynamic large objects stored. This means it can stop doing
-the extra HEAD calls which in turn increases performance greatly
-especially when doing a swift to swift transfer with `--checksum` set.
-
-Setting this option implies `no_chunk` and also that no files will be
-uploaded in chunks, so files bigger than 5 GiB will just fail on
-upload.
-
-If you set this option and there *are* static or dynamic large objects,
-then this will give incorrect hashes for them. Downloads will succeed,
-but other operations such as Remove and Copy will fail.
-
-
-Properties:
-
-- Config: no_large_objects
-- Env Var: RCLONE_SWIFT_NO_LARGE_OBJECTS
-- Type: bool
-- Default: false
-
-#### --swift-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_SWIFT_ENCODING
-- Type: Encoding
-- Default: Slash,InvalidUtf8
-
-
-
-## Limitations
-
-The Swift API doesn't return a correct MD5SUM for segmented files
-(Dynamic or Static Large Objects) so rclone won't check or use the
-MD5SUM for these.
-
-## Troubleshooting
-
-### Rclone gives Failed to create file system for "remote:": Bad Request
-
-Due to an oddity of the underlying swift library, it gives a "Bad
-Request" error rather than a more sensible error when the
-authentication fails for Swift.
-
-So this most likely means your username / password is wrong. You can
-investigate further with the `--dump-bodies` flag.
-
-This may also be caused by specifying the region when you shouldn't
-have (e.g. OVH).
-
-### Rclone gives Failed to create file system: Response didn't have storage url and auth token
-
-This is most likely caused by forgetting to specify your tenant when
-setting up a swift remote.
-
-## OVH Cloud Archive
-
-To use rclone with OVH cloud archive, first use `rclone config` to set up a `swift` backend with OVH, choosing `pca` as the `storage_policy`.
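-
-A sketch of the relevant part of such a remote's configuration (other swift
-options omitted):
-
-    [ovh-archive]
-    type = swift
-    auth = https://auth.cloud.ovh.net/v3
-    storage_policy = pca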
-
-### Uploading Objects
-
-Uploading objects to OVH cloud archive is no different to object storage, you just simply run the command you like (move, copy or sync) to upload the objects. Once uploaded the objects will show in a "Frozen" state within the OVH control panel.
-
-### Retrieving Objects
-
-To retrieve objects use `rclone copy` as normal. If the objects are in a frozen state then rclone will ask for them all to be unfrozen and it will wait at the end of the output with a message like the following:
-
-`2019/03/23 13:06:33 NOTICE: Received retry after error - sleeping until 2019-03-23T13:16:33.481657164+01:00 (9m59.99985121s)`
-
-Rclone will wait for the time specified then retry the copy.
-
-# pCloud
-
-Paths are specified as `remote:path`
-
-Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
-
-## Configuration
-
-The initial setup for pCloud involves getting a token from pCloud which you
-need to do in your browser. `rclone config` walks you through it.
-
-Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> remote Type of storage to configure. Choose a number from below, or type in your own value [snip] XX / Pcloud "pcloud" [snip] Storage> pcloud Pcloud App Client Id - leave blank normally. client_id> Pcloud App Client Secret - leave blank normally. client_secret> Remote config Use web browser to automatically authenticate rclone with remote? * Say Y if the machine running rclone has a web browser you can use * Say N if running rclone on a (remote) machine without web browser access If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth Log in and authorize rclone for access Waiting for code... Got code -------------------- [remote] client_id = client_secret = token = {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"} -------------------- y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y
-
-See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a
-machine with no Internet browser available.
-
-Note that rclone runs a webserver on your local machine to collect the
-token as returned from pCloud. This only runs from the moment it opens
-your browser to the moment you get back the verification code. This
-is on `http://127.0.0.1:53682/` and this it may require you to unblock
-it temporarily if you are running a host firewall.
-
-Once configured you can then use `rclone` like this,
-
-List directories in top level of your pCloud
-
- rclone lsd remote:
-
-List all the files in your pCloud
-
- rclone ls remote:
-
-To copy a local directory to a pCloud directory called backup
-
- rclone copy /home/source remote:backup
-
-### Modification times and hashes
-
-pCloud allows modification times to be set on objects accurate to 1
-second. These will be used to detect whether objects need syncing or
-not. In order to set a Modification time pCloud requires the object
-be re-uploaded.
-
-pCloud supports MD5 and SHA1 hashes in the US region, and SHA1 and SHA256
-hashes in the EU region, so you can use the `--checksum` flag.
-
-### Restricted filename characters
-
-In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters)
-the following characters are also replaced:
-
-| Character | Value | Replacement |
-| --------- |:-----:|:-----------:|
-| \ | 0x5C | \ |
-
-Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
-as they can't be used in JSON strings.
-
-### Deleting files
-
-Deleted files will be moved to the trash. Your subscription level
-will determine how long items stay in the trash. `rclone cleanup` can
-be used to empty the trash.
-
-### Emptying the trash
-
-Due to an API limitation, the `rclone cleanup` command will only work if you
-set your username and password in the advanced options for this backend.
-Since we generally want to avoid storing user passwords in the rclone config
-file, we advise you to only set this up if you need the `rclone cleanup` command to work.
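-
-With `username` and `password` set in the advanced options, emptying the trash
-is then simply:
-
-    rclone cleanup remote: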
-
-### Root folder ID
-
-You can set the `root_folder_id` for rclone. This is the directory
-(identified by its `Folder ID`) that rclone considers to be the root
-of your pCloud drive.
-
-Normally you will leave this blank and rclone will determine the
-correct root to use itself.
-
-However you can set this to restrict rclone to a specific folder
-hierarchy.
-
-In order to do this you will have to find the `Folder ID` of the
-directory you wish rclone to display. This will be the `folder` field
-of the URL when you open the relevant folder in the pCloud web
-interface.
-
-So if the folder you want rclone to use has a URL which looks like
-`https://my.pcloud.com/#page=filemanager&folder=5xxxxxxxx8&tpl=foldergrid`
-in the browser, then you use `5xxxxxxxx8` as
-the `root_folder_id` in the config.
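-
-For example, to point rclone at that folder without editing the config you can
-also pass the backend flag directly (using the example ID from above):
-
-    rclone lsd remote: --pcloud-root-folder-id 5xxxxxxxx8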
-
-
-### Standard options
-
-Here are the Standard options specific to pcloud (Pcloud).
-
-#### --pcloud-client-id
-
-OAuth Client Id.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_id
-- Env Var: RCLONE_PCLOUD_CLIENT_ID
-- Type: string
-- Required: false
-
-#### --pcloud-client-secret
-
-OAuth Client Secret.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_secret
-- Env Var: RCLONE_PCLOUD_CLIENT_SECRET
-- Type: string
-- Required: false
-
-### Advanced options
-
-Here are the Advanced options specific to pcloud (Pcloud).
-
-#### --pcloud-token
-
-OAuth Access Token as a JSON blob.
-
-Properties:
-
-- Config: token
-- Env Var: RCLONE_PCLOUD_TOKEN
-- Type: string
-- Required: false
-
-#### --pcloud-auth-url
-
-Auth server URL.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: auth_url
-- Env Var: RCLONE_PCLOUD_AUTH_URL
-- Type: string
-- Required: false
-
-#### --pcloud-token-url
-
-Token server url.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: token_url
-- Env Var: RCLONE_PCLOUD_TOKEN_URL
-- Type: string
-- Required: false
-
-#### --pcloud-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_PCLOUD_ENCODING
-- Type: Encoding
-- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
-
-#### --pcloud-root-folder-id
-
-Fill in for rclone to use a non root folder as its starting point.
-
-Properties:
-
-- Config: root_folder_id
-- Env Var: RCLONE_PCLOUD_ROOT_FOLDER_ID
-- Type: string
-- Default: "d0"
-
-#### --pcloud-hostname
-
-Hostname to connect to.
-
-This is normally set when rclone initially does the oauth connection,
-however you will need to set it by hand if you are using remote config
-with rclone authorize.
-
-
-Properties:
-
-- Config: hostname
-- Env Var: RCLONE_PCLOUD_HOSTNAME
-- Type: string
-- Default: "api.pcloud.com"
-- Examples:
- - "api.pcloud.com"
- - Original/US region
- - "eapi.pcloud.com"
- - EU region
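-
-For example, if you authorized an EU region account on another machine with
-`rclone authorize`, you would then set the hostname by hand (a sketch):
-
-    rclone config update remote hostname eapi.pcloud.com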
-
-#### --pcloud-username
-
-Your pcloud username.
-
-This is only required when you want to use the cleanup command. Due to a bug
-in the pcloud API the required API does not support OAuth authentication so
-we have to rely on user password authentication for it.
-
-Properties:
-
-- Config: username
-- Env Var: RCLONE_PCLOUD_USERNAME
-- Type: string
-- Required: false
-
-#### --pcloud-password
-
-Your pcloud password.
-
-**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
-Properties:
-
-- Config: password
-- Env Var: RCLONE_PCLOUD_PASSWORD
-- Type: string
-- Required: false
-
-
-
-# PikPak
-
-PikPak is [a private cloud drive](https://mypikpak.com/).
-
-Paths are specified as `remote:path`, and may be as deep as required, e.g. `remote:directory/subdirectory`.
-
-## Configuration
-
-Here is an example of making a remote for PikPak.
-
-First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n
-Enter name for new remote. name> remote
-Option Storage. Type of storage to configure. Choose a number from below, or type in your own value. XX / PikPak (pikpak) Storage> XX
-Option user. Pikpak username. Enter a value. user> USERNAME
-Option pass. Pikpak password. Choose an alternative below. y) Yes, type in my own password g) Generate random password y/g> y Enter the password: password: Confirm the password: password:
-Edit advanced config? y) Yes n) No (default) y/n>
-Configuration complete. Options: - type: pikpak - user: USERNAME - pass: *** ENCRYPTED *** - token: {"access_token":"eyJ...","token_type":"Bearer","refresh_token":"os...","expiry":"2023-01-26T18:54:32.170582647+09:00"} Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote y/e/d> y
-
-### Modification times and hashes
-
-PikPak keeps modification times on objects, and updates them when uploading objects,
-but it does not support changing only the modification time
-
-The MD5 hash algorithm is supported.
-
-
-### Standard options
-
-Here are the Standard options specific to pikpak (PikPak).
-
-#### --pikpak-user
-
-Pikpak username.
-
-Properties:
-
-- Config: user
-- Env Var: RCLONE_PIKPAK_USER
-- Type: string
-- Required: true
-
-#### --pikpak-pass
-
-Pikpak password.
-
-**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
-Properties:
-
-- Config: pass
-- Env Var: RCLONE_PIKPAK_PASS
-- Type: string
-- Required: true
-
-### Advanced options
-
-Here are the Advanced options specific to pikpak (PikPak).
-
-#### --pikpak-client-id
-
-OAuth Client Id.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_id
-- Env Var: RCLONE_PIKPAK_CLIENT_ID
-- Type: string
-- Required: false
-
-#### --pikpak-client-secret
-
-OAuth Client Secret.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_secret
-- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
-- Type: string
-- Required: false
-
-#### --pikpak-token
-
-OAuth Access Token as a JSON blob.
-
-Properties:
-
-- Config: token
-- Env Var: RCLONE_PIKPAK_TOKEN
-- Type: string
-- Required: false
-
-#### --pikpak-auth-url
-
-Auth server URL.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: auth_url
-- Env Var: RCLONE_PIKPAK_AUTH_URL
-- Type: string
-- Required: false
-
-#### --pikpak-token-url
-
-Token server url.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: token_url
-- Env Var: RCLONE_PIKPAK_TOKEN_URL
-- Type: string
-- Required: false
-
-#### --pikpak-root-folder-id
-
-ID of the root folder.
-Leave blank normally.
-
-Fill in for rclone to use a non root folder as its starting point.
-
-
-Properties:
-
-- Config: root_folder_id
-- Env Var: RCLONE_PIKPAK_ROOT_FOLDER_ID
-- Type: string
-- Required: false
-
-#### --pikpak-use-trash
-
-Send files to the trash instead of deleting permanently.
-
-Defaults to true, namely sending files to the trash.
-Use `--pikpak-use-trash=false` to delete files permanently instead.
-
-Properties:
-
-- Config: use_trash
-- Env Var: RCLONE_PIKPAK_USE_TRASH
-- Type: bool
-- Default: true
-
-#### --pikpak-trashed-only
-
-Only show files that are in the trash.
-
-This will show trashed files in their original directory structure.
-
-Properties:
-
-- Config: trashed_only
-- Env Var: RCLONE_PIKPAK_TRASHED_ONLY
-- Type: bool
-- Default: false
-
-#### --pikpak-hash-memory-limit
-
-Files bigger than this will be cached on disk to calculate hash if required.
-
-Properties:
-
-- Config: hash_memory_limit
-- Env Var: RCLONE_PIKPAK_HASH_MEMORY_LIMIT
-- Type: SizeSuffix
-- Default: 10Mi
-
-#### --pikpak-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_PIKPAK_ENCODING
-- Type: Encoding
-- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
-
-## Backend commands
-
-Here are the commands specific to the pikpak backend.
-
-Run them with
-
- rclone backend COMMAND remote:
-
-The help below will explain what arguments each command takes.
-
-See the [backend](https://rclone.org/commands/rclone_backend/) command for more
-info on how to pass options and arguments.
-
-These can be run on a running backend using the rc command
-[backend/command](https://rclone.org/rc/#backend-command).
-
-### addurl
-
-Add offline download task for url
-
- rclone backend addurl remote: [options] [<arguments>+]
-
-This command adds offline download task for url.
-
-Usage:
-
- rclone backend addurl pikpak:dirpath url
-
-Downloads will be stored in 'dirpath'. If 'dirpath' is invalid, the
-download will fall back to the default 'My Pack' folder.
-
-
-### decompress
-
-Request decompress of a file/files in a folder
-
- rclone backend decompress remote: [options] [<arguments>+]
-
-This command requests decompression of a file or files in a folder.
-
-Usage:
-
- rclone backend decompress pikpak:dirpath {filename} -o password=password
- rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
-
-An optional argument 'filename' can be specified for a file located in
-'pikpak:dirpath'. You may want to pass '-o password=password' for
-password-protected files. Also, pass '-o delete-src-file' to delete
-source files after decompression has finished.
-
-Result:
-
- {
- "Decompressed": 17,
- "SourceDeleted": 0,
- "Errors": 0
- }
-
-
-
-
-## Limitations
-
-### Hashes may be empty
-
-PikPak supports the MD5 hash, but it is sometimes empty, especially for user-uploaded files.
-
-### Deleted files still visible with trashed-only
-
-Deleted files will still be visible with `--pikpak-trashed-only` even after the
-trash has been emptied. This goes away after a few days.
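-
-For example, to list what is still showing up in the trash, something like the
-following can be used (assuming your remote is called `remote`):
-
-    # list files currently in the PikPak trash
-    rclone ls remote: --pikpak-trashed-only=true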
-
-# premiumize.me
-
-Paths are specified as `remote:path`
-
-Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
-
-## Configuration
-
-The initial setup for [premiumize.me](https://premiumize.me/) involves getting a token from premiumize.me which you
-need to do in your browser. `rclone config` walks you through it.
-
-Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> remote Type of storage to configure. Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value [snip] XX / premiumize.me "premiumizeme" [snip] Storage> premiumizeme ** See help for premiumizeme backend at: https://rclone.org/premiumizeme/ **
-Remote config Use web browser to automatically authenticate rclone with remote? * Say Y if the machine running rclone has a web browser you can use * Say N if running rclone on a (remote) machine without web browser access If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth Log in and authorize rclone for access Waiting for code... Got code -------------------- [remote] type = premiumizeme token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"} -------------------- y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d>
-
-See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a
-machine with no Internet browser available.
-
-Note that rclone runs a webserver on your local machine to collect the
-token as returned from premiumize.me. This only runs from the moment it opens
-your browser to the moment you get back the verification code. This
-is on `http://127.0.0.1:53682/` and it may require you to unblock
-it temporarily if you are running a host firewall.
-
-Once configured you can then use `rclone` like this,
-
-List directories in top level of your premiumize.me
-
- rclone lsd remote:
-
-List all the files in your premiumize.me
-
- rclone ls remote:
-
-To copy a local directory to a premiumize.me directory called backup
-
- rclone copy /home/source remote:backup
-
-### Modification times and hashes
-
-premiumize.me does not support modification times or hashes, therefore
-syncing will default to `--size-only` checking. Note that using
-`--update` will work.
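-
-For example, a sync relying on `--update` rather than modification times might
-look like this (the paths are illustrative):
-
-    # skip files that are newer on the destination
-    rclone sync --update /home/source remote:backup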
-
-### Restricted filename characters
-
-In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters)
-the following characters are also replaced:
-
-| Character | Value | Replacement |
-| --------- |:-----:|:-----------:|
-| \ | 0x5C | \ |
-| " | 0x22 | " |
-
-Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
-as they can't be used in JSON strings.
-
-
-### Standard options
-
-Here are the Standard options specific to premiumizeme (premiumize.me).
-
-#### --premiumizeme-client-id
-
-OAuth Client Id.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_id
-- Env Var: RCLONE_PREMIUMIZEME_CLIENT_ID
-- Type: string
-- Required: false
-
-#### --premiumizeme-client-secret
-
-OAuth Client Secret.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_secret
-- Env Var: RCLONE_PREMIUMIZEME_CLIENT_SECRET
-- Type: string
-- Required: false
-
-#### --premiumizeme-api-key
-
-API Key.
-
-This is not normally used - use oauth instead.
-
-
-Properties:
-
-- Config: api_key
-- Env Var: RCLONE_PREMIUMIZEME_API_KEY
-- Type: string
-- Required: false
-
-### Advanced options
-
-Here are the Advanced options specific to premiumizeme (premiumize.me).
-
-#### --premiumizeme-token
-
-OAuth Access Token as a JSON blob.
-
-Properties:
-
-- Config: token
-- Env Var: RCLONE_PREMIUMIZEME_TOKEN
-- Type: string
-- Required: false
-
-#### --premiumizeme-auth-url
-
-Auth server URL.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: auth_url
-- Env Var: RCLONE_PREMIUMIZEME_AUTH_URL
-- Type: string
-- Required: false
-
-#### --premiumizeme-token-url
-
-Token server url.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: token_url
-- Env Var: RCLONE_PREMIUMIZEME_TOKEN_URL
-- Type: string
-- Required: false
-
-#### --premiumizeme-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_PREMIUMIZEME_ENCODING
-- Type: Encoding
-- Default: Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot
-
-
-
-## Limitations
-
-Note that premiumize.me is case insensitive so you can't have a file called
-"Hello.doc" and one called "hello.doc".
-
-premiumize.me file names can't contain the `\` or `"` characters.
-rclone maps these to and from identical looking unicode equivalents
-`\` and `"`.
-
-premiumize.me only supports filenames up to 255 characters in length.
-
-# Proton Drive
-
-[Proton Drive](https://proton.me/drive) is an end-to-end encrypted Swiss vault
- for your files that protects your data.
-
-This is an rclone backend for Proton Drive which supports the file transfer
-features of Proton Drive using the same client-side encryption.
-
-Due to the fact that Proton Drive doesn't publish its API documentation, this
-backend is implemented with best efforts by reading the open-sourced client
-source code and observing the Proton Drive traffic in the browser.
-
-**NB** This backend is currently in Beta. It is believed to be correct
-and all the integration tests pass. However, the Proton Drive protocol
-has evolved over time and there may be accounts it is not compatible
-with. Please [post on the rclone forum](https://forum.rclone.org/) if
-you find an incompatibility.
-
-Paths are specified as `remote:path`
-
-Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
-
-## Configurations
-
-Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> remote Type of storage to configure. Choose a number from below, or type in your own value [snip] XX / Proton Drive "Proton Drive" [snip] Storage> protondrive User name user> you@protonmail.com Password. y) Yes type in my own password g) Generate random password n) No leave this optional password blank y/g/n> y Enter the password: password: Confirm the password: password: Option 2fa. 2FA code (if the account requires one) Enter a value. Press Enter to leave empty. 2fa> 123456 Remote config -------------------- [remote] type = protondrive user = you@protonmail.com pass = *** ENCRYPTED *** -------------------- y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y
-
-**NOTE:** The Proton Drive encryption keys need to have been already generated
-after a regular login via the browser, otherwise attempting to use the
-credentials in `rclone` will fail.
-
-Once configured you can then use `rclone` like this,
-
-List directories in top level of your Proton Drive
-
- rclone lsd remote:
-
-List all the files in your Proton Drive
-
- rclone ls remote:
-
-To copy a local directory to a Proton Drive directory called backup
-
- rclone copy /home/source remote:backup
-
-### Modification times and hashes
-
-Proton Drive Bridge does not support updating modification times yet.
-
-The SHA1 hash algorithm is supported.
-
-### Restricted filename characters
-
-Invalid UTF-8 bytes will be [replaced](https://rclone.org/overview/#invalid-utf8), also left and
-right spaces will be removed ([code reference](https://github.com/ProtonMail/WebClients/blob/b4eba99d241af4fdae06ff7138bd651a40ef5d3c/applications/drive/src/app/store/_links/validation.ts#L51))
-
-### Duplicated files
-
-Proton Drive cannot have two files with exactly the same name and path. If a
-conflict occurs, then depending on the advanced config, the file might or might
-not be overwritten.
-
-### [Mailbox password](https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password)
-
-Please set your mailbox password in the advanced config section.
-
-### Caching
-
-The cache is currently built for the case where rclone is the only instance
-performing operations on the mount point. The event system, which is the Proton
-API mechanism that provides visibility of what has changed on the drive, is yet
-to be implemented, so updates from other clients won't be reflected in the
-cache. Thus, if there are concurrent clients accessing the same mount point,
-stale data may be served from the cache.
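-
-If you are mounting the remote and are concerned about this, the cache can be
-turned off with the `--protondrive-enable-caching` flag described below; a
-minimal sketch (the mount point is illustrative):
-
-    # mount with the metadata cache disabled
-    rclone mount --protondrive-enable-caching=false remote: /path/to/mountpoint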
-
-
-### Standard options
-
-Here are the Standard options specific to protondrive (Proton Drive).
-
-#### --protondrive-username
-
-The username of your proton account
-
-Properties:
-
-- Config: username
-- Env Var: RCLONE_PROTONDRIVE_USERNAME
-- Type: string
-- Required: true
-
-#### --protondrive-password
-
-The password of your proton account.
-
-**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
-Properties:
-
-- Config: password
-- Env Var: RCLONE_PROTONDRIVE_PASSWORD
-- Type: string
-- Required: true
-
-#### --protondrive-2fa
-
-The 2FA code
-
-The value can also be provided with --protondrive-2fa=000000
-
-The 2FA code of your proton drive account if the account is set up with
-two-factor authentication
-
-Properties:
-
-- Config: 2fa
-- Env Var: RCLONE_PROTONDRIVE_2FA
-- Type: string
-- Required: false
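-
-As noted above, the code can also be supplied on the command line at run time,
-which is usually more convenient for a time-based code; for example (the code
-itself is a placeholder):
-
-    # supply the current 2FA code for this invocation only
-    rclone lsd remote: --protondrive-2fa=123456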
-
-### Advanced options
-
-Here are the Advanced options specific to protondrive (Proton Drive).
-
-#### --protondrive-mailbox-password
-
-The mailbox password of your two-password proton account.
-
-For more information regarding the mailbox password, please check the
-following official knowledge base article:
-https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password
-
-
-**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
-Properties:
-
-- Config: mailbox_password
-- Env Var: RCLONE_PROTONDRIVE_MAILBOX_PASSWORD
-- Type: string
-- Required: false
-
-#### --protondrive-client-uid
-
-Client uid key (internal use only)
-
-Properties:
-
-- Config: client_uid
-- Env Var: RCLONE_PROTONDRIVE_CLIENT_UID
-- Type: string
-- Required: false
-
-#### --protondrive-client-access-token
-
-Client access token key (internal use only)
-
-Properties:
-
-- Config: client_access_token
-- Env Var: RCLONE_PROTONDRIVE_CLIENT_ACCESS_TOKEN
-- Type: string
-- Required: false
-
-#### --protondrive-client-refresh-token
-
-Client refresh token key (internal use only)
-
-Properties:
-
-- Config: client_refresh_token
-- Env Var: RCLONE_PROTONDRIVE_CLIENT_REFRESH_TOKEN
-- Type: string
-- Required: false
-
-#### --protondrive-client-salted-key-pass
-
-Client salted key pass key (internal use only)
-
-Properties:
-
-- Config: client_salted_key_pass
-- Env Var: RCLONE_PROTONDRIVE_CLIENT_SALTED_KEY_PASS
-- Type: string
-- Required: false
-
-#### --protondrive-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_PROTONDRIVE_ENCODING
-- Type: Encoding
-- Default: Slash,LeftSpace,RightSpace,InvalidUtf8,Dot
-
-#### --protondrive-original-file-size
-
-Return the file size before encryption
-
-The size of the encrypted file will be different from (bigger than) the
-original file size. Unless there is a reason to return the file size
-after encryption is performed, leave this option set to true, as
-features like Open(), which need to be supplied with the original content
-size, will otherwise fail to operate properly.
-
-Properties:
-
-- Config: original_file_size
-- Env Var: RCLONE_PROTONDRIVE_ORIGINAL_FILE_SIZE
-- Type: bool
-- Default: true
-
-#### --protondrive-app-version
-
-The app version string
-
-The app version string indicates the client that is currently performing
-the API request. This information is required and will be sent with every
-API request.
-
-Properties:
-
-- Config: app_version
-- Env Var: RCLONE_PROTONDRIVE_APP_VERSION
-- Type: string
-- Default: "macos-drive@1.0.0-alpha.1+rclone"
-
-#### --protondrive-replace-existing-draft
-
-Create a new revision when filename conflict is detected
-
-When a file upload is cancelled or fails before completion, a draft will be
-created and the subsequent upload of the same file to the same location will be
-reported as a conflict.
-
-The value can also be set by --protondrive-replace-existing-draft=true
-
-If the option is set to true, the draft will be replaced and then the upload
-operation will restart. If there are other clients also uploading to the same
-file location at the same time, the behavior is currently unknown. This needs
-to be set to true for the integration tests.
-If the option is set to false, an error "a draft exist - usually this means a
-file is being uploaded at another client, or, there was a failed upload attempt"
-will be returned, and no upload will happen.
-
-Properties:
-
-- Config: replace_existing_draft
-- Env Var: RCLONE_PROTONDRIVE_REPLACE_EXISTING_DRAFT
-- Type: bool
-- Default: false
-
-#### --protondrive-enable-caching
-
-Caches the files and folders metadata to reduce API calls
-
-Notice: If you are mounting ProtonDrive as a VFS, please disable this feature,
-as the current implementation doesn't update or clear the cache when there are
-external changes.
-
-The files and folders on ProtonDrive are represented as links with keyrings,
-which can be cached to improve performance and be friendly to the API server.
-
-The cache is currently built for the case where rclone is the only instance
-performing operations on the mount point. The event system, which is the Proton
-API mechanism that provides visibility of what has changed on the drive, is yet
-to be implemented, so updates from other clients won't be reflected in the
-cache. Thus, if there are concurrent clients accessing the same mount point,
-stale data may be served from the cache.
-
-Properties:
-
-- Config: enable_caching
-- Env Var: RCLONE_PROTONDRIVE_ENABLE_CACHING
-- Type: bool
-- Default: true
-
-
-
-## Limitations
-
-This backend uses the
-[Proton-API-Bridge](https://github.com/henrybear327/Proton-API-Bridge), which
-is based on [go-proton-api](https://github.com/henrybear327/go-proton-api), a
-fork of the [official repo](https://github.com/ProtonMail/go-proton-api).
-
-There is no official API documentation available from Proton Drive. But, thanks
-to Proton open sourcing [proton-go-api](https://github.com/ProtonMail/go-proton-api)
-and the web, iOS, and Android client codebases, we don't need to completely
-reverse engineer the APIs by observing the web client traffic!
-
-[proton-go-api](https://github.com/ProtonMail/go-proton-api) provides the basic
-building blocks of API calls and error handling, such as 429 exponential
-back-off, but it is pretty much just a barebone interface to the Proton API.
-For example, the encryption and decryption of the Proton Drive file are not
-provided in this library.
-
-The Proton-API-Bridge attempts to bridge the gap, so rclone can be built on
-top of it quickly. This codebase handles the intricate tasks before and after
-calling Proton APIs, particularly the complex encryption scheme, allowing
-developers to implement features for other software on top of this codebase.
-There are likely quite a few errors in this library, as there isn't official
-documentation available.
-
-# put.io
-
-Paths are specified as `remote:path`
-
-put.io paths may be as deep as required, e.g.
-`remote:directory/subdirectory`.
-
-## Configuration
-
-The initial setup for put.io involves getting a token from put.io
-which you need to do in your browser. `rclone config` walks you
-through it.
-
-Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> putio Type of storage to configure. Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value [snip] XX / Put.io "putio" [snip] Storage> putio ** See help for putio backend at: https://rclone.org/putio/ **
-Remote config Use web browser to automatically authenticate rclone with remote? * Say Y if the machine running rclone has a web browser you can use * Say N if running rclone on a (remote) machine without web browser access If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth Log in and authorize rclone for access Waiting for code... Got code -------------------- [putio] type = putio token = {"access_token":"XXXXXXXX","expiry":"0001-01-01T00:00:00Z"} -------------------- y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y Current remotes:
-Name Type ==== ==== putio putio
-
-- Edit existing remote
-- New remote
-- Delete remote
-- Rename remote
-- Copy remote
-- Set configuration password
-- Quit config e/n/d/r/c/s/q> q
-
-
-See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a
-machine with no Internet browser available.
-
-Note that rclone runs a webserver on your local machine to collect the
-token as returned from put.io if using the web browser to automatically
-authenticate. This only
-runs from the moment it opens your browser to the moment you get back
-the verification code. This is on `http://127.0.0.1:53682/` and it
-may require you to unblock it temporarily if you are running a host
-firewall, or use manual mode.
-
-You can then use it like this,
-
-List directories in top level of your put.io
-
- rclone lsd remote:
-
-List all the files in your put.io
-
- rclone ls remote:
-
-To copy a local directory to a put.io directory called backup
-
- rclone copy /home/source remote:backup
-
-### Restricted filename characters
-
-In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters)
-the following characters are also replaced:
-
-| Character | Value | Replacement |
-| --------- |:-----:|:-----------:|
-| \ | 0x5C | \ |
-
-Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
-as they can't be used in JSON strings.
-
-
-### Standard options
-
-Here are the Standard options specific to putio (Put.io).
-
-#### --putio-client-id
-
-OAuth Client Id.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_id
-- Env Var: RCLONE_PUTIO_CLIENT_ID
-- Type: string
-- Required: false
-
-#### --putio-client-secret
-
-OAuth Client Secret.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_secret
-- Env Var: RCLONE_PUTIO_CLIENT_SECRET
-- Type: string
-- Required: false
-
-### Advanced options
-
-Here are the Advanced options specific to putio (Put.io).
-
-#### --putio-token
-
-OAuth Access Token as a JSON blob.
-
-Properties:
-
-- Config: token
-- Env Var: RCLONE_PUTIO_TOKEN
-- Type: string
-- Required: false
-
-#### --putio-auth-url
-
-Auth server URL.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: auth_url
-- Env Var: RCLONE_PUTIO_AUTH_URL
-- Type: string
-- Required: false
-
-#### --putio-token-url
-
-Token server url.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: token_url
-- Env Var: RCLONE_PUTIO_TOKEN_URL
-- Type: string
-- Required: false
-
-#### --putio-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_PUTIO_ENCODING
-- Type: Encoding
-- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
-
-
-
-## Limitations
-
-put.io has rate limiting. When you hit a limit, rclone automatically
-retries after waiting the amount of time requested by the server.
-
-If you want to avoid ever hitting these limits, you may use the
-`--tpslimit` flag with a low number. Note that the imposed limits
-may be different for different operations, and may change over time.
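-
-For example, to keep well below the limits you could run a copy throttled to a
-few transactions per second (the value and paths are illustrative):
-
-    # limit rclone to 10 API transactions per second
-    rclone copy --tpslimit 10 /home/source remote:backup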
-
-# Seafile
-
-This is a backend for the [Seafile](https://www.seafile.com/) storage service:
-- It works with both the free community edition and the professional edition.
-- Seafile versions 6.x, 7.x, 8.x and 9.x are all supported.
-- Encrypted libraries are also supported.
-- It supports 2FA-enabled users
-- Using a Library API Token is **not** supported
-
-## Configuration
-
-There are two distinct modes in which you can set up your remote:
-- you point your remote to the **root of the server**, meaning you don't specify a library during the configuration:
-Paths are specified as `remote:library`. You may put subdirectories in too, e.g. `remote:library/path/to/dir`.
-- you point your remote to a specific library during the configuration:
-Paths are specified as `remote:path/to/dir`. **This is the recommended mode when using encrypted libraries**. (_This mode is possibly slightly faster than the root mode_)
-
-### Configuration in root mode
-
-Here is an example of making a seafile configuration for a user with **no** two-factor authentication. First run
-
- rclone config
-
-This will guide you through an interactive setup process. To authenticate
-you will need the URL of your server, your email (or username) and your password.
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> seafile Type of storage to configure. Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value [snip] XX / Seafile "seafile" [snip] Storage> seafile ** See help for seafile backend at: https://rclone.org/seafile/ **
-URL of seafile host to connect to Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value 1 / Connect to cloud.seafile.com "https://cloud.seafile.com/" url> http://my.seafile.server/ User name (usually email address) Enter a string value. Press Enter for the default (""). user> me@example.com Password y) Yes type in my own password g) Generate random password n) No leave this optional password blank (default) y/g> y Enter the password: password: Confirm the password: password: Two-factor authentication ('true' if the account has 2FA enabled) Enter a boolean value (true or false). Press Enter for the default ("false"). 2fa> false Name of the library. Leave blank to access all non-encrypted libraries. Enter a string value. Press Enter for the default (""). library> Library password (for encrypted libraries only). Leave blank if you pass it through the command line. y) Yes type in my own password g) Generate random password n) No leave this optional password blank (default) y/g/n> n Edit advanced config? (y/n) y) Yes n) No (default) y/n> n Remote config Two-factor authentication is not enabled on this account. -------------------- [seafile] type = seafile url = http://my.seafile.server/ user = me@example.com pass = *** ENCRYPTED *** 2fa = false -------------------- y) Yes this is OK (default) e) Edit this remote d) Delete this remote y/e/d> y
-
-This remote is called `seafile`. It's pointing to the root of your seafile server and can now be used like this:
-
-See all libraries
-
- rclone lsd seafile:
-
-Create a new library
-
- rclone mkdir seafile:library
-
-List the contents of a library
-
- rclone ls seafile:library
-
-Sync `/home/local/directory` to the remote library, deleting any
-excess files in the library.
-
- rclone sync --interactive /home/local/directory seafile:library
-
-### Configuration in library mode
-
-Here's an example of a configuration in library mode with a user that has two-factor authentication enabled. You will be asked for your 2FA code at the end of the configuration, and rclone will then attempt to authenticate you:
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> seafile Type of storage to configure. Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value [snip] XX / Seafile "seafile" [snip] Storage> seafile ** See help for seafile backend at: https://rclone.org/seafile/ **
-URL of seafile host to connect to Enter a string value. Press Enter for the default (""). Choose a number from below, or type in your own value 1 / Connect to cloud.seafile.com "https://cloud.seafile.com/" url> http://my.seafile.server/ User name (usually email address) Enter a string value. Press Enter for the default (""). user> me@example.com Password y) Yes type in my own password g) Generate random password n) No leave this optional password blank (default) y/g> y Enter the password: password: Confirm the password: password: Two-factor authentication ('true' if the account has 2FA enabled) Enter a boolean value (true or false). Press Enter for the default ("false"). 2fa> true Name of the library. Leave blank to access all non-encrypted libraries. Enter a string value. Press Enter for the default (""). library> My Library Library password (for encrypted libraries only). Leave blank if you pass it through the command line. y) Yes type in my own password g) Generate random password n) No leave this optional password blank (default) y/g/n> n Edit advanced config? (y/n) y) Yes n) No (default) y/n> n Remote config Two-factor authentication: please enter your 2FA code 2fa code> 123456 Authenticating... Success! -------------------- [seafile] type = seafile url = http://my.seafile.server/ user = me@example.com pass = 2fa = true library = My Library -------------------- y) Yes this is OK (default) e) Edit this remote d) Delete this remote y/e/d> y
-
-You'll notice your password is blank in the configuration. It's because we only need the password to authenticate you once.
-
-You specified `My Library` during the configuration. The root of the remote is pointing at the
-root of the library `My Library`:
-
-See all files in the library:
-
- rclone lsd seafile:
-
-Create a new directory inside the library
-
- rclone mkdir seafile:directory
-
-List the contents of a directory
-
- rclone ls seafile:directory
-
-Sync `/home/local/directory` to the remote library, deleting any
-excess files in the library.
-
- rclone sync --interactive /home/local/directory seafile:
-
-
-### --fast-list
-
-Seafile version 7+ supports `--fast-list` which allows you to use fewer
-transactions in exchange for more memory. See the [rclone
-docs](https://rclone.org/docs/#fast-list) for more details.
-Please note this is not supported on seafile server version 6.x.
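-
-For example, a sync using `--fast-list` might look like this:
-
-    # fewer listing transactions at the cost of more memory
-    rclone sync --interactive --fast-list /home/local/directory seafile:library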
-
-
-### Restricted filename characters
-
-In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters)
-the following characters are also replaced:
-
-| Character | Value | Replacement |
-| --------- |:-----:|:-----------:|
-| / | 0x2F | / |
-| " | 0x22 | " |
-| \ | 0x5C | \ |
-
-Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
-as they can't be used in JSON strings.
-
-### Seafile and rclone link
-
-Rclone supports generating share links for non-encrypted libraries only.
-They can either be for a file or a directory:
-
-    rclone link seafile:seafile-tutorial.doc
-    http://my.seafile.server/f/fdcd8a2f93f84b8b90f4/
-
-or if run on a directory you will get:
-
-    rclone link seafile:dir
-    http://my.seafile.server/d/9ea2455f6f55478bbb0d/
-
-Please note a share link is unique for each file or directory. If you run a link command on a file/dir
-that has already been shared, you will get the exact same link.
-
-### Compatibility
-
-It has been actively developed using the [seafile docker image](https://github.com/haiwen/seafile-docker) of these versions:
-- 6.3.4 community edition
-- 7.0.5 community edition
-- 7.1.3 community edition
-- 9.0.10 community edition
-
-Versions below 6.0 are not supported.
-Versions between 6.0 and 6.3 haven't been tested and might not work properly.
-
-Each new version of `rclone` is automatically tested against the [latest docker image](https://hub.docker.com/r/seafileltd/seafile-mc/) of the seafile community server.
-
-
-### Standard options
-
-Here are the Standard options specific to seafile (seafile).
-
-#### --seafile-url
-
-URL of seafile host to connect to.
-
-Properties:
-
-- Config: url
-- Env Var: RCLONE_SEAFILE_URL
-- Type: string
-- Required: true
-- Examples:
- - "https://cloud.seafile.com/"
- - Connect to cloud.seafile.com.
-
-#### --seafile-user
-
-User name (usually email address).
-
-Properties:
-
-- Config: user
-- Env Var: RCLONE_SEAFILE_USER
-- Type: string
-- Required: true
-
-#### --seafile-pass
-
-Password.
-
-**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
-Properties:
-
-- Config: pass
-- Env Var: RCLONE_SEAFILE_PASS
-- Type: string
-- Required: false
-
-#### --seafile-2fa
-
-Two-factor authentication ('true' if the account has 2FA enabled).
-
-Properties:
-
-- Config: 2fa
-- Env Var: RCLONE_SEAFILE_2FA
-- Type: bool
-- Default: false
-
-#### --seafile-library
-
-Name of the library.
-
-Leave blank to access all non-encrypted libraries.
-
-Properties:
-
-- Config: library
-- Env Var: RCLONE_SEAFILE_LIBRARY
-- Type: string
-- Required: false
-
-#### --seafile-library-key
-
-Library password (for encrypted libraries only).
-
-Leave blank if you pass it through the command line.
-
-**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
-Properties:
-
-- Config: library_key
-- Env Var: RCLONE_SEAFILE_LIBRARY_KEY
-- Type: string
-- Required: false
-
-#### --seafile-auth-token
-
-Authentication token.
-
-Properties:
-
-- Config: auth_token
-- Env Var: RCLONE_SEAFILE_AUTH_TOKEN
-- Type: string
-- Required: false
-
-### Advanced options
-
-Here are the Advanced options specific to seafile (seafile).
-
-#### --seafile-create-library
-
-Should rclone create a library if it doesn't exist.
-
-Properties:
-
-- Config: create_library
-- Env Var: RCLONE_SEAFILE_CREATE_LIBRARY
-- Type: bool
-- Default: false
-
-#### --seafile-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_SEAFILE_ENCODING
-- Type: Encoding
-- Default: Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8
-
-
-
-# SFTP
-
-SFTP is the [Secure (or SSH) File Transfer
-Protocol](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol).
-
-The SFTP backend can be used with a number of different providers:
-
-
-- Hetzner Storage Box
-- rsync.net
-
-
-SFTP runs over SSH v2 and is installed as standard with most modern
-SSH installations.
-
-Paths are specified as `remote:path`. If the path does not begin with
-a `/` it is relative to the home directory of the user. An empty path
-`remote:` refers to the user's home directory. For example, `rclone lsd remote:`
-would list the home directory of the user configured in the rclone remote config
-(i.e. `/home/sftpuser`). However, `rclone lsd remote:/` would list the root
-directory of the remote machine (i.e. `/`).
-
-Note that some SFTP servers will need the leading / - Synology is a
-good example of this. rsync.net and Hetzner, on the other hand, require users to
-OMIT the leading /.
-
-Note that by default rclone will try to execute shell commands on
-the server, see [shell access considerations](#shell-access-considerations).
-
-## Configuration
-
-Here is an example of making an SFTP configuration. First run
-
- rclone config
-
-This will guide you through an interactive setup process.
-
-No remotes found, make a new one? n) New remote s) Set configuration password q) Quit config n/s/q> n name> remote Type of storage to configure. Choose a number from below, or type in your own value [snip] XX / SSH/SFTP "sftp" [snip] Storage> sftp SSH host to connect to Choose a number from below, or type in your own value 1 / Connect to example.com "example.com" host> example.com SSH username Enter a string value. Press Enter for the default ("$USER"). user> sftpuser SSH port number Enter a signed integer. Press Enter for the default (22). port> SSH password, leave blank to use ssh-agent. y) Yes type in my own password g) Generate random password n) No leave this optional password blank y/g/n> n Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent. key_file> Remote config -------------------- [remote] host = example.com user = sftpuser port = pass = key_file = -------------------- y) Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y
-
-This remote is called `remote` and can now be used like this:
-
-See all directories in the home directory
-
- rclone lsd remote:
-
-See all directories in the root directory
-
- rclone lsd remote:/
-
-Make a new directory
-
- rclone mkdir remote:path/to/directory
-
-List the contents of a directory
-
- rclone ls remote:path/to/directory
-
-Sync `/home/local/directory` to the remote directory, deleting any
-excess files in the directory.
-
- rclone sync --interactive /home/local/directory remote:directory
-
-Mount the remote path `/srv/www-data/` to the local path
-`/mnt/www-data`
-
- rclone mount remote:/srv/www-data/ /mnt/www-data
-
-### SSH Authentication
-
-The SFTP remote supports three authentication methods:
-
- * Password
- * Key file, including certificate signed keys
- * ssh-agent
-
-Key files should be PEM-encoded private key files. For instance `/home/$USER/.ssh/id_rsa`.
-Only unencrypted OpenSSH or PEM encrypted files are supported.
-
-The key can be specified either in an external file (key_file) or contained within the
-rclone config file (key_pem). If using key_pem in the config file, the entry should be on a
-single line with newline markers ('\n' or '\r\n') separating the lines, i.e.
-
- key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY-----
-
-This will generate it correctly for key_pem for use in the config:
-
- awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa
-
-If you don't specify `pass`, `key_file`, `key_pem` or `ask_password` then
-rclone will attempt to contact an ssh-agent.
-to force the usage of an ssh-agent. In this case `key_file` or `key_pem` can
-also be specified to force the usage of a specific key in the ssh-agent.
-
-Using an ssh-agent is the only way to load encrypted OpenSSH keys at the moment.
-
-If you set the `ask_password` option, rclone will prompt for a password when
-needed and no password has been configured.
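-
-Putting this together, a minimal sketch of a remote that uses the ssh-agent but
-pins it to one particular key could look like this (host, user and key path are
-illustrative):
-
-    [remote]
-    type = sftp
-    host = example.com
-    user = sftpuser
-    # use the ssh-agent, but only the key matching this key file
-    key_use_agent = true
-    key_file = ~/.ssh/id_rsa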
-
-#### Certificate-signed keys
-
-With traditional key-based authentication, you configure your private key only,
-and the public key built into it will be used during the authentication process.
-
-If you have a certificate you may use it to sign your public key, creating a
-separate SSH user certificate that should be used instead of the plain public key
-extracted from the private key. Then you must provide the path to the
-user certificate public key file in `pubkey_file`.
-
-Note: This is not the traditional public key paired with your private key,
-typically saved as `/home/$USER/.ssh/id_rsa.pub`. Setting this path in
-`pubkey_file` will not work.
-
-Example:
-
-[remote] type = sftp host = example.com user = sftpuser key_file = ~/id_rsa pubkey_file = ~/id_rsa-cert.pub
-
-If you concatenate a cert with a private key then you can specify the
-merged file in both places.
-
-Note: the cert must come first in the file. e.g.
-
-```
-cat id_rsa-cert.pub id_rsa > merged_key
-```
-
-### Host key validation
-
-By default rclone will not check the server's host key for validation. This
-can allow an attacker to replace the server with their own and, if you use
-password authentication, this can lead to that password being exposed.
-
-Host key matching, using standard `known_hosts` files can be turned on by
-enabling the `known_hosts_file` option. This can point to the file maintained
-by `OpenSSH` or can point to a unique file.
-
-e.g. using the OpenSSH `known_hosts` file:
-
-```
+domain>
+Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
+tenant>
+Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
+tenant_id>
+Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
+tenant_domain>
+Region name - optional (OS_REGION_NAME)
+region>
+Storage URL - optional (OS_STORAGE_URL)
+storage_url>
+Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
+auth_token>
+AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
+auth_version>
+Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)
+Choose a number from below, or type in your own value
+ 1 / Public (default, choose this if not sure)
+ \ "public"
+ 2 / Internal (use internal service net)
+ \ "internal"
+ 3 / Admin
+ \ "admin"
+endpoint_type>
+Remote config
+--------------------
+[test]
+env_auth = true
+user =
+key =
+auth =
+user_id =
+domain =
+tenant =
+tenant_id =
+tenant_domain =
+region =
+storage_url =
+auth_token =
+auth_version =
+endpoint_type =
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+This remote is called remote and can now be used like this
+See all containers
+rclone lsd remote:
+Make a new container
+rclone mkdir remote:container
+List the contents of a container
+rclone ls remote:container
+Sync /home/local/directory to the remote container, deleting any excess files in the container.
+rclone sync --interactive /home/local/directory remote:container
+Configuration from an OpenStack credentials file
+An OpenStack credentials file typically looks something like this (without the comments)
+export OS_AUTH_URL=https://a.provider.net/v2.0
+export OS_TENANT_ID=ffffffffffffffffffffffffffffffff
+export OS_TENANT_NAME="1234567890123456"
+export OS_USERNAME="123abc567xy"
+echo "Please enter your OpenStack Password: "
+read -sr OS_PASSWORD_INPUT
+export OS_PASSWORD=$OS_PASSWORD_INPUT
+export OS_REGION_NAME="SBG1"
+if [ -z "$OS_REGION_NAME" ]; then unset OS_REGION_NAME; fi
+The config file needs to look something like this where $OS_USERNAME represents the value of the OS_USERNAME variable - 123abc567xy in the example above.
+[remote]
+type = swift
+user = $OS_USERNAME
+key = $OS_PASSWORD
+auth = $OS_AUTH_URL
+tenant = $OS_TENANT_NAME
+Note that you may (or may not) need to set region too - try without first.
+Configuration from the environment
+If you prefer you can configure rclone to use swift using a standard set of OpenStack environment variables.
+When you run through the config, make sure you choose true for env_auth and leave everything else blank.
+rclone will then set any empty config parameters from the environment using standard OpenStack environment variables. There is a list of the variables in the docs for the swift library.
+Using an alternate authentication method
+If your OpenStack installation uses a non-standard authentication method that might not yet be supported by rclone or the underlying swift library, you can authenticate externally (e.g. manually calling the openstack commands to get a token). Then, you just need to pass the two configuration variables auth_token and storage_url. If they are both provided, the other variables are ignored. rclone will not try to authenticate but instead assume it is already authenticated and use these two variables to access the OpenStack installation.
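+For example, a config for this externally authenticated setup might look something like this (the token and storage URL values are placeholders):
+[remote]
+type = swift
+# placeholder values - substitute the token and URL obtained externally
+auth_token = XXXXXXXXXXXXXXXXXXXX
+storage_url = https://storage.example.com/v1/AUTH_tenant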
+Using rclone without a config file
+You can use rclone with swift without a config file, if desired, like this:
+source openstack-credentials-file
+export RCLONE_CONFIG_MYREMOTE_TYPE=swift
+export RCLONE_CONFIG_MYREMOTE_ENV_AUTH=true
+rclone lsd myremote:
+--fast-list
+This remote supports --fast-list which allows you to use fewer transactions in exchange for more memory. See the rclone docs for more details.
+--update and --use-server-modtime
+As noted below, the modified time is stored as metadata on the object. It is used by default for all operations that require checking the time a file was last updated. It allows rclone to treat the remote more like a true filesystem, but it is inefficient because it requires an extra API call to retrieve the metadata.
+For many operations, the time the object was last uploaded to the remote is sufficient to determine if it is "dirty". By using --update along with --use-server-modtime, you can avoid the extra API call and simply upload files whose local modtime is newer than the time it was last uploaded.
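+For example (the paths and container name are illustrative):
+rclone copy --update --use-server-modtime /home/local/directory remote:container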
+Modification times and hashes
+The modified time is stored as metadata on the object as X-Object-Meta-Mtime, as a floating point number of seconds since the epoch, accurate to 1 ns.
+This is a de facto standard (used in the official python-swiftclient amongst others) for storing the modification time for an object.
+The MD5 hash algorithm is supported.
+Restricted filename characters
+| Character | Value | Replacement |
+| --------- |:-----:|:-----------:|
+| NUL       | 0x00  | ␀           |
+| /         | 0x2F  | ／          |
+
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
+Standard options
+Here are the Standard options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
+--swift-env-auth
+Get swift credentials from environment variables in standard OpenStack form.
+Properties:
+
+- Config: env_auth
+- Env Var: RCLONE_SWIFT_ENV_AUTH
+- Type: bool
+- Default: false
+- Examples:
+    - "false"
+        - Enter swift credentials in the next step.
+    - "true"
+        - Get swift credentials from environment vars.
+        - Leave other fields blank if using this.
+
+--swift-user
+User name to log in (OS_USERNAME).
+Properties:
+
+- Config: user
+- Env Var: RCLONE_SWIFT_USER
+- Type: string
+- Required: false
+
+--swift-key
+API key or password (OS_PASSWORD).
+Properties:
+
+- Config: key
+- Env Var: RCLONE_SWIFT_KEY
+- Type: string
+- Required: false
+
+--swift-auth
+Authentication URL for server (OS_AUTH_URL).
+Properties:
+
+- Config: auth
+- Env Var: RCLONE_SWIFT_AUTH
+- Type: string
+- Required: false
+- Examples:
+    - "https://auth.api.rackspacecloud.com/v1.0"
+    - "https://lon.auth.api.rackspacecloud.com/v1.0"
+    - "https://identity.api.rackspacecloud.com/v2.0"
+    - "https://auth.storage.memset.com/v1.0"
+    - "https://auth.storage.memset.com/v2.0"
+    - "https://auth.cloud.ovh.net/v3"
+    - "https://authenticate.ain.net"
+
+--swift-user-id
+User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
+Properties:
+
+- Config: user_id
+- Env Var: RCLONE_SWIFT_USER_ID
+- Type: string
+- Required: false
+
+--swift-domain
+User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
+Properties:
+
+- Config: domain
+- Env Var: RCLONE_SWIFT_DOMAIN
+- Type: string
+- Required: false
+
+--swift-tenant
+Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).
+Properties:
+
+- Config: tenant
+- Env Var: RCLONE_SWIFT_TENANT
+- Type: string
+- Required: false
+
+--swift-tenant-id
+Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).
+Properties:
+
+- Config: tenant_id
+- Env Var: RCLONE_SWIFT_TENANT_ID
+- Type: string
+- Required: false
+
+--swift-tenant-domain
+Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).
+Properties:
+
+- Config: tenant_domain
+- Env Var: RCLONE_SWIFT_TENANT_DOMAIN
+- Type: string
+- Required: false
+
+--swift-region
+Region name - optional (OS_REGION_NAME).
+Properties:
+
+- Config: region
+- Env Var: RCLONE_SWIFT_REGION
+- Type: string
+- Required: false
+
+--swift-storage-url
+Storage URL - optional (OS_STORAGE_URL).
+Properties:
+
+- Config: storage_url
+- Env Var: RCLONE_SWIFT_STORAGE_URL
+- Type: string
+- Required: false
+
+--swift-auth-token
+Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).
+Properties:
+
+- Config: auth_token
+- Env Var: RCLONE_SWIFT_AUTH_TOKEN
+- Type: string
+- Required: false
+
+--swift-application-credential-id
+Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).
+Properties:
+
+- Config: application_credential_id
+- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_ID
+- Type: string
+- Required: false
+
+--swift-application-credential-name
+Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).
+Properties:
+
+- Config: application_credential_name
+- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_NAME
+- Type: string
+- Required: false
+
+--swift-application-credential-secret
+Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).
+Properties:
+
+- Config: application_credential_secret
+- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_SECRET
+- Type: string
+- Required: false
+
+--swift-auth-version
+AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).
+Properties:
+
+- Config: auth_version
+- Env Var: RCLONE_SWIFT_AUTH_VERSION
+- Type: int
+- Default: 0
+
+--swift-endpoint-type
+Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).
+Properties:
+
+- Config: endpoint_type
+- Env Var: RCLONE_SWIFT_ENDPOINT_TYPE
+- Type: string
+- Default: "public"
+- Examples:
+
+- "public"
+
+- Public (default, choose this if not sure)
+
+- "internal"
+
+- Internal (use internal service net)
+
+- "admin"
+
+
+
+--swift-storage-policy
+The storage policy to use when creating a new container.
+This applies the specified storage policy when creating a new container. The policy cannot be changed afterwards. The allowed configuration values and their meaning depend on your Swift storage provider.
+Properties:
+
+- Config: storage_policy
+- Env Var: RCLONE_SWIFT_STORAGE_POLICY
+- Type: string
+- Required: false
+- Examples:
+
+- ""
+
+- "pcs"
+
+- OVH Public Cloud Storage
+
+- "pca"
+
+- OVH Public Cloud Archive
+
+
+
+Advanced options
+Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
+--swift-leave-parts-on-error
+If true avoid calling abort upload on a failure.
+It should be set to true for resuming uploads across different sessions.
+Properties:
+
+- Config: leave_parts_on_error
+- Env Var: RCLONE_SWIFT_LEAVE_PARTS_ON_ERROR
+- Type: bool
+- Default: false
+
+--swift-chunk-size
+Above this size files will be chunked into a _segments container.
+Above this size files will be chunked into a _segments container. The default for this is 5 GiB which is its maximum value.
+Properties:
+
+- Config: chunk_size
+- Env Var: RCLONE_SWIFT_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 5Gi
+
+--swift-no-chunk
+Don't chunk files during streaming upload.
+When doing streaming uploads (e.g. using rcat or mount) setting this flag will cause the swift backend to not upload chunked files.
+This will limit the maximum upload size to 5 GiB. However non chunked files are easier to deal with and have an MD5SUM.
+Rclone will still chunk files bigger than chunk_size when doing normal copy operations.
+Properties:
+
+- Config: no_chunk
+- Env Var: RCLONE_SWIFT_NO_CHUNK
+- Type: bool
+- Default: false
+
+--swift-no-large-objects
+Disable support for static and dynamic large objects
+Swift cannot transparently store files bigger than 5 GiB. There are two schemes for doing that, static or dynamic large objects, and the API does not allow rclone to determine whether a file is a static or dynamic large object without doing a HEAD on the object. Since these need to be treated differently, this means rclone has to issue HEAD requests for objects for example when reading checksums.
+When no_large_objects is set, rclone will assume that there are no static or dynamic large objects stored. This means it can stop doing the extra HEAD calls which in turn increases performance greatly especially when doing a swift to swift transfer with --checksum set.
+Setting this option implies no_chunk and also that no files will be uploaded in chunks, so files bigger than 5 GiB will just fail on upload.
+If you set this option and there are static or dynamic large objects, then this will give incorrect hashes for them. Downloads will succeed, but other operations such as Remove and Copy will fail.
+Properties:
+
+- Config: no_large_objects
+- Env Var: RCLONE_SWIFT_NO_LARGE_OBJECTS
+- Type: bool
+- Default: false
+
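+As a sketch of the performance point above, a swift to swift transfer that skips the extra HEAD calls might look like the following (remote and container names are placeholders, and this is only safe if you are sure no large objects are stored):
+rclone copy --checksum --swift-no-large-objects src-swift:container dst-swift:container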
+--swift-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_SWIFT_ENCODING
+- Type: Encoding
+- Default: Slash,InvalidUtf8
+
+--swift-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SWIFT_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+The Swift API doesn't return a correct MD5SUM for segmented files (Dynamic or Static Large Objects) so rclone won't check or use the MD5SUM for these.
+Troubleshooting
+Rclone gives Failed to create file system for "remote:": Bad Request
+Due to an oddity of the underlying swift library, it gives a "Bad Request" error rather than a more sensible error when the authentication fails for Swift.
+So this most likely means your username / password is wrong. You can investigate further with the --dump-bodies flag.
+This may also be caused by specifying the region when you shouldn't have (e.g. OVH).
+Rclone gives Failed to create file system: Response didn't have storage url and auth token
+This is most likely caused by forgetting to specify your tenant when setting up a swift remote.
+OVH Cloud Archive
+To use rclone with OVH cloud archive, first use rclone config to set up a swift backend with OVH, choosing pca as the storage_policy.
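+A minimal sketch of the resulting config section (the remote name and credentials are placeholders):
+[ovh-archive]
+type = swift
+user = your-openstack-user
+key = your-openstack-password
+auth = https://auth.cloud.ovh.net/v3
+storage_policy = pca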
+Uploading Objects
+Uploading objects to OVH cloud archive is no different to object storage; you simply run the command you like (move, copy or sync) to upload the objects. Once uploaded the objects will show in a "Frozen" state within the OVH control panel.
+Retrieving Objects
+To retrieve objects use rclone copy as normal. If the objects are in a frozen state then rclone will ask for them all to be unfrozen and it will wait at the end of the output with a message like the following:
+2019/03/23 13:06:33 NOTICE: Received retry after error - sleeping until 2019-03-23T13:16:33.481657164+01:00 (9m59.99985121s)
+Rclone will wait for the time specified then retry the copy.
+pCloud
+Paths are specified as remote:path
+Paths may be as deep as required, e.g. remote:directory/subdirectory.
+Configuration
+The initial setup for pCloud involves getting a token from pCloud which you need to do in your browser. rclone config walks you through it.
+Here is an example of how to make a remote called remote. First run:
+ rclone config
+This will guide you through an interactive setup process:
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Choose a number from below, or type in your own value
+[snip]
+XX / Pcloud
+ \ "pcloud"
+[snip]
+Storage> pcloud
+Pcloud App Client Id - leave blank normally.
+client_id>
+Pcloud App Client Secret - leave blank normally.
+client_secret>
+Remote config
+Use web browser to automatically authenticate rclone with remote?
+ * Say Y if the machine running rclone has a web browser you can use
+ * Say N if running rclone on a (remote) machine without web browser access
+If not sure try Y. If Y failed, try N.
+y) Yes
+n) No
+y/n> y
+If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
+Log in and authorize rclone for access
+Waiting for code...
+Got code
+--------------------
+[remote]
+client_id =
+client_secret =
+token = {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+See the remote setup docs for how to set it up on a machine with no Internet browser available.
+Note that rclone runs a webserver on your local machine to collect the token as returned from pCloud. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on http://127.0.0.1:53682/ and it may require you to unblock it temporarily if you are running a host firewall.
+Once configured you can then use rclone like this,
+List directories in top level of your pCloud
+rclone lsd remote:
+List all the files in your pCloud
+rclone ls remote:
+To copy a local directory to a pCloud directory called backup
+rclone copy /home/source remote:backup
+Modification times and hashes
+pCloud allows modification times to be set on objects accurate to 1 second. These will be used to detect whether objects need syncing or not. In order to set a Modification time pCloud requires the object be re-uploaded.
+pCloud supports MD5 and SHA1 hashes in the US region, and SHA1 and SHA256 hashes in the EU region, so you can use the --checksum flag.
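+For example, to copy comparing checksums rather than sizes and modification times (the paths are illustrative):
+rclone copy --checksum /home/source remote:backup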
+Restricted filename characters
+In addition to the default restricted characters set the following characters are also replaced:
+| Character | Value | Replacement |
+| --------- | ----- | ----------- |
+| \         | 0x5C  | ＼          |
+
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
+Deleting files
+Deleted files will be moved to the trash. Your subscription level will determine how long items stay in the trash. rclone cleanup can be used to empty the trash.
+Emptying the trash
+Due to an API limitation, the rclone cleanup command will only work if you set your username and password in the advanced options for this backend. Since we generally want to avoid storing user passwords in the rclone config file, we advise you to only set this up if you need the rclone cleanup command to work.
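+If you prefer not to store the credentials at all, one option is to pass them only for the cleanup call via the backend flags described below (values are placeholders, and the password must be obscured):
+rclone cleanup remote: --pcloud-username you@example.com --pcloud-password "$(rclone obscure 'yourpassword')"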
+Root folder ID
+You can set the root_folder_id for rclone. This is the directory (identified by its Folder ID) that rclone considers to be the root of your pCloud drive.
+Normally you will leave this blank and rclone will determine the correct root to use itself.
+However you can set this to restrict rclone to a specific folder hierarchy.
+In order to do this you will have to find the Folder ID of the directory you wish rclone to display. This will be the folder field of the URL when you open the relevant folder in the pCloud web interface.
+So if the folder you want rclone to use has a URL which looks like https://my.pcloud.com/#page=filemanager&folder=5xxxxxxxx8&tpl=foldergrid in the browser, then you use 5xxxxxxxx8 as the root_folder_id in the config.
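+Continuing the example above, the relevant part of the config might then look like this (a sketch only, reusing the placeholder ID from the URL):
+[remote]
+type = pcloud
+root_folder_id = 5xxxxxxxx8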
+Standard options
+Here are the Standard options specific to pcloud (Pcloud).
+--pcloud-client-id
+OAuth Client Id.
+Leave blank normally.
+Properties:
+
+- Config: client_id
+- Env Var: RCLONE_PCLOUD_CLIENT_ID
+- Type: string
+- Required: false
+
+--pcloud-client-secret
+OAuth Client Secret.
+Leave blank normally.
+Properties:
+
+- Config: client_secret
+- Env Var: RCLONE_PCLOUD_CLIENT_SECRET
+- Type: string
+- Required: false
+
+Advanced options
+Here are the Advanced options specific to pcloud (Pcloud).
+--pcloud-token
+OAuth Access Token as a JSON blob.
+Properties:
+
+- Config: token
+- Env Var: RCLONE_PCLOUD_TOKEN
+- Type: string
+- Required: false
+
+--pcloud-auth-url
+Auth server URL.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: auth_url
+- Env Var: RCLONE_PCLOUD_AUTH_URL
+- Type: string
+- Required: false
+
+--pcloud-token-url
+Token server url.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: token_url
+- Env Var: RCLONE_PCLOUD_TOKEN_URL
+- Type: string
+- Required: false
+
+--pcloud-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PCLOUD_ENCODING
+- Type: Encoding
+- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+
+--pcloud-root-folder-id
+Fill in for rclone to use a non root folder as its starting point.
+Properties:
+
+- Config: root_folder_id
+- Env Var: RCLONE_PCLOUD_ROOT_FOLDER_ID
+- Type: string
+- Default: "d0"
+
+--pcloud-hostname
+Hostname to connect to.
+This is normally set when rclone initially does the oauth connection, however you will need to set it by hand if you are using remote config with rclone authorize.
+Properties:
+
+- Config: hostname
+- Env Var: RCLONE_PCLOUD_HOSTNAME
+- Type: string
+- Default: "api.pcloud.com"
+- Examples:
+
+- "api.pcloud.com"
+
+- "eapi.pcloud.com"
+
+
+
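+For instance, if your account is in the EU region and the remote was authorized on another machine with rclone authorize, the config might include the following (a sketch, not a complete remote definition):
+[remote]
+type = pcloud
+hostname = eapi.pcloud.com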
+--pcloud-username
+Your pcloud username.
+This is only required when you want to use the cleanup command. Due to a bug in the pCloud API, the required API does not support OAuth authentication, so we have to rely on user password authentication for it.
+Properties:
+
+- Config: username
+- Env Var: RCLONE_PCLOUD_USERNAME
+- Type: string
+- Required: false
+
+--pcloud-password
+Your pcloud password.
+NB Input to this must be obscured - see rclone obscure.
+Properties:
+
+- Config: password
+- Env Var: RCLONE_PCLOUD_PASSWORD
+- Type: string
+- Required: false
+
+--pcloud-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PCLOUD_DESCRIPTION
+- Type: string
+- Required: false
+
+PikPak
+PikPak is a private cloud drive.
+Paths are specified as remote:path, and may be as deep as required, e.g. remote:directory/subdirectory.
+Configuration
+Here is an example of making a remote for PikPak.
+First run:
+ rclone config
+This will guide you through an interactive setup process:
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+
+Enter name for new remote.
+name> remote
+
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+XX / PikPak
+ \ (pikpak)
+Storage> XX
+
+Option user.
+Pikpak username.
+Enter a value.
+user> USERNAME
+
+Option pass.
+Pikpak password.
+Choose an alternative below.
+y) Yes, type in my own password
+g) Generate random password
+y/g> y
+Enter the password:
+password:
+Confirm the password:
+password:
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n>
+
+Configuration complete.
+Options:
+- type: pikpak
+- user: USERNAME
+- pass: *** ENCRYPTED ***
+- token: {"access_token":"eyJ...","token_type":"Bearer","refresh_token":"os...","expiry":"2023-01-26T18:54:32.170582647+09:00"}
+Keep this "remote" remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+Modification times and hashes
+PikPak keeps modification times on objects, and updates them when uploading objects, but it does not support changing only the modification time.
+The MD5 hash algorithm is supported.
+Standard options
+Here are the Standard options specific to pikpak (PikPak).
+--pikpak-user
+Pikpak username.
+Properties:
+
+- Config: user
+- Env Var: RCLONE_PIKPAK_USER
+- Type: string
+- Required: true
+
+--pikpak-pass
+Pikpak password.
+NB Input to this must be obscured - see rclone obscure.
+Properties:
+
+- Config: pass
+- Env Var: RCLONE_PIKPAK_PASS
+- Type: string
+- Required: true
+
+Advanced options
+Here are the Advanced options specific to pikpak (PikPak).
+--pikpak-client-id
+OAuth Client Id.
+Leave blank normally.
+Properties:
+
+- Config: client_id
+- Env Var: RCLONE_PIKPAK_CLIENT_ID
+- Type: string
+- Required: false
+
+--pikpak-client-secret
+OAuth Client Secret.
+Leave blank normally.
+Properties:
+
+- Config: client_secret
+- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
+- Type: string
+- Required: false
+
+--pikpak-token
+OAuth Access Token as a JSON blob.
+Properties:
+
+- Config: token
+- Env Var: RCLONE_PIKPAK_TOKEN
+- Type: string
+- Required: false
+
+--pikpak-auth-url
+Auth server URL.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: auth_url
+- Env Var: RCLONE_PIKPAK_AUTH_URL
+- Type: string
+- Required: false
+
+--pikpak-token-url
+Token server url.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: token_url
+- Env Var: RCLONE_PIKPAK_TOKEN_URL
+- Type: string
+- Required: false
+
+--pikpak-root-folder-id
+ID of the root folder. Leave blank normally.
+Fill in for rclone to use a non root folder as its starting point.
+Properties:
+
+- Config: root_folder_id
+- Env Var: RCLONE_PIKPAK_ROOT_FOLDER_ID
+- Type: string
+- Required: false
+
+--pikpak-use-trash
+Send files to the trash instead of deleting permanently.
+Defaults to true, namely sending files to the trash. Use --pikpak-use-trash=false to delete files permanently instead.
+Properties:
+
+- Config: use_trash
+- Env Var: RCLONE_PIKPAK_USE_TRASH
+- Type: bool
+- Default: true
+
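+For example, to delete files permanently rather than trashing them (the path is a placeholder):
+rclone delete --pikpak-use-trash=false remote:path/to/dir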
+--pikpak-trashed-only
+Only show files that are in the trash.
+This will show trashed files in their original directory structure.
+Properties:
+
+- Config: trashed_only
+- Env Var: RCLONE_PIKPAK_TRASHED_ONLY
+- Type: bool
+- Default: false
+
+--pikpak-hash-memory-limit
+Files bigger than this will be cached on disk to calculate hash if required.
+Properties:
+
+- Config: hash_memory_limit
+- Env Var: RCLONE_PIKPAK_HASH_MEMORY_LIMIT
+- Type: SizeSuffix
+- Default: 10Mi
+
+--pikpak-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PIKPAK_ENCODING
+- Type: Encoding
+- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
+
+--pikpak-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PIKPAK_DESCRIPTION
+- Type: string
+- Required: false
+
+Backend commands
+Here are the commands specific to the pikpak backend.
+Run them with
+rclone backend COMMAND remote:
+The help below will explain what arguments each command takes.
+See the backend command for more info on how to pass options and arguments.
+These can be run on a running backend using the rc command backend/command.
+addurl
+Add offline download task for url
+rclone backend addurl remote: [options] [<arguments>+]
+This command adds offline download task for url.
+Usage:
+rclone backend addurl pikpak:dirpath url
+Downloads will be stored in 'dirpath'. If 'dirpath' is invalid, the download will fall back to the default 'My Pack' folder.
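+For example (the URL and directory are placeholders):
+rclone backend addurl pikpak:downloads https://example.com/file.iso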
+decompress
+Request decompress of a file/files in a folder
+rclone backend decompress remote: [options] [<arguments>+]
+This command requests decompress of file/files in a folder.
+Usage:
+rclone backend decompress pikpak:dirpath {filename} -o password=password
+rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+An optional argument 'filename' can be specified for a file located in 'pikpak:dirpath'. You may want to pass '-o password=password' for password-protected files. Also, pass '-o delete-src-file' to delete source files after decompression is finished.
+Result:
+{
+ "Decompressed": 17,
+ "SourceDeleted": 0,
+ "Errors": 0
+}
+Limitations
+Hashes may be empty
+PikPak supports MD5 hashes, but they are sometimes empty, especially for user-uploaded files.
+Deleted files still visible with trashed-only
+Deleted files will still be visible with --pikpak-trashed-only even after the trash is emptied. This goes away after a few days.
+premiumize.me
+Paths are specified as remote:path
+Paths may be as deep as required, e.g. remote:directory/subdirectory.
+Configuration
+The initial setup for premiumize.me involves getting a token from premiumize.me which you need to do in your browser. rclone config walks you through it.
+Here is an example of how to make a remote called remote. First run:
+ rclone config
+This will guide you through an interactive setup process:
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+[snip]
+XX / premiumize.me
+ \ "premiumizeme"
+[snip]
+Storage> premiumizeme
+** See help for premiumizeme backend at: https://rclone.org/premiumizeme/ **
+
+Remote config
+Use web browser to automatically authenticate rclone with remote?
+ * Say Y if the machine running rclone has a web browser you can use
+ * Say N if running rclone on a (remote) machine without web browser access
+If not sure try Y. If Y failed, try N.
+y) Yes
+n) No
+y/n> y
+If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
+Log in and authorize rclone for access
+Waiting for code...
+Got code
+--------------------
+[remote]
+type = premiumizeme
+token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"}
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d>
+See the remote setup docs for how to set it up on a machine with no Internet browser available.
+Note that rclone runs a webserver on your local machine to collect the token as returned from premiumize.me. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on http://127.0.0.1:53682/ and it may require you to unblock it temporarily if you are running a host firewall.
+Once configured you can then use rclone like this,
+List directories in top level of your premiumize.me
+rclone lsd remote:
+List all the files in your premiumize.me
+rclone ls remote:
+To copy a local directory to a premiumize.me directory called backup
+rclone copy /home/source remote:backup
+Modification times and hashes
+premiumize.me does not support modification times or hashes, therefore syncing will default to --size-only checking. Note that using --update will work.
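+For example, to avoid overwriting files that are newer on the destination while otherwise comparing by size only (the paths are illustrative):
+rclone sync --update /home/source remote:backup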
+Restricted filename characters
+In addition to the default restricted characters set the following characters are also replaced:
+| Character | Value | Replacement |
+| --------- | ----- | ----------- |
+| \         | 0x5C  | ＼          |
+| "         | 0x22  | ＂          |
+
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
+Standard options
+Here are the Standard options specific to premiumizeme (premiumize.me).
+--premiumizeme-client-id
+OAuth Client Id.
+Leave blank normally.
+Properties:
+
+- Config: client_id
+- Env Var: RCLONE_PREMIUMIZEME_CLIENT_ID
+- Type: string
+- Required: false
+
+--premiumizeme-client-secret
+OAuth Client Secret.
+Leave blank normally.
+Properties:
+
+- Config: client_secret
+- Env Var: RCLONE_PREMIUMIZEME_CLIENT_SECRET
+- Type: string
+- Required: false
+
+--premiumizeme-api-key
+API Key.
+This is not normally used - use oauth instead.
+Properties:
+
+- Config: api_key
+- Env Var: RCLONE_PREMIUMIZEME_API_KEY
+- Type: string
+- Required: false
+
+Advanced options
+Here are the Advanced options specific to premiumizeme (premiumize.me).
+--premiumizeme-token
+OAuth Access Token as a JSON blob.
+Properties:
+
+- Config: token
+- Env Var: RCLONE_PREMIUMIZEME_TOKEN
+- Type: string
+- Required: false
+
+--premiumizeme-auth-url
+Auth server URL.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: auth_url
+- Env Var: RCLONE_PREMIUMIZEME_AUTH_URL
+- Type: string
+- Required: false
+
+--premiumizeme-token-url
+Token server url.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: token_url
+- Env Var: RCLONE_PREMIUMIZEME_TOKEN_URL
+- Type: string
+- Required: false
+
+--premiumizeme-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PREMIUMIZEME_ENCODING
+- Type: Encoding
+- Default: Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot
+
+--premiumizeme-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PREMIUMIZEME_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+Note that premiumize.me is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".
+premiumize.me file names can't have the \ or " characters in. rclone maps these to and from identical looking unicode equivalents ＼ and ＂.
+premiumize.me only supports filenames up to 255 characters in length.
+Proton Drive
+Proton Drive is an end-to-end encrypted Swiss vault for your files that protects your data.
+This is an rclone backend for Proton Drive which supports the file transfer features of Proton Drive using the same client-side encryption.
+Due to the fact that Proton Drive doesn't publish its API documentation, this backend is implemented with best efforts by reading the open-sourced client source code and observing the Proton Drive traffic in the browser.
+NB This backend is currently in Beta. It is believed to be correct and all the integration tests pass. However, the Proton Drive protocol has evolved over time, so there may be accounts it is not compatible with. Please post on the rclone forum if you find an incompatibility.
+Paths are specified as remote:path
+Paths may be as deep as required, e.g. remote:directory/subdirectory.
+Configurations
+Here is an example of how to make a remote called remote. First run:
+ rclone config
+This will guide you through an interactive setup process:
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Choose a number from below, or type in your own value
+[snip]
+XX / Proton Drive
+ \ "Proton Drive"
+[snip]
+Storage> protondrive
+User name
+user> you@protonmail.com
+Password.
+y) Yes type in my own password
+g) Generate random password
+n) No leave this optional password blank
+y/g/n> y
+Enter the password:
+password:
+Confirm the password:
+password:
+Option 2fa.
+2FA code (if the account requires one)
+Enter a value. Press Enter to leave empty.
+2fa> 123456
+Remote config
+--------------------
+[remote]
+type = protondrive
+user = you@protonmail.com
+pass = *** ENCRYPTED ***
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+NOTE: The Proton Drive encryption keys need to have been already generated after a regular login via the browser, otherwise attempting to use the credentials in rclone will fail.
+Once configured you can then use rclone like this,
+List directories in top level of your Proton Drive
+rclone lsd remote:
+List all the files in your Proton Drive
+rclone ls remote:
+To copy a local directory to a Proton Drive directory called backup
+rclone copy /home/source remote:backup
+Modification times and hashes
+Proton Drive Bridge does not support updating modification times yet.
+The SHA1 hash algorithm is supported.
+Restricted filename characters
+Invalid UTF-8 bytes will be replaced, also left and right spaces will be removed (code reference)
+Duplicated files
+Proton Drive cannot have two files with exactly the same name and path. If a conflict occurs, depending on the advanced config, the file might or might not be overwritten.
+Mailbox password
+Please set your mailbox password in the advanced config section.
+Caching
+The cache is currently built for the case when rclone is the only instance performing operations on the mount point. The event system, which is the proton API system that provides visibility of what has changed on the drive, is yet to be implemented, so updates from other clients won't be reflected in the cache. Thus, if there are concurrent clients accessing the same mount point, stale data may be served from the cache.
+Standard options
+Here are the Standard options specific to protondrive (Proton Drive).
+--protondrive-username
+The username of your proton account
+Properties:
+
+- Config: username
+- Env Var: RCLONE_PROTONDRIVE_USERNAME
+- Type: string
+- Required: true
+
+--protondrive-password
+The password of your proton account.
+NB Input to this must be obscured - see rclone obscure.
+Properties:
+
+- Config: password
+- Env Var: RCLONE_PROTONDRIVE_PASSWORD
+- Type: string
+- Required: true
+
+--protondrive-2fa
+The 2FA code
+The value can also be provided with --protondrive-2fa=000000
+The 2FA code of your proton drive account if the account is set up with two-factor authentication
+Properties:
+
+- Config: 2fa
+- Env Var: RCLONE_PROTONDRIVE_2FA
+- Type: string
+- Required: false
+
+Advanced options
+Here are the Advanced options specific to protondrive (Proton Drive).
+--protondrive-mailbox-password
+The mailbox password of your two-password proton account.
+For more information regarding the mailbox password, please check the following official knowledge base article: https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password
+NB Input to this must be obscured - see rclone obscure.
+Properties:
+
+- Config: mailbox_password
+- Env Var: RCLONE_PROTONDRIVE_MAILBOX_PASSWORD
+- Type: string
+- Required: false
+
+--protondrive-client-uid
+Client uid key (internal use only)
+Properties:
+
+- Config: client_uid
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_UID
+- Type: string
+- Required: false
+
+--protondrive-client-access-token
+Client access token key (internal use only)
+Properties:
+
+- Config: client_access_token
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_ACCESS_TOKEN
+- Type: string
+- Required: false
+
+--protondrive-client-refresh-token
+Client refresh token key (internal use only)
+Properties:
+
+- Config: client_refresh_token
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_REFRESH_TOKEN
+- Type: string
+- Required: false
+
+--protondrive-client-salted-key-pass
+Client salted key pass key (internal use only)
+Properties:
+
+- Config: client_salted_key_pass
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_SALTED_KEY_PASS
+- Type: string
+- Required: false
+
+--protondrive-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PROTONDRIVE_ENCODING
+- Type: Encoding
+- Default: Slash,LeftSpace,RightSpace,InvalidUtf8,Dot
+
+--protondrive-original-file-size
+Return the file size before encryption
+The size of the encrypted file will be different from (bigger than) the original file size. Unless there is a reason to return the file size after encryption, set this option to true, as features like Open(), which need to be supplied with the original content size, will otherwise fail to operate properly.
+Properties:
+
+- Config: original_file_size
+- Env Var: RCLONE_PROTONDRIVE_ORIGINAL_FILE_SIZE
+- Type: bool
+- Default: true
+
+--protondrive-app-version
+The app version string
+The app version string indicates the client that is currently performing the API request. This information is required and will be sent with every API request.
+Properties:
+
+- Config: app_version
+- Env Var: RCLONE_PROTONDRIVE_APP_VERSION
+- Type: string
+- Default: "macos-drive@1.0.0-alpha.1+rclone"
+
+--protondrive-replace-existing-draft
+Create a new revision when filename conflict is detected
+When a file upload is cancelled or failed before completion, a draft will be created and the subsequent upload of the same file to the same location will be reported as a conflict.
+The value can also be set by --protondrive-replace-existing-draft=true
+If the option is set to true, the draft will be replaced and then the upload operation will restart. If there are other clients also uploading at the same file location at the same time, the behavior is currently unknown. This needs to be set to true for integration tests. If the option is set to false, an error "a draft exist - usually this means a file is being uploaded at another client, or, there was a failed upload attempt" will be returned, and no upload will happen.
+Properties:
+
+- Config: replace_existing_draft
+- Env Var: RCLONE_PROTONDRIVE_REPLACE_EXISTING_DRAFT
+- Type: bool
+- Default: false
+
+--protondrive-enable-caching
+Caches the files and folders metadata to reduce API calls
+Notice: If you are mounting ProtonDrive as a VFS, please disable this feature, as the current implementation doesn't update or clear the cache when there are external changes.
+The files and folders on ProtonDrive are represented as links with keyrings, which can be cached to improve performance and be friendly to the API server.
+The cache is currently built for the case when rclone is the only instance performing operations on the mount point. The event system, which is the proton API system that provides visibility of what has changed on the drive, is yet to be implemented, so updates from other clients won't be reflected in the cache. Thus, if there are concurrent clients accessing the same mount point, stale data may be served from the cache.
+Properties:
+
+- Config: enable_caching
+- Env Var: RCLONE_PROTONDRIVE_ENABLE_CACHING
+- Type: bool
+- Default: true
+
+--protondrive-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PROTONDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+This backend uses the Proton-API-Bridge, which is based on go-proton-api, a fork of the official repo.
+There is no official API documentation available from Proton Drive. But, thanks to Proton open sourcing proton-go-api and the web, iOS, and Android client codebases, we don't need to completely reverse engineer the APIs by observing the web client traffic!
+proton-go-api provides the basic building blocks of API calls and error handling, such as 429 exponential back-off, but it is pretty much just a barebone interface to the Proton API. For example, the encryption and decryption of the Proton Drive file are not provided in this library.
+The Proton-API-Bridge attempts to bridge the gap, so rclone can be built on top of this quickly. This codebase handles the intricate tasks before and after calling Proton APIs, particularly the complex encryption scheme, allowing developers to implement features for other software on top of this codebase. There are likely quite a few errors in this library, as there isn't official documentation available.
+put.io
+Paths are specified as remote:path
+put.io paths may be as deep as required, e.g. remote:directory/subdirectory.
+Configuration
+The initial setup for put.io involves getting a token from put.io which you need to do in your browser. rclone config walks you through it.
+Here is an example of how to make a remote called remote. First run:
+ rclone config
+This will guide you through an interactive setup process:
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> putio
+Type of storage to configure.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+[snip]
+XX / Put.io
+ \ "putio"
+[snip]
+Storage> putio
+** See help for putio backend at: https://rclone.org/putio/ **
+
+Remote config
+Use web browser to automatically authenticate rclone with remote?
+ * Say Y if the machine running rclone has a web browser you can use
+ * Say N if running rclone on a (remote) machine without web browser access
+If not sure try Y. If Y failed, try N.
+y) Yes
+n) No
+y/n> y
+If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
+Log in and authorize rclone for access
+Waiting for code...
+Got code
+--------------------
+[putio]
+type = putio
+token = {"access_token":"XXXXXXXX","expiry":"0001-01-01T00:00:00Z"}
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+Current remotes:
+
+Name Type
+==== ====
+putio putio
+
+e) Edit existing remote
+n) New remote
+d) Delete remote
+r) Rename remote
+c) Copy remote
+s) Set configuration password
+q) Quit config
+e/n/d/r/c/s/q> q
+See the remote setup docs for how to set it up on a machine with no Internet browser available.
+Note that rclone runs a webserver on your local machine to collect the token as returned from put.io if using web browser to automatically authenticate. This only runs from the moment it opens your browser to the moment you get back the verification code. This is on http://127.0.0.1:53682/ and it may require you to unblock it temporarily if you are running a host firewall, or use manual mode.
+You can then use it like this,
+List directories in top level of your put.io
+rclone lsd remote:
+List all the files in your put.io
+rclone ls remote:
+To copy a local directory to a put.io directory called backup
+rclone copy /home/source remote:backup
+Restricted filename characters
+In addition to the default restricted characters set the following characters are also replaced:
+| Character | Value | Replacement |
+| --------- | ----- | ----------- |
+| \         | 0x5C  | ＼          |
+
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
+Standard options
+Here are the Standard options specific to putio (Put.io).
+--putio-client-id
+OAuth Client Id.
+Leave blank normally.
+Properties:
+
+- Config: client_id
+- Env Var: RCLONE_PUTIO_CLIENT_ID
+- Type: string
+- Required: false
+
+--putio-client-secret
+OAuth Client Secret.
+Leave blank normally.
+Properties:
+
+- Config: client_secret
+- Env Var: RCLONE_PUTIO_CLIENT_SECRET
+- Type: string
+- Required: false
+
+Advanced options
+Here are the Advanced options specific to putio (Put.io).
+--putio-token
+OAuth Access Token as a JSON blob.
+Properties:
+
+- Config: token
+- Env Var: RCLONE_PUTIO_TOKEN
+- Type: string
+- Required: false
+
+--putio-auth-url
+Auth server URL.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: auth_url
+- Env Var: RCLONE_PUTIO_AUTH_URL
+- Type: string
+- Required: false
+
+--putio-token-url
+Token server url.
+Leave blank to use the provider defaults.
+Properties:
+
+- Config: token_url
+- Env Var: RCLONE_PUTIO_TOKEN_URL
+- Type: string
+- Required: false
+
+--putio-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PUTIO_ENCODING
+- Type: Encoding
+- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+
+--putio-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PUTIO_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+put.io has rate limiting. When you hit a limit, rclone automatically retries after waiting the amount of time requested by the server.
+If you want to avoid ever hitting these limits, you may use the --tpslimit flag with a low number. Note that the imposed limits may be different for different operations, and may change over time.
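+For example, to keep rclone to roughly one API transaction per second (the value is only illustrative):
+rclone copy --tpslimit 1 /home/source putio:backup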
+Seafile
+This is a backend for the Seafile storage service:
+- It works with both the free community edition and the professional edition.
+- Seafile versions 6.x, 7.x, 8.x and 9.x are all supported.
+- Encrypted libraries are also supported.
+- It supports 2FA enabled users.
+- Using a Library API Token is not supported.
+Configuration
+There are two distinct modes you can set up your remote (see the example commands just after this list):
+- You point your remote to the root of the server, meaning you don't specify a library during the configuration. Paths are specified as remote:library. You may put subdirectories in too, e.g. remote:library/path/to/dir.
+- You point your remote to a specific library during the configuration. Paths are specified as remote:path/to/dir. This is the recommended mode when using encrypted libraries. (This mode is possibly slightly faster than the root mode.)
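+As a rough illustration of the two modes (the remote names seafile-root and seafile-lib are placeholders):
+rclone ls "seafile-root:My Library/photos"   # root mode: the library is part of the path
+rclone ls seafile-lib:photos                 # library mode: the remote already points inside the library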
+Configuration in root mode
+Here is an example of making a seafile configuration for a user with no two-factor authentication. First run
+rclone config
+This will guide you through an interactive setup process. To authenticate you will need the URL of your server, your email (or username) and your password.
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> seafile
+Type of storage to configure.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+[snip]
+XX / Seafile
+ \ "seafile"
+[snip]
+Storage> seafile
+** See help for seafile backend at: https://rclone.org/seafile/ **
+
+URL of seafile host to connect to
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+ 1 / Connect to cloud.seafile.com
+ \ "https://cloud.seafile.com/"
+url> http://my.seafile.server/
+User name (usually email address)
+Enter a string value. Press Enter for the default ("").
+user> me@example.com
+Password
+y) Yes type in my own password
+g) Generate random password
+n) No leave this optional password blank (default)
+y/g> y
+Enter the password:
+password:
+Confirm the password:
+password:
+Two-factor authentication ('true' if the account has 2FA enabled)
+Enter a boolean value (true or false). Press Enter for the default ("false").
+2fa> false
+Name of the library. Leave blank to access all non-encrypted libraries.
+Enter a string value. Press Enter for the default ("").
+library>
+Library password (for encrypted libraries only). Leave blank if you pass it through the command line.
+y) Yes type in my own password
+g) Generate random password
+n) No leave this optional password blank (default)
+y/g/n> n
+Edit advanced config? (y/n)
+y) Yes
+n) No (default)
+y/n> n
+Remote config
+Two-factor authentication is not enabled on this account.
+--------------------
+[seafile]
+type = seafile
+url = http://my.seafile.server/
+user = me@example.com
+pass = *** ENCRYPTED ***
+2fa = false
+--------------------
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+This remote is called seafile. It's pointing to the root of your seafile server and can now be used like this:
+See all libraries
+rclone lsd seafile:
+Create a new library
+rclone mkdir seafile:library
+List the contents of a library
+rclone ls seafile:library
+Sync /home/local/directory to the remote library, deleting any excess files in the library.
+rclone sync --interactive /home/local/directory seafile:library
+Configuration in library mode
+Here's an example of a configuration in library mode with a user that has two-factor authentication enabled. Your 2FA code will be asked for at the end of the configuration, and rclone will attempt to authenticate you:
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> seafile
+Type of storage to configure.
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+[snip]
+XX / Seafile
+ \ "seafile"
+[snip]
+Storage> seafile
+** See help for seafile backend at: https://rclone.org/seafile/ **
+
+URL of seafile host to connect to
+Enter a string value. Press Enter for the default ("").
+Choose a number from below, or type in your own value
+ 1 / Connect to cloud.seafile.com
+ \ "https://cloud.seafile.com/"
+url> http://my.seafile.server/
+User name (usually email address)
+Enter a string value. Press Enter for the default ("").
+user> me@example.com
+Password
+y) Yes type in my own password
+g) Generate random password
+n) No leave this optional password blank (default)
+y/g> y
+Enter the password:
+password:
+Confirm the password:
+password:
+Two-factor authentication ('true' if the account has 2FA enabled)
+Enter a boolean value (true or false). Press Enter for the default ("false").
+2fa> true
+Name of the library. Leave blank to access all non-encrypted libraries.
+Enter a string value. Press Enter for the default ("").
+library> My Library
+Library password (for encrypted libraries only). Leave blank if you pass it through the command line.
+y) Yes type in my own password
+g) Generate random password
+n) No leave this optional password blank (default)
+y/g/n> n
+Edit advanced config? (y/n)
+y) Yes
+n) No (default)
+y/n> n
+Remote config
+Two-factor authentication: please enter your 2FA code
+2fa code> 123456
+Authenticating...
+Success!
+--------------------
+[seafile]
+type = seafile
+url = http://my.seafile.server/
+user = me@example.com
+pass =
+2fa = true
+library = My Library
+--------------------
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+You'll notice your password is blank in the configuration. It's because we only need the password to authenticate you once.
+You specified My Library during the configuration. The root of the remote is pointing at the root of the library My Library:
+See all files in the library:
+rclone lsd seafile:
+Create a new directory inside the library
+rclone mkdir seafile:directory
+List the contents of a directory
+rclone ls seafile:directory
+Sync /home/local/directory to the remote library, deleting any excess files in the library.
+rclone sync --interactive /home/local/directory seafile:
+--fast-list
+Seafile version 7+ supports --fast-list which allows you to use fewer transactions in exchange for more memory. See the rclone docs for more details. Please note this is not supported on seafile server version 6.x.
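+For example (the library name is a placeholder):
+rclone ls --fast-list seafile:library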
+Restricted filename characters
+In addition to the default restricted characters set the following characters are also replaced:
+| Character | Value | Replacement |
+| --------- | ----- | ----------- |
+| /         | 0x2F  | ／          |
+| "         | 0x22  | ＂          |
+| \         | 0x5C  | ＼          |
+
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
+Seafile and rclone link
+Rclone supports generating share links for non-encrypted libraries only. They can either be for a file or a directory:
+rclone link seafile:seafile-tutorial.doc
+http://my.seafile.server/f/fdcd8a2f93f84b8b90f4/
+
+or if run on a directory you will get:
+rclone link seafile:dir
+http://my.seafile.server/d/9ea2455f6f55478bbb0d/
+Please note a share link is unique for each file or directory. If you run a link command on a file/dir that has already been shared, you will get the exact same link.
+Compatibility
+It has been actively developed using the seafile docker image of these versions:
+- 6.3.4 community edition
+- 7.0.5 community edition
+- 7.1.3 community edition
+- 9.0.10 community edition
+Versions below 6.0 are not supported. Versions between 6.0 and 6.3 haven't been tested and might not work properly.
+Each new version of rclone is automatically tested against the latest docker image of the seafile community server.
+Standard options
+Here are the Standard options specific to seafile (seafile).
+--seafile-url
+URL of seafile host to connect to.
+Properties:
+
+- Config: url
+- Env Var: RCLONE_SEAFILE_URL
+- Type: string
+- Required: true
+- Examples:
+
+- "https://cloud.seafile.com/"
+
+- Connect to cloud.seafile.com.
+
+
+
+--seafile-user
+User name (usually email address).
+Properties:
+
+- Config: user
+- Env Var: RCLONE_SEAFILE_USER
+- Type: string
+- Required: true
+
+--seafile-pass
+Password.
+NB Input to this must be obscured - see rclone obscure.
+Properties:
+
+- Config: pass
+- Env Var: RCLONE_SEAFILE_PASS
+- Type: string
+- Required: false
+
+--seafile-2fa
+Two-factor authentication ('true' if the account has 2FA enabled).
+Properties:
+
+- Config: 2fa
+- Env Var: RCLONE_SEAFILE_2FA
+- Type: bool
+- Default: false
+
+--seafile-library
+Name of the library.
+Leave blank to access all non-encrypted libraries.
+Properties:
+
+- Config: library
+- Env Var: RCLONE_SEAFILE_LIBRARY
+- Type: string
+- Required: false
+
+--seafile-library-key
+Library password (for encrypted libraries only).
+Leave blank if you pass it through the command line.
+NB Input to this must be obscured - see rclone obscure.
+Properties:
+
+- Config: library_key
+- Env Var: RCLONE_SEAFILE_LIBRARY_KEY
+- Type: string
+- Required: false
+
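+A sketch of passing the library password on the command line instead of storing it (the password is a placeholder and must be obscured):
+rclone ls seafile: --seafile-library-key "$(rclone obscure 'library-password')"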
+--seafile-auth-token
+Authentication token.
+Properties:
+
+- Config: auth_token
+- Env Var: RCLONE_SEAFILE_AUTH_TOKEN
+- Type: string
+- Required: false
+
+Advanced options
+Here are the Advanced options specific to seafile (seafile).
+--seafile-create-library
+Should rclone create a library if it doesn't exist.
+Properties:
+
+- Config: create_library
+- Env Var: RCLONE_SEAFILE_CREATE_LIBRARY
+- Type: bool
+- Default: false
+
+--seafile-encoding
+The encoding for the backend.
+See the encoding section in the overview for more info.
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_SEAFILE_ENCODING
+- Type: Encoding
+- Default: Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8
+
+--seafile-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SEAFILE_DESCRIPTION
+- Type: string
+- Required: false
+
+SFTP
+SFTP is the Secure (or SSH) File Transfer Protocol.
+The SFTP backend can be used with a number of different providers:
+
+- Hetzner Storage Box
+- rsync.net
+
+SFTP runs over SSH v2 and is installed as standard with most modern SSH installations.
+Paths are specified as remote:path
. If the path does not begin with a /
it is relative to the home directory of the user. An empty path remote:
refers to the user's home directory. For example, rclone lsd remote:
would list the home directory of the user configured in the rclone remote config (i.e. /home/sftpuser
). However, rclone lsd remote:/
would list the root directory of the remote machine (i.e. /
).
+Note that some SFTP servers will need the leading / - Synology is a good example of this. rsync.net and Hetzner, on the other hand, require users to OMIT the leading /.
+Note that by default rclone will try to execute shell commands on the server, see shell access considerations.
+Configuration
+Here is an example of making an SFTP configuration. First run
+rclone config
+This will guide you through an interactive setup process.
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> remote
+Type of storage to configure.
+Choose a number from below, or type in your own value
+[snip]
+XX / SSH/SFTP
+ \ "sftp"
+[snip]
+Storage> sftp
+SSH host to connect to
+Choose a number from below, or type in your own value
+ 1 / Connect to example.com
+ \ "example.com"
+host> example.com
+SSH username
+Enter a string value. Press Enter for the default ("$USER").
+user> sftpuser
+SSH port number
+Enter a signed integer. Press Enter for the default (22).
+port>
+SSH password, leave blank to use ssh-agent.
+y) Yes type in my own password
+g) Generate random password
+n) No leave this optional password blank
+y/g/n> n
+Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
+key_file>
+Remote config
+--------------------
+[remote]
+host = example.com
+user = sftpuser
+port =
+pass =
+key_file =
+--------------------
+y) Yes this is OK
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+This remote is called remote
and can now be used like this:
+See all directories in the home directory
+rclone lsd remote:
+See all directories in the root directory
+rclone lsd remote:/
+Make a new directory
+rclone mkdir remote:path/to/directory
+List the contents of a directory
+rclone ls remote:path/to/directory
+Sync /home/local/directory
to the remote directory, deleting any excess files in the directory.
+rclone sync --interactive /home/local/directory remote:directory
+Mount the remote path /srv/www-data/
to the local path /mnt/www-data
+rclone mount remote:/srv/www-data/ /mnt/www-data
+SSH Authentication
+The SFTP remote supports three authentication methods:
+
+- Password
+- Key file, including certificate signed keys
+- ssh-agent
+
+Key files should be PEM-encoded private key files. For instance /home/$USER/.ssh/id_rsa
. Only unencrypted OpenSSH or PEM encrypted files are supported.
+The key file can be specified in either an external file (key_file) or contained within the rclone config file (key_pem). If using key_pem in the config file, the entry should be on a single line with new line ('\n' or '\r\n') separating lines. i.e.
+key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY-----
+This will generate it correctly for key_pem for use in the config:
+awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa
+If you don't specify pass
, key_file
, or key_pem
or ask_password
then rclone will attempt to contact an ssh-agent. You can also specify key_use_agent
to force the usage of an ssh-agent. In this case key_file
or key_pem
can also be specified to force the usage of a specific key in the ssh-agent.
+Using an ssh-agent is the only way to load encrypted OpenSSH keys at the moment.
+If you set the ask_password
option, rclone will prompt for a password when needed and no password has been configured.
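+For example, a remote that forces use of a specific key held in the ssh-agent might look roughly like this (illustrative values):
+[remote]
+type = sftp
+host = example.com
+user = sftpuser
+key_use_agent = true
+key_file = ~/.ssh/id_rsa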
+Certificate-signed keys
+With traditional key-based authentication, you configure your private key only, and the public key built into it will be used during the authentication process.
+If you have a certificate you may use it to sign your public key, creating a separate SSH user certificate that should be used instead of the plain public key extracted from the private key. Then you must provide the path to the user certificate public key file in pubkey_file
.
+Note: This is not the traditional public key paired with your private key, typically saved as /home/$USER/.ssh/id_rsa.pub
. Setting this path in pubkey_file
will not work.
+Example:
+[remote]
+type = sftp
+host = example.com
+user = sftpuser
+key_file = ~/id_rsa
+pubkey_file = ~/id_rsa-cert.pub
+If you concatenate a cert with a private key then you can specify the merged file in both places.
+Note: the cert must come first in the file. e.g.
+cat id_rsa-cert.pub id_rsa > merged_key
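+The merged file can then be referenced from both options, for example (an illustrative sketch):
+[remote]
+type = sftp
+host = example.com
+user = sftpuser
+key_file = ~/merged_key
+pubkey_file = ~/merged_key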
+Host key validation
+By default rclone will not check the server's host key for validation. This can allow an attacker to replace a server with their own, and if you use password authentication this can lead to that password being exposed.
+Host key matching, using standard known_hosts
files can be turned on by enabling the known_hosts_file
option. This can point to the file maintained by OpenSSH
or can point to a unique file.
+e.g. using the OpenSSH known_hosts
file:
+[remote]
type = sftp
host = example.com
user = sftpuser
@@ -33521,14 +34261,14 @@ known_hosts_file = ~/.ssh/known_hosts
The options md5sum_command
and sha1_command
can be used to customize the command to be executed for calculation of checksums. You can for example set a specific path to where md5sum and sha1sum executables are located, or use them to specify some other tools that print checksums in compatible format. The value can include command-line arguments, or even shell script blocks as with PowerShell. Rclone has subcommands md5sum and sha1sum that use compatible format, which means if you have an rclone executable on the server it can be used. As mentioned above, they will be automatically picked up if found in PATH, but if not you can set something like /path/to/rclone md5sum
as the value of option md5sum_command
to make sure a specific executable is used.
Remote checksumming is recommended and enabled by default. The first time rclone uses an SFTP remote, if the options md5sum_command
or sha1_command
are not set, it will check if any of the default commands for each of them, as described above, can be used. The result will be saved in the remote configuration, so next time it will use the same. Value none
will be set if none of the default commands could be used for a specific algorithm, and this algorithm will not be supported by the remote.
Disabling the checksumming may be required if you are connecting to SFTP servers which are not under your control, and to which the execution of remote shell commands is prohibited. Set the configuration option disable_hashcheck
to true
to disable checksumming entirely, or set shell_type
to none
to disable all functionality based on remote shell command execution.
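For example, a config stanza pointing rclone at specific checksum executables might look roughly like this (paths and values are illustrative):
[remote]
type = sftp
host = example.com
user = sftpuser
md5sum_command = /path/to/rclone md5sum
sha1_command = /path/to/rclone sha1sum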
-Modification times and hashes
+Modification times and hashes
Modified times are stored on the server to 1 second precision.
Modified times are used in syncing and are fully supported.
Some SFTP servers disable setting/modifying the file modification time after upload (for example, certain configurations of ProFTPd with mod_sftp). If you are using one of these servers, you can set the option set_modtime = false
in your rclone backend configuration to disable this behaviour.
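For example, adding the following line to the remote's config stanza (illustrative):
set_modtime = false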
About command
The about
command returns the total space, free space, and used space on the remote for the disk of the specified path on the remote or, if not set, the disk of the root on the remote.
SFTP usually supports the about command, but it depends on the server. If the server implements the vendor-specific VFS statistics extension, which is normally the case with OpenSSH instances, it will be used. If not, but the same login has access to a Unix shell, where the df
command is available (e.g. in the remote's PATH), then this will be used instead. If the server shell is PowerShell, probably with a Windows OpenSSH server, rclone will use a built-in shell command (see shell access). If none of the above is applicable, about
will fail.
-Standard options
+Standard options
Here are the Standard options specific to sftp (SSH/SFTP).
--sftp-host
SSH host to connect to.
@@ -33679,7 +34419,7 @@ known_hosts_file = ~/.ssh/known_hosts
Type: SpaceSepList
Default:
-Advanced options
+Advanced options
Here are the Advanced options specific to sftp (SSH/SFTP).
--sftp-known-hosts-file
Optional path to known_hosts file.
@@ -33981,7 +34721,16 @@ server_command = sudo /usr/libexec/openssh/sftp-server
Type: bool
Default: false
-Limitations
+--sftp-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SFTP_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
On some SFTP servers (e.g. Synology) the paths are different for SSH and SFTP so the hashes can't be calculated properly. For them using disable_hashcheck
is a good idea.
The only ssh agent supported under Windows is Putty's pageant.
The Go SSH library disables the use of the aes128-cbc cipher by default, due to security concerns. This can be re-enabled on a per-connection basis by setting the use_insecure_cipher
setting in the configuration file to true
. Further details on the insecurity of this cipher can be found in this paper.
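For example, adding the following line to the remote's config stanza (illustrative, and not recommended unless the server requires it):
use_insecure_cipher = true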
@@ -34002,7 +34751,7 @@ server_command = sudo /usr/libexec/openssh/sftp-server
The first path segment must be the name of the share, which you entered when you started to share on Windows. On smbd, it's the section title in smb.conf
(usually in /etc/samba/
) file. You can find shares by querying the root if you're unsure (e.g. rclone lsd remote:
).
You can't access shared printers from rclone, obviously.
You can't use Anonymous access for logging in. You have to use the guest
user with an empty password instead. The rclone client tries to avoid 8.3 names when uploading files by encoding trailing spaces and periods. Alternatively, the local backend on Windows can access SMB servers using UNC paths, e.g. \\server\share
. This doesn't apply to non-Windows OSes, such as Linux and macOS.
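For example, on Windows (an illustrative path):
rclone ls \\server\share\path\to\dir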
-Configuration
+Configuration
Here is an example of making a SMB configuration.
First run
rclone config
@@ -34077,7 +34826,7 @@ y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> d
-Standard options
+Standard options
Here are the Standard options specific to smb (SMB / CIFS).
--smb-host
SMB server hostname to connect to.
@@ -34138,7 +34887,7 @@ y/e/d> d
Type: string
Required: false
-Advanced options
+Advanced options
Here are the Advanced options specific to smb (SMB / CIFS).
--smb-idle-timeout
Max time before closing idle connections.
@@ -34180,6 +34929,15 @@ y/e/d> d
Type: Encoding
Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot
+--smb-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SMB_DESCRIPTION
+- Type: string
+- Required: false
+
Storj
Storj is an encrypted, secure, and cost-effective object storage service that enables you to store, back up, and archive large amounts of data in a decentralized manner.
Backend options
@@ -34239,7 +34997,7 @@ y/e/d> d
S3 backend: secret encryption key is shared with the gateway
-Configuration
+Configuration
To make a new Storj configuration you need one of the following:

- Access Grant that someone else shared with you.
- API Key of a Storj project you are a member of.

Here is an example of how to make a remote called remote
. First run:
rclone config
@@ -34336,7 +35094,7 @@ y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
-Standard options
+Standard options
Here are the Standard options specific to storj (Storj Decentralized Cloud Storage).
--storj-provider
Choose an authentication method.
@@ -34415,6 +35173,17 @@ y/e/d> y
Type: string
Required: false
+Advanced options
+Here are the Advanced options specific to storj (Storj Decentralized Cloud Storage).
+--storj-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_STORJ_DESCRIPTION
+- Type: string
+- Required: false
+
Usage
Paths are specified as remote:bucket
(or remote:
for the lsf
command.) You may put subdirectories in too, e.g. remote:bucket/path/to/dir
.
Once configured you can then use rclone
like this.
@@ -34469,7 +35238,7 @@ y/e/d> y
rclone sync --interactive --progress remote-us:bucket/path/to/dir/ remote-europe:bucket/path/to/dir/
Or even between another cloud storage and Storj.
rclone sync --interactive --progress s3:bucket/path/to/dir/ storj:bucket/path/to/dir/
-Limitations
+Limitations
rclone about
is not supported by the rclone Storj backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs
(most free space) as a member of an rclone union remote.
See List of backends that do not support rclone about and rclone about
Known issues
@@ -34477,7 +35246,7 @@ y/e/d> y
To fix these, please raise your system limits. You can do this by issuing a ulimit -n 65536
just before you run rclone. To change the limits more permanently you can add this to your shell startup script, e.g. $HOME/.bashrc
, or change the system-wide configuration, usually /etc/sysctl.conf
and/or /etc/security/limits.conf
, but please refer to your operating system manual.
SugarSync
SugarSync is a cloud service that enables active synchronization of files across computers and other devices for file backup, access, syncing, and sharing.
-Configuration
+Configuration
The initial setup for SugarSync involves getting a token from SugarSync which you can do with rclone. rclone config
walks you through it.
Here is an example of how to make a remote called remote
. First run:
rclone config
@@ -34542,15 +35311,15 @@ y/e/d> y
Paths are specified as remote:path
Paths may be as deep as required, e.g. remote:directory/subdirectory
.
NB you can't create files in the top level folder; you have to create a folder, which rclone will create as a "Sync Folder" with SugarSync.
-Modification times and hashes
+Modification times and hashes
SugarSync does not support modification times or hashes, therefore syncing will default to --size-only
checking. Note that using --update
will work as rclone can read the time files were uploaded.
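For example, --update can be used like this (paths are illustrative):
rclone copy --update /home/local/directory remote:Folder/directory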
-Restricted filename characters
+Restricted filename characters
SugarSync replaces the default restricted characters set except for DEL.
Invalid UTF-8 bytes will also be replaced, as they can't be used in XML strings.
-Deleting files
+Deleting files
Deleted files will be moved to the "Deleted items" folder by default.
However you can supply the flag --sugarsync-hard-delete
or set the config parameter hard_delete = true
if you would like files to be deleted straight away.
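For example (an illustrative path):
rclone delete --sugarsync-hard-delete remote:Folder/file.txt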
-Standard options
+Standard options
Here are the Standard options specific to sugarsync (Sugarsync).
--sugarsync-app-id
Sugarsync App ID.
@@ -34591,7 +35360,7 @@ y/e/d> y
Type: bool
Default: false
-Advanced options
+Advanced options
Here are the Advanced options specific to sugarsync (Sugarsync).
--sugarsync-refresh-token
Sugarsync refresh token.
@@ -34663,7 +35432,16 @@ y/e/d> y
Type: Encoding
Default: Slash,Ctl,InvalidUtf8,Dot
-Limitations
+--sugarsync-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SUGARSYNC_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
rclone about
is not supported by the SugarSync backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs
(most free space) as a member of an rclone union remote.
See List of backends that do not support rclone about and rclone about
Tardigrade
@@ -34672,7 +35450,7 @@ y/e/d> y
This is a Backend for Uptobox file storage service. Uptobox is closer to a one-click hoster than a traditional cloud storage provider and therefore not suitable for long term storage.
Paths are specified as remote:path
Paths may be as deep as required, e.g. remote:directory/subdirectory
.
-Configuration
+Configuration
To configure an Uptobox backend you'll need your personal api token. You'll find it in your account settings
Here is an example of how to make a remote called remote
with the default setup. First run:
rclone config
@@ -34726,9 +35504,9 @@ y/e/d>
rclone ls remote:
To copy a local directory to an Uptobox directory called backup
rclone copy /home/source remote:backup
-Modification times and hashes
+Modification times and hashes
Uptobox supports neither modified times nor checksums. All timestamps will read as that set by --default-time
.
-Restricted filename characters
+Restricted filename characters
In addition to the default restricted characters set the following characters are also replaced:
@@ -34752,7 +35530,7 @@ y/e/d>
Invalid UTF-8 bytes will also be replaced, as they can't be used in XML strings.
-Standard options
+Standard options
Here are the Standard options specific to uptobox (Uptobox).
--uptobox-access-token
Your access token.
@@ -34764,7 +35542,7 @@ y/e/d>
Type: string
Required: false
-Advanced options
+Advanced options
Here are the Advanced options specific to uptobox (Uptobox).
--uptobox-private
Set to make uploaded files private
@@ -34785,7 +35563,16 @@ y/e/d>
Type: Encoding
Default: Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot
-Limitations
+--uptobox-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_UPTOBOX_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
Uptobox will delete inactive files that have not been accessed in 60 days.
rclone about
is not supported by this backend; an overview of used space can, however, be seen in the Uptobox web interface.
Union
@@ -34799,7 +35586,7 @@ y/e/d>
Subfolders can be used in upstream remotes. Assume a union remote named backup
with the remotes mydrive:private/backup
. Invoking rclone mkdir backup:desktop
is exactly the same as invoking rclone mkdir mydrive:private/backup/desktop
.
There is no special handling of paths containing ..
segments. Invoking rclone mkdir backup:../desktop
is exactly the same as invoking rclone mkdir mydrive:private/backup/../desktop
.
-Configuration
+Configuration
Here is an example of how to make a union called remote
for local folders. First run:
rclone config
This will guide you through an interactive setup process:
@@ -34936,7 +35723,7 @@ e/n/d/r/c/s/q> q
To check if your upstream supports the field, run rclone about remote: [flags]
and see if the required field exists.
-Filters
+Filters
Policies basically search upstream remotes and create a list of files / paths for functions to work on. The policy is responsible for filtering and sorting. The policy type defines the sorting but filtering is mostly uniform as described below.
- No search policies filter.
@@ -35032,7 +35819,7 @@ upstreams = /local:writeback remote:dir
When files are written, they will be written to both remote:dir
and /local
.
As many remotes as desired can be added to upstreams
but there should only be one :writeback
tag.
Rclone does not manage the :writeback
remote in any way other than writing files back to it. So if you need to expire old files or manage the size then you will have to do this yourself.
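One possible approach (an illustrative sketch using the /local writeback remote from the example above) is to prune old files yourself, e.g. deleting anything older than 30 days:
rclone delete --min-age 30d /local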
-Standard options
+Standard options
Here are the Standard options specific to union (Union merges the contents of several upstream fs).
--union-upstreams
List of space separated upstreams.
@@ -35081,7 +35868,7 @@ upstreams = /local:writeback remote:dir
- Type: int
- Default: 120
-Advanced options
+Advanced options
Here are the Advanced options specific to union (Union merges the contents of several upstream fs).
--union-min-free-space
Minimum viable free space for lfs/eplfs policies.
@@ -35093,13 +35880,22 @@ upstreams = /local:writeback remote:dir
Type: SizeSuffix
Default: 1Gi
+--union-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_UNION_DESCRIPTION
+- Type: string
+- Required: false
+
Any metadata supported by the underlying remote is read and written.
See the metadata docs for more info.
WebDAV
Paths are specified as remote:path
Paths may be as deep as required, e.g. remote:directory/subdirectory
.
-Configuration
+Configuration
To configure the WebDAV remote you will need to have a URL for it, and a username and password. If you know what kind of system you are connecting to then rclone can enable extra features.
Here is an example of how to make a remote called remote
. First run:
rclone config
@@ -35173,10 +35969,10 @@ y/e/d> y
rclone ls remote:
To copy a local directory to a WebDAV directory called backup
rclone copy /home/source remote:backup
-Modification times and hashes
+Modification times and hashes
Plain WebDAV does not support modified times. However when used with Fastmail Files, Owncloud or Nextcloud rclone will support modified times.
Likewise plain WebDAV does not support hashes, however when used with Fastmail Files, Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes. Depending on the exact version of Owncloud or Nextcloud hashes may appear on all objects, or only on objects which had a hash uploaded with them.
-Standard options
+Standard options
Here are the Standard options specific to webdav (WebDAV).
--webdav-url
URL of http host to connect to.
@@ -35257,7 +36053,7 @@ y/e/d> y
Type: string
Required: false
-Advanced options
+Advanced options
Here are the Advanced options specific to webdav (WebDAV).
--webdav-bearer-token-command
Command to run to get a bearer token.
@@ -35312,9 +36108,27 @@ y/e/d> y
Type: SizeSuffix
Default: 10Mi
+--webdav-owncloud-exclude-shares
+Exclude ownCloud shares
+Properties:
+
+- Config: owncloud_exclude_shares
+- Env Var: RCLONE_WEBDAV_OWNCLOUD_EXCLUDE_SHARES
+- Type: bool
+- Default: false
+
+--webdav-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_WEBDAV_DESCRIPTION
+- Type: string
+- Required: false
+
Provider notes
See below for notes on specific providers.
-Fastmail Files
+Fastmail Files
Use https://webdav.fastmail.com/
or a subdirectory as the URL, and your Fastmail email username@domain.tld
as the username. Follow this documentation to create an app password with access to Files (WebDAV)
and use this as the password.
Fastmail supports modified times using the X-OC-Mtime
header.
Owncloud
@@ -35390,7 +36204,7 @@ vendor = other
bearer_token_command = oidc-token XDC
Yandex Disk
Yandex Disk is a cloud storage solution created by Yandex.
-Configuration
+Configuration
Here is an example of making a yandex configuration. First run
rclone config
This will guide you through an interactive setup process:
@@ -35444,17 +36258,17 @@ y/e/d> y
Sync /home/local/directory
to the remote path, deleting any excess files in the path.
rclone sync --interactive /home/local/directory remote:directory
Yandex paths may be as deep as required, e.g. remote:directory/subdirectory
.
-Modification times and hashes
+Modification times and hashes
Modified times are supported and are stored accurate to 1 ns in custom metadata called rclone_modified
in RFC3339 with nanoseconds format.
The MD5 hash algorithm is natively supported by Yandex Disk.
Emptying Trash
If you wish to empty your trash you can use the rclone cleanup remote:
command which will permanently delete all your trashed files. This command does not take any path arguments.
To view your current quota you can use the rclone about remote:
command which will display your usage limit (quota) and the current usage.
-Restricted filename characters
+Restricted filename characters
The default restricted characters set are replaced.
Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.
-Standard options
+Standard options
Here are the Standard options specific to yandex (Yandex Disk).
--yandex-client-id
OAuth Client Id.
@@ -35476,7 +36290,7 @@ y/e/d> y
Type: string
Required: false
-Advanced options
+Advanced options
Here are the Advanced options specific to yandex (Yandex Disk).
--yandex-token
OAuth Access Token as a JSON blob.
@@ -35526,13 +36340,22 @@ y/e/d> y
Type: Encoding
Default: Slash,Del,Ctl,InvalidUtf8,Dot
-Limitations
+--yandex-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_YANDEX_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
When uploading very large files (bigger than about 5 GiB) you will need to increase the --timeout
parameter. This is because Yandex pauses (perhaps to calculate the MD5SUM for the entire file) before returning confirmation that the file has been uploaded. The default handling of timeouts in rclone is to assume a 5 minute pause is an error and close the connection - you'll see net/http: timeout awaiting response headers
errors in the logs if this is happening. Setting the timeout to twice the max size of file in GiB should be enough, so if you want to upload a 30 GiB file set a timeout of 2 * 30 = 60m
, that is --timeout 60m
.
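For example (an illustrative path and size):
rclone copy --timeout 60m /path/to/30GiB-file remote: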
Having a Yandex Mail account is mandatory to use the Yandex.Disk subscription. Token generation will work without a mail account, but Rclone won't be able to complete any actions.
[403 - DiskUnsupportedUserAccountTypeError] User account type is not supported.
Zoho Workdrive
Zoho WorkDrive is a cloud storage solution created by Zoho.
-Configuration
+Configuration
Here is an example of making a zoho configuration. First run
rclone config
This will guide you through an interactive setup process:
@@ -35605,14 +36428,14 @@ y/e/d>
Sync /home/local/directory
to the remote path, deleting any excess files in the path.
rclone sync --interactive /home/local/directory remote:directory
Zoho paths may be as deep as required, e.g. remote:directory/subdirectory
.
-Modification times and hashes
+Modification times and hashes
Modified times are currently not supported for Zoho Workdrive.
No hash algorithms are supported.
To view your current quota you can use the rclone about remote:
command which will display your current usage.
-Restricted filename characters
+Restricted filename characters
Only control characters and invalid UTF-8 are replaced. In addition most Unicode full-width characters are not supported at all and will be removed from filenames during upload.
-Standard options
+Standard options
Here are the Standard options specific to zoho (Zoho).
--zoho-client-id
OAuth Client Id.
@@ -35671,7 +36494,7 @@ y/e/d>
-Advanced options
+Advanced options
Here are the Advanced options specific to zoho (Zoho).
--zoho-token
OAuth Access Token as a JSON blob.
@@ -35712,6 +36535,15 @@ y/e/d>
Type: Encoding
Default: Del,Ctl,InvalidUtf8
+--zoho-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_ZOHO_DESCRIPTION
+- Type: string
+- Required: false
+
Setting up your own client_id
For Zoho we advise you to set up your own client_id. To do so you have to complete the following steps.
@@ -35724,7 +36556,7 @@ y/e/d>
Local paths are specified as normal filesystem paths, e.g. /path/to/wherever
, so
rclone sync --interactive /home/source /tmp/destination
Will sync /home/source
to /tmp/destination
.
-Configuration
+Configuration
For consistency's sake one can also configure a remote of type local
in the config file, and access the local filesystem using rclone remote paths, e.g. remote:path/to/wherever
, but it is probably easier not to.
Modification times
Rclone reads and writes the modification times using an accuracy determined by the OS. Typically this is 1 ns on Linux, 10 ns on Windows and 1 second on OS X.
@@ -36095,7 +36927,7 @@ $ tree /tmp/b
0 file2
NB Rclone (like most unix tools such as du
, rsync
and tar
) treats a bind mount to the same device as being on the same filesystem.
NB This flag is only available on Unix based systems. On systems where it isn't supported (e.g. Windows) it will be ignored.
-Advanced options
+Advanced options
Here are the Advanced options specific to local (Local Disk).
--local-nounc
Disable UNC (long path names) conversion on Windows.
@@ -36259,9 +37091,19 @@ $ tree /tmp/b
- Type: Encoding
- Default: Slash,Dot
+--local-description
+Description of the remote
+Properties:
+
+- Config: description
+- Env Var: RCLONE_LOCAL_DESCRIPTION
+- Type: string
+- Required: false
+
Depending on which OS is in use the local backend may return only some of the system metadata. Setting system metadata is supported on all OSes but setting user metadata is only supported on Linux, FreeBSD, NetBSD, macOS and Solaris. It is not supported on Windows yet (see pkg/attrs#47).
User metadata is stored as extended attributes (which may not be supported by all file systems) under the "user.*" prefix.
+Metadata is supported on files and directories.
Here are the possible system metadata items for the local backend.
@@ -36333,7 +37175,7 @@ $ tree /tmp/b
See the metadata docs for more info.
-Backend commands
+Backend commands
Here are the commands specific to the local backend.
Run them with
rclone backend COMMAND remote:
@@ -36350,6 +37192,362 @@ $ tree /tmp/b
- "error": return an error based on option value
Changelog
+v1.66.0 - 2024-03-10
+See commits
+
+- Major features
+
+- Rclone will now sync directory modification times if the backend supports it.
+
+- Rclone will now sync directory metadata if the backend supports it when
-M
/--metadata
is in use.
+
+- See the overview and look for the
D
flags in the Metadata
column to see which backends support it.
+
+- Bisync has received many updates see below for more details or bisync's changelog
+
+- Removed backends
+
+- amazonclouddrive: Remove Amazon Drive backend code and docs (Nick Craig-Wood)
+
+- New Features
+
+- backend
+
+- Add description field for all backends (Paul Stern)
+
+- build
+
+- Update to go1.22 and make go1.20 the minimum required version (Nick Craig-Wood)
+- Fix
CVE-2024-24786
by upgrading google.golang.org/protobuf
(Nick Craig-Wood)
+
+- check: Respect
--no-unicode-normalization
and --ignore-case-sync
for --checkfile
(nielash)
+- cmd: Much improved shell auto completion which reduces the size of the completion file and works faster (Nick Craig-Wood)
+- doc updates (albertony, ben-ba, Eli, emyarod, huajin tong, Jack Provance, kapitainsky, keongalvin, Nick Craig-Wood, nielash, rarspace01, rzitzer, Tera, Vincent Murphy)
+- fs: Add more detailed logging for file includes/excludes (Kyle Reynolds)
+- lsf
+
+- Add
--time-format
flag (nielash)
+- Make metadata appear for directories (Nick Craig-Wood)
+
+- lsjson: Make metadata appear for directories (Nick Craig-Wood)
+- rc
+
+- Add
srcFs
and dstFs
to core/stats
and core/transferred
stats (Nick Craig-Wood)
+- Add
operations/hashsum
to the rc as rclone hashsum
equivalent (Nick Craig-Wood)
+- Add
config/paths
to the rc as rclone config paths
equivalent (Nick Craig-Wood)
+
+- sync
+
+- Optionally report list of synced paths to file (nielash)
+- Implement directory sync for mod times and metadata (Nick Craig-Wood)
+- Don't set directory modtimes if already set (nielash)
+- Don't sync directory modtimes from backends which don't have directories (Nick Craig-Wood)
+
+
+- Bug Fixes
+
+- backend
+
+- Make backends which use oauth implement the
Shutdown
and shutdown the oauth properly (rkonfj)
+
+- bisync
+
+- Handle unicode and case normalization consistently (nielash)
+- Partial uploads known issue on
local
/ftp
/sftp
has been resolved (unless using --inplace
) (nielash)
+- Fixed handling of unicode normalization and case insensitivity, support for
--fix-case
, --ignore-case-sync
, --no-unicode-normalization
(nielash)
+- Bisync no longer fails to find the correct listing file when configs are overridden with backend-specific flags. (nielash)
+
+- nfsmount
+
+- Fix exit after external unmount (nielash)
+- Fix
--volname
being ignored (nielash)
+
+- operations
+
+- Fix renaming a file on macOS (nielash)
+- Fix case-insensitive moves in operations.Move (nielash)
+- Fix TestCaseInsensitiveMoveFileDryRun on chunker integration tests (nielash)
+- Fix TestMkdirModTime test (Nick Craig-Wood)
+- Fix TestSetDirModTime for backends with SetDirModTime but not Metadata (Nick Craig-Wood)
+- Fix typo in log messages (nielash)
+
+- serve nfs: Fix writing files via Finder on macOS (nielash)
+- serve restic: Fix error handling (Michael Eischer)
+- serve webdav: Fix
--baseurl
without leading / (Nick Craig-Wood)
+- stats: Fix race between ResetCounters and stopAverageLoop called from time.AfterFunc (Nick Craig-Wood)
+- sync
+
+--fix-case
flag to rename case insensitive dest (nielash)
+- Use operations.DirMove instead of sync.MoveDir for
--fix-case
(nielash)
+
+- systemd: Fix detection and switch to the coreos package everywhere rather than having 2 separate libraries (Anagh Kumar Baranwal)
+
+- Mount
+
+- Fix macOS not noticing errors with
--daemon
(Nick Craig-Wood)
+- Notice daemon dying much quicker (Nick Craig-Wood)
+
+- VFS
+
+- Fix unicode normalization on macOS (nielash)
+
+- Bisync
+
+- Copies and deletes are now handled in one operation instead of two (nielash)
+--track-renames
and --backup-dir
are now supported (nielash)
+- Final listings are now generated from sync results, to avoid needing to re-list (nielash)
+- Bisync is now much more resilient to changes that happen during a bisync run, and far less prone to critical errors / undetected changes (nielash)
+- Bisync is now capable of rolling a file listing back in cases of uncertainty, essentially marking the file as needing to be rechecked next time. (nielash)
+- A few basic terminal colors are now supported, controllable with
--color
(AUTO
|NEVER
|ALWAYS
) (nielash)
+- Initial listing snapshots of Path1 and Path2 are now generated concurrently, using the same "march" infrastructure as
check
and sync
, for performance improvements and less risk of error. (nielash)
+--resync
is now much more efficient (especially for users of --create-empty-src-dirs
) (nielash)
+- Google Docs (and other files of unknown size) are now supported (with the same options as in
sync
) (nielash)
+- Equality checks before a sync conflict rename now fall back to
cryptcheck
(when possible) or --download
+, (nielash) instead of --size-only
, when check
is not available.
+- Bisync now fully supports comparing based on any combination of size, modtime, and checksum, lifting the prior restriction on backends without modtime support. (nielash)
+- Bisync now supports a "Graceful Shutdown" mode to cleanly cancel a run early without requiring
--resync
. (nielash)
+- New
--recover
flag allows robust recovery in the event of interruptions, without requiring --resync
. (nielash)
+- A new
--max-lock
setting allows lock files to automatically renew and expire, for better automatic recovery when a run is interrupted. (nielash)
+- Bisync now supports auto-resolving sync conflicts and customizing rename behavior with new
--conflict-resolve
, --conflict-loser
, and --conflict-suffix
flags. (nielash)
+- A new
--resync-mode
flag allows more control over which version of a file gets kept during a --resync
. (nielash)
+- Bisync now supports
--retries
and --retries-sleep
(when --resilient
is set.) (nielash)
+- Clarify file operation directions in dry-run logs (Kyle Reynolds)
+
+- Local
+
+- Fix cleanRootPath on Windows after go1.21.4 stdlib update (nielash)
+- Implement setting modification time on directories (nielash)
+- Implement modtime and metadata for directories (Nick Craig-Wood)
+- Fix setting of btime on directories on Windows (Nick Craig-Wood)
+- Delete backend implementation of Purge to speed up and make stats (Nick Craig-Wood)
+- Support metadata setting and mapping on server side Move (Nick Craig-Wood)
+
+- Cache
+
+- Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+- Implement setting metadata on directories (Nick Craig-Wood)
+
+- Crypt
+
+- Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+- Implement setting metadata on directories (Nick Craig-Wood)
+- Improve handling of undecryptable file names (nielash)
+- Add missing error check spotted by linter (Nick Craig-Wood)
+
+- Azure Blob
+
+- Implement
--azureblob-delete-snapshots
(Nick Craig-Wood)
+
+- B2
+
+- Clarify exactly what
--b2-download-auth-duration
does in the docs (Nick Craig-Wood)
+
+- Chunker
+
+- Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+- Implement setting metadata on directories (Nick Craig-Wood)
+
+- Combine
+
+- Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+- Implement setting metadata on directories (Nick Craig-Wood)
+- Fix directory metadata error on upstream root (nielash)
+- Fix directory move across upstreams (nielash)
+
+- Compress
+
+- Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+- Implement setting metadata on directories (Nick Craig-Wood)
+
+- Drive
+
+- Implement setting modification time on directories (nielash)
+- Implement modtime and metadata setting for directories (Nick Craig-Wood)
+- Support metadata setting and mapping on server side Move,Copy (Nick Craig-Wood)
+
+- FTP
+
+- Fix mkdir with rsftp which is returning the wrong code (Nick Craig-Wood)
+
+- Hasher
+
+- Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+- Implement setting metadata on directories (Nick Craig-Wood)
+- Fix error from trying to stop an already-stopped db (nielash)
+- Look for cached hash if passed hash unexpectedly blank (nielash)
+
+- Imagekit
+
+- Updated docs and web content (Harshit Budhraja)
+- Updated overview - supported operations (Harshit Budhraja)
+
+- Mega
+
+- Fix panic with go1.22 (Nick Craig-Wood)
+
+- Netstorage
+
+- Fix Root to return correct directory when pointing to a file (Nick Craig-Wood)
+
+- Onedrive
+
+- Add metadata support (nielash)
+
+- Opendrive
+
+- Fix moving file/folder within the same parent dir (nielash)
+
+- Oracle Object Storage
+
+- Support
backend restore
command (Nikhil Ahuja)
+- Support workload identity authentication for OKE (Anders Swanson)
+
+- Protondrive
+
+- Fix encoding of Root method (Nick Craig-Wood)
+
+- Quatrix
+
+- Fix
Content-Range
header (Volodymyr)
+- Add option to skip project folders (Oksana Zhykina)
+- Fix Root to return correct directory when pointing to a file (Nick Craig-Wood)
+
+- S3
+
+- Add
--s3-version-deleted
to show delete markers in listings when using versions. (Nick Craig-Wood)
+- Add IPv6 support with option
--s3-use-dual-stack
(Anthony Metzidis)
+- Copy parts in parallel when doing chunked server side copy (Nick Craig-Wood)
+- GCS provider: fix server side copy of files bigger than 5G (Nick Craig-Wood)
+- Support metadata setting and mapping on server side Copy (Nick Craig-Wood)
+
+- Seafile
+
+- Fix download/upload error when
FILE_SERVER_ROOT
is relative (DanielEgbers)
+- Fix Root to return correct directory when pointing to a file (Nick Craig-Wood)
+
+- SFTP
+
+- Implement setting modification time on directories (nielash)
+- Set directory modtimes update on write flag (Nick Craig-Wood)
+- Shorten wait delay for external ssh binaries now that we are using go1.20 (Nick Craig-Wood)
+
+- Swift
+
+- Avoid unnecessary container versioning check (Joe Cai)
+
+- Union
+
+- Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+- Implement setting metadata on directories (Nick Craig-Wood)
+
+- WebDAV
+
+- Reduce priority of chunks upload log (Gabriel Ramos)
+- owncloud: Add config
owncloud_exclude_shares
+which allows excluding shared files and folders when listing remote resources (Thomas Müller)
+
+
+v1.65.2 - 2024-01-24
+See commits
+
+- Bug Fixes
+
+- build: bump github.com/cloudflare/circl from 1.3.6 to 1.3.7 (dependabot)
+- docs updates (Nick Craig-Wood, kapitainsky, nielash, Tera, Harshit Budhraja)
+
+- VFS
+
+- Fix stale data when using
--vfs-cache-mode
full (Nick Craig-Wood)
+
+- Azure Blob
+
+- IMPORTANT Fix data corruption bug - see #7590 (Nick Craig-Wood)
+
+
+v1.65.1 - 2024-01-08
+See commits
+
+- Bug Fixes
+
+- build
+
+- Bump golang.org/x/crypto to fix ssh terrapin CVE-2023-48795 (dependabot)
+- Update to go1.21.5 to fix Windows path problems (Nick Craig-Wood)
+- Fix docker build on arm/v6 (Nick Craig-Wood)
+
+- install.sh: fix harmless error message on install (Nick Craig-Wood)
+- accounting: fix stats to show server side transfers (Nick Craig-Wood)
+- doc fixes (albertony, ben-ba, Eli Orzitzer, emyarod, keongalvin, rarspace01)
+- nfsmount: Compile for all unix oses, add
--sudo
and fix error/option handling (Nick Craig-Wood)
+- operations: Fix files moved by rclone move not being counted as transfers (Nick Craig-Wood)
+- oauthutil: Avoid panic when
*token
and *ts.token
are the same (rkonfj)
+- serve s3: Fix listing oddities (Nick Craig-Wood)
+
+- VFS
+
+- Note that
--vfs-refresh
runs in the background (Nick Craig-Wood)
+
+- Azurefiles
+
+- Fix storage base url (Oksana)
+
+- Crypt
+
+- Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+
+- Chunker
+
+- Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+
+- Compress
+
+- Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+
+- Dropbox
+
+- Fix used space on dropbox team accounts (Nick Craig-Wood)
+
+- FTP
+
+- Fix multi-thread copy (WeidiDeng)
+
+- Googlephotos
+
+- Fix nil pointer exception when batch failed (Nick Craig-Wood)
+
+- Hasher
+
+- Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+- Fix invalid memory address error when MaxAge == 0 (nielash)
+
+- Onedrive
+
+- Fix error listing: unknown object type
<nil>
(Nick Craig-Wood)
+- Fix "unauthenticated: Unauthenticated" errors when uploading (Nick Craig-Wood)
+
+- Oracleobjectstorage
+
+- Fix object storage endpoint for custom endpoints (Manoj Ghosh)
+- Multipart copy create bucket if it doesn't exist. (Manoj Ghosh)
+
+- Protondrive
+
+- Fix CVE-2023-45286 / GHSA-xwh9-gc39-5298 (Nick Craig-Wood)
+
+- S3
+
+- Fix crash if no UploadId in multipart upload (Nick Craig-Wood)
+
+- Smb
+
+- Fix shares not listed by updating go-smb2 (halms)
+
+- Union
+
+- Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+
+
v1.65.0 - 2023-11-26
See commits
@@ -43624,9 +44822,9 @@ $ tree /tmp/b
- Project started
Bugs and Limitations
-Limitations
-Directory timestamps aren't preserved
-Rclone doesn't currently preserve the timestamps of directories. This is because rclone only really considers objects when syncing.
+Limitations
+Directory timestamps aren't preserved on some backends
+As of v1.66
, rclone supports syncing directory modtimes, if the backend supports it. Some backends do not support it -- see overview for a complete list. Additionally, note that empty directories are not synced by default (this can be enabled with --create-empty-src-dirs
.)
Rclone struggles with millions of files in a directory/bucket
Currently rclone loads each directory/bucket entirely into memory before using it. Since each rclone object takes 0.5k-1k of memory this can take a very long time and use a large amount of memory.
Millions of files in a directory tends to occur on bucket-based remotes (e.g. S3 buckets) since those remotes do not segregate subdirectories within the bucket.
@@ -43798,7 +44996,7 @@ THE SOFTWARE.
- Scott McGillivray scott.mcgillivray@gmail.com
- Bjørn Erik Pedersen bjorn.erik.pedersen@gmail.com
- Lukas Loesche lukas@mesosphere.io
-- emyarod allllaboutyou@gmail.com
+- emyarod emyarod@users.noreply.github.com
- T.C. Ferguson tcf909@gmail.com
- Brandur brandur@mutelight.org
- Dario Giovannetti dev@dariogiovannetti.net
@@ -44546,6 +45744,27 @@ THE SOFTWARE.
- Alen Šiljak dev@alensiljak.eu.org
- 你知道未来吗 rkonfj@gmail.com
- Abhinav Dhiman 8640877+ahnv@users.noreply.github.com
+- halms 7513146+halms@users.noreply.github.com
+- ben-ba benjamin.brauner@gmx.de
+- Eli Orzitzer e_orz@yahoo.com
+- Anthony Metzidis anthony.metzidis@gmail.com
+- emyarod afw5059@gmail.com
+- keongalvin keongalvin@gmail.com
+- rarspace01 rarspace01@users.noreply.github.com
+- Paul Stern paulstern45@gmail.com
+- Nikhil Ahuja nikhilahuja@live.com
+- Harshit Budhraja 52413945+harshit-budhraja@users.noreply.github.com
+- Tera 24725862+teraa@users.noreply.github.com
+- Kyle Reynolds kylereynoldsdev@gmail.com
+- Michael Eischer michael.eischer@gmx.de
+- Thomas Müller 1005065+DeepDiver1975@users.noreply.github.com
+- DanielEgbers 27849724+DanielEgbers@users.noreply.github.com
+- Jack Provance 49460795+njprov@users.noreply.github.com
+- Gabriel Ramos 109390599+gabrielramos02@users.noreply.github.com
+- Dan McArdle d@nmcardle.com
+- Joe Cai joe.cai@bigcommerce.com
+- Anders Swanson anders.swanson@oracle.com
+- huajin tong 137764712+thirdkeyword@users.noreply.github.com
Forum
diff --git a/MANUAL.md b/MANUAL.md
index 13bee7b75..1570bb645 100644
--- a/MANUAL.md
+++ b/MANUAL.md
@@ -1,6 +1,6 @@
% rclone(1) User Manual
% Nick Craig-Wood
-% Nov 26, 2023
+% Mar 10, 2024
# Rclone syncs your files to cloud storage
@@ -89,6 +89,7 @@ Rclone helps you:
- Can use multi-threaded downloads to local disk
- [Copy](https://rclone.org/commands/rclone_copy/) new or changed files to cloud storage
- [Sync](https://rclone.org/commands/rclone_sync/) (one way) to make a directory identical
+- [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
- [Move](https://rclone.org/commands/rclone_move/) files to cloud storage deleting the local after verification
- [Check](https://rclone.org/commands/rclone_check/) hashes and for missing/extra files
- [Mount](https://rclone.org/commands/rclone_mount/) your cloud storage as a network disk
@@ -104,7 +105,6 @@ WebDAV or S3, that work out of the box.)
- 1Fichier
- Akamai Netstorage
- Alibaba Cloud (Aliyun) Object Storage System (OSS)
-- Amazon Drive
- Amazon S3
- Backblaze B2
- Box
@@ -127,6 +127,7 @@ WebDAV or S3, that work out of the box.)
- Hetzner Storage Box
- HiDrive
- HTTP
+- ImageKit
- Internet Archive
- Jottacloud
- IBM COS S3
@@ -856,7 +857,6 @@ See the following for detailed instructions for
* [1Fichier](https://rclone.org/fichier/)
* [Akamai Netstorage](https://rclone.org/netstorage/)
* [Alias](https://rclone.org/alias/)
- * [Amazon Drive](https://rclone.org/amazonclouddrive/)
* [Amazon S3](https://rclone.org/s3/)
* [Backblaze B2](https://rclone.org/b2/)
* [Box](https://rclone.org/box/)
@@ -1039,6 +1039,15 @@ recently very efficiently like this:
rclone copy --max-age 24h --no-traverse /path/to/src remote:
+
+Rclone will sync the modification times of files and directories if
+the backend supports it. If metadata syncing is required then use the
+`--metadata` flag.
+
+Note that the modification time and metadata for the root directory
+will **not** be synced. See https://github.com/rclone/rclone/issues/7652
+for more info.
+
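+For example (an illustrative sketch, copying while also syncing metadata):
+
+    rclone copy --metadata /path/to/src remote:dest
+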
**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics.
**Note**: Use the `--dry-run` or the `--interactive`/`-i` flag to test without copying anything.
@@ -1070,7 +1079,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -1084,6 +1093,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -1185,11 +1195,56 @@ the destination from the sync with a filter rule or by putting an
exclude-if-present file inside the destination directory and sync to a
destination that is inside the source directory.
+Rclone will sync the modification times of files and directories if
+the backend supports it. If metadata syncing is required then use the
+`--metadata` flag.
+
+Note that the modification time and metadata for the root directory
+will **not** be synced. See https://github.com/rclone/rclone/issues/7652
+for more info.
+
**Note**: Use the `-P`/`--progress` flag to view real-time transfer statistics
**Note**: Use the `rclone dedupe` command to deal with "Duplicate object/directory found in source/destination - ignoring" errors.
See [this forum post](https://forum.rclone.org/t/sync-not-clearing-duplicates/14372) for more info.
+# Logger Flags
+
+The `--differ`, `--missing-on-dst`, `--missing-on-src`, `--match` and `--error` flags write paths, one per line, to the file name (or
+stdout if it is `-`) supplied. What they write is described in the
+help below. For example `--differ` will write all paths which are
+present on both the source and destination but different.
+
+The `--combined` flag will write a file (or stdout) which contains all
+file paths with a symbol and then a space and then the path to tell
+you what happened to it. These are reminiscent of diff files.
+
+- `= path` means path was found in source and destination and was identical
+- `- path` means path was missing on the source, so only in the destination
+- `+ path` means path was missing on the destination, so only in the source
+- `* path` means path was present in source and destination but different.
+- `! path` means there was an error reading or hashing the source or dest.
+
+The `--dest-after` flag writes a list file using the same format flags
+as [`lsf`](https://rclone.org/commands/rclone_lsf/#synopsis) (including [customizable options
+for hash, modtime, etc.](https://rclone.org/commands/rclone_lsf/#synopsis))
+Conceptually it is similar to rsync's `--itemize-changes`, but not identical
+-- it should output an accurate list of what will be on the destination
+after the sync.
+
+Note that these logger flags have a few limitations, and certain scenarios
+are not currently supported:
+
+- `--max-duration` / `CutoffModeHard`
+- `--compare-dest` / `--copy-dest`
+- server-side moves of an entire dir at once
+- High-level retries, because there would be duplicates (use `--retries 1` to disable)
+- Possibly some unusual error scenarios
+
+Note also that each file is logged during the sync, as opposed to after, so it
+is most useful as a predictor of what SHOULD happen to each file
+(which may or may not match what actually DID.)
+
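+For example, a run with `--combined -` might print lines like these
+(paths are purely illustrative):
+
+```
+= docs/readme.md
++ photos/new.jpg
+- photos/old.jpg
+* notes/todo.txt
+! broken/unreadable.dat
+```
+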
```
rclone sync source:path dest:path [flags]
@@ -1198,8 +1253,24 @@ rclone sync source:path dest:path [flags]
## Options
```
+ --absolute Put a leading / in front of path names
+ --combined string Make a combined report of changes to this file
--create-empty-src-dirs Create empty source dirs on destination after sync
+ --csv Output in CSV format
+ --dest-after string Report all files that exist on the dest post-sync
+ --differ string Report all non-matching files to this file
+ -d, --dir-slash Append a slash to directory names (default true)
+ --dirs-only Only list directories
+ --error string Report all files with errors (hashing or reading) to this file
+ --files-only Only list files (default true)
+ -F, --format string Output format - see lsf help for details (default "p")
+ --hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
-h, --help help for sync
+ --match string Report all matching files to this file
+ --missing-on-dst string Report all files missing from the destination to this file
+ --missing-on-src string Report all files missing from the source to this file
+ -s, --separator string Separator for the items in the format (default ";")
+ -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
```
@@ -1217,7 +1288,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -1231,6 +1302,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -1250,6 +1322,7 @@ Flags just used for `rclone sync`.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
+ --fix-case Force rename of case insensitive dest to match source
--ignore-errors Delete even if there are I/O errors
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
@@ -1345,6 +1418,14 @@ whether rclone lists the destination directory or not. Supplying this
option when moving a small number of files into a large destination
can speed transfers up greatly.
+Rclone will sync the modification times of files and directories if
+the backend supports it. If metadata syncing is required then use the
+`--metadata` flag.
+
+Note that the modification time and metadata for the root directory
+will **not** be synced. See https://github.com/rclone/rclone/issues/7652
+for more info.
+
**Important**: Since this can cause data loss, test first with the
`--dry-run` or the `--interactive`/`-i` flag.
@@ -1378,7 +1459,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -1392,6 +1473,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -2783,6 +2865,11 @@ On each successive run it will:
Changes include `New`, `Newer`, `Older`, and `Deleted` files.
- Propagate changes on Path1 to Path2, and vice-versa.
+Bisync is **in beta** and is considered an **advanced command**, so use with care.
+Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
+(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
+or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
+
See [full bisync description](https://rclone.org/bisync/) for details.
@@ -2793,20 +2880,31 @@ rclone bisync remote1:path1 remote2:path2 [flags]
## Options
```
- --check-access Ensure expected RCLONE_TEST files are found on both Path1 and Path2 filesystems, else abort.
- --check-filename string Filename for --check-access (default: RCLONE_TEST)
- --check-sync string Controls comparison of final listings: true|false|only (default: true) (default "true")
- --create-empty-src-dirs Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
- --filters-file string Read filtering patterns from a file
- --force Bypass --max-delete safety check and run the sync. Consider using with --verbose
- -h, --help help for bisync
- --ignore-listing-checksum Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)
- --localtime Use local time in listings (default: UTC)
- --no-cleanup Retain working files (useful for troubleshooting and testing).
- --remove-empty-dirs Remove ALL empty directories at the final cleanup step.
- --resilient Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!
- -1, --resync Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.
- --workdir string Use custom working dir - useful for testing. (default: $HOME/.cache/rclone/bisync)
+ --backup-dir1 string --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+ --backup-dir2 string --backup-dir for Path2. Must be a non-overlapping path on the same remote.
+ --check-access Ensure expected RCLONE_TEST files are found on both Path1 and Path2 filesystems, else abort.
+ --check-filename string Filename for --check-access (default: RCLONE_TEST)
+ --check-sync string Controls comparison of final listings: true|false|only (default: true) (default "true")
+ --compare string Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')
+ --conflict-loser ConflictLoserAction Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): , num, pathname, delete (default: num)
+ --conflict-resolve string Automatically resolve conflicts by preferring the version that is: none, path1, path2, newer, older, larger, smaller (default: none) (default "none")
+ --conflict-suffix string Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')
+ --create-empty-src-dirs Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
+ --download-hash Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)
+ --filters-file string Read filtering patterns from a file
+ --force Bypass --max-delete safety check and run the sync. Consider using with --verbose
+ -h, --help help for bisync
+ --ignore-listing-checksum Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)
+ --max-lock Duration Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m) (default 0s)
+ --no-cleanup Retain working files (useful for troubleshooting and testing).
+ --no-slow-hash Ignore listing checksums only on backends where they are slow
+ --recover Automatically recover from interruptions without requiring --resync.
+ --remove-empty-dirs Remove ALL empty directories at the final cleanup step.
+ --resilient Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!
+ -1, --resync Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.
+ --resync-mode string During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.) (default "none")
+ --slow-hash-sync-only Ignore slow checksums for listings and deltas, but still consider them during sync calls.
+ --workdir string Use custom working dir - useful for testing. (default: {WORKDIR})
```
@@ -2824,7 +2922,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -2838,6 +2936,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -3956,7 +4055,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -3970,6 +4069,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -4036,7 +4136,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
# rclone copyurl
-Copy url content to dest.
+Copy the contents of the URL supplied to dest:path.
## Synopsis
@@ -4044,11 +4144,14 @@ Copy url content to dest.
Download a URL's content and copy it to the destination without saving
it in temporary storage.
-Setting `--auto-filename` will attempt to automatically determine the filename from the URL
-(after any redirections) and used in the destination path.
-With `--auto-filename-header` in
-addition, if a specific filename is set in HTTP headers, it will be used instead of the name from the URL.
-With `--print-filename` in addition, the resulting file name will be printed.
+Setting `--auto-filename` will attempt to automatically determine the
+filename from the URL (after any redirections) and use it in the
+destination path.
+
+With `--auto-filename-header` in addition, if a specific filename is
+set in HTTP headers, it will be used instead of the name from the URL.
+With `--print-filename` in addition, the resulting file name will be
+printed.
Setting `--no-clobber` will prevent overwriting file on the
destination if there is one with the same name.
@@ -4056,6 +4159,17 @@ destination if there is one with the same name.
Setting `--stdout` or making the output file name `-`
will cause the output to be written to standard output.
+## Troubleshooting
+
+If you can't get `rclone copyurl` to work then here are some things you can try:
+
+- `--disable-http2` rclone will use HTTP2 if available - try disabling it
+- `--bind 0.0.0.0` rclone will use IPv6 if available - try disabling it
+- `--bind ::0` to disable IPv4
+- `--user-agent curl` - some sites have whitelists for curl's user-agent - try that
+- Make sure the site works with `curl` directly
+
+
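+Combining a few of the suggestions above (the flags shown are illustrative,
+and `dest:path` is assumed to be a configured remote path):
+
+    rclone copyurl https://example.com dest:path --disable-http2 --user-agent curl --bind 0.0.0.0
+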
```
rclone copyurl https://example.com dest:path [flags]
@@ -4627,7 +4741,7 @@ List all the remotes in the config file and defined in environment variables.
rclone listremotes lists all the available remotes from the config file.
-When used with the `--long` flag it lists the types too.
+When used with the `--long` flag it lists the types and the descriptions too.
```
@@ -4638,7 +4752,7 @@ rclone listremotes [flags]
```
-h, --help help for listremotes
- --long Show the type as well as names
+ --long Show the type and the description as well as names
```
@@ -4750,6 +4864,19 @@ those only (without traversing the whole directory structure):
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
rclone copy --files-from-raw new_files /path/to/local remote:path
+The default time format is `'2006-01-02 15:04:05'`.
+[Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with the `--time-format` flag.
+Examples:
+
+ rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
+ rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
+ rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
+ rclone lsf remote:path --format pt --time-format RFC3339
+ rclone lsf remote:path --format pt --time-format DateOnly
+ rclone lsf remote:path --format pt --time-format max
+`--time-format max` will automatically truncate '`2006-01-02 15:04:05.000000000`'
+to the maximum precision supported by the remote.
+
Any of the filtering options can be applied to this command.
@@ -4781,16 +4908,17 @@ rclone lsf remote:path [flags]
## Options
```
- --absolute Put a leading / in front of path names
- --csv Output in CSV format
- -d, --dir-slash Append a slash to directory names (default true)
- --dirs-only Only list directories
- --files-only Only list files
- -F, --format string Output format - see help for details (default "p")
- --hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
- -h, --help help for lsf
- -R, --recursive Recurse into the listing
- -s, --separator string Separator for the items in the format (default ";")
+ --absolute Put a leading / in front of path names
+ --csv Output in CSV format
+ -d, --dir-slash Append a slash to directory names (default true)
+ --dirs-only Only list directories
+ --files-only Only list files
+ -F, --format string Output format - see help for details (default "p")
+ --hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
+ -h, --help help for lsf
+ -R, --recursive Recurse into the listing
+ -s, --separator string Separator for the items in the format (default ";")
+ -t, --time-format string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
```
@@ -5271,12 +5399,21 @@ Mounting on macOS can be done either via [built-in NFS server](https://rclone.or
FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
which "mounts" via an NFSv4 local server.
-# NFS mount
+#### Unicode Normalization
+
+It is highly recommended to keep the default of `--no-unicode-normalization=false`
+for all `mount` and `serve` commands on macOS. For details, see [vfs-case-sensitivity](https://rclone.org/commands/rclone_mount/#vfs-case-sensitivity).
+
+### NFS mount
This method spins up an NFS server using [serve nfs](https://rclone.org/commands/rclone_serve_nfs/) command and mounts
it to the specified mountpoint. If you run this in background mode using |--daemon|, you will need to
send SIGTERM signal to the rclone process using |kill| command to stop the mount.
+Note that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the `nfsmount` caching handler.
+This should not be set too low or you may experience errors when trying to access files. The default is 1000000,
+but consider lowering this limit if the server's system resource usage causes problems.
+
### macFUSE Notes
If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) from
@@ -5304,15 +5441,6 @@ As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):
This means that viewing files with various tools, notably macOS Finder, will cause rclone
to update the modification time of the file. This may make rclone upload a full new copy
of the file.
-
-#### Unicode Normalization
-
-Rclone includes flags for unicode normalization with macFUSE that should be updated
-for FUSE-T. See [this forum post](https://forum.rclone.org/t/some-unicode-forms-break-mount-on-macos-with-fuse-t/36403)
-and [FUSE-T issue #16](https://github.com/macos-fuse-t/fuse-t/issues/16). The following
-flag should be added to the `rclone mount` command.
-
- -o modules=iconv,from_code=UTF-8,to_code=UTF-8
#### Read Only mounts
@@ -5785,6 +5913,28 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario,
+hiding the duplicates, and logging an error, similar to how this is handled in
+`rclone sync`.
+
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
@@ -5843,6 +5993,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -5855,7 +6006,7 @@ rclone mount remote:path /path/to/mountpoint [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -5963,7 +6114,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -5977,6 +6128,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -6164,6 +6316,925 @@ See the [global flags page](https://rclone.org/flags/) for global options not li
* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
+# rclone nfsmount
+
+Mount the remote as file system on a mountpoint.
+
+## Synopsis
+
+rclone nfsmount allows Linux, FreeBSD, macOS and Windows to
+mount any of Rclone's cloud storage systems as a file system with
+FUSE.
+
+First set up your remote using `rclone config`. Check it works with `rclone ls` etc.
+
+On Linux and macOS, you can run mount in either foreground or background (aka
+daemon) mode. Mount runs in foreground mode by default. Use the `--daemon` flag
+to force background mode. On Windows you can run mount in foreground only,
+the flag is ignored.
+
+In background mode rclone acts as a generic Unix mount program: the main
+program starts, spawns background rclone process to setup and maintain the
+mount, waits until success or timeout and exits with appropriate code
+(killing the child process if it fails).
+
+On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount`
+is an **empty** **existing** directory:
+
+ rclone nfsmount remote:path/to/files /path/to/local/mount
+
+On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
+for details. If foreground mount is used interactively from a console window,
+rclone will serve the mount and occupy the console so another window should be
+used to work with the mount until rclone is interrupted e.g. by pressing Ctrl-C.
+
+The following examples will mount to an automatically assigned drive,
+to specific drive letter `X:`, to path `C:\path\parent\mount`
+(where parent directory or drive must exist, and mount must **not** exist,
+and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
+the last example will mount as network share `\\cloud\remote` and map it to an
+automatically assigned drive:
+
+ rclone nfsmount remote:path/to/files *
+ rclone nfsmount remote:path/to/files X:
+ rclone nfsmount remote:path/to/files C:\path\parent\mount
+ rclone nfsmount remote:path/to/files \\cloud\remote
+
+When the program ends while in foreground mode, either via Ctrl+C or receiving
+a SIGINT or SIGTERM signal, the mount should be automatically stopped.
+
+When running in background mode the user will have to stop the mount manually:
+
+ # Linux
+ fusermount -u /path/to/local/mount
+ # OS X
+ umount /path/to/local/mount
+
+The umount operation can fail, for example when the mountpoint is busy.
+When that happens, it is the user's responsibility to stop the mount manually.
+
+The size of the mounted file system will be set according to information retrieved
+from the remote, the same as returned by the [rclone about](https://rclone.org/commands/rclone_about/)
+command. Remotes with unlimited storage may report the used size only,
+then an additional 1 PiB of free space is assumed. If the remote does not
+[support](https://rclone.org/overview/#optional-features) the about feature
+at all, then 1 PiB is set as both the total and the free size.
+
+## Installing on Windows
+
+To run rclone nfsmount on Windows, you will need to
+download and install [WinFsp](http://www.secfs.net/winfsp/).
+
+[WinFsp](https://github.com/winfsp/winfsp) is an open-source
+Windows File System Proxy which makes it easy to write user space file
+systems for Windows. It provides a FUSE emulation layer which rclone
+uses in combination with [cgofuse](https://github.com/winfsp/cgofuse).
+Both of these packages are by Bill Zissimopoulos who was very helpful
+during the implementation of rclone nfsmount for Windows.
+
+### Mounting modes on windows
+
+Unlike other operating systems, Microsoft Windows provides a different filesystem
+type for network and fixed drives. It optimises access on the assumption fixed
+disk drives are fast and reliable, while network drives have relatively high latency
+and less reliability. Some settings can also be differentiated between the two types,
+for example that Windows Explorer should just display icons and not create preview
+thumbnails for image and video files on network drives.
+
+In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
+However, you can also choose to mount it as a remote network drive, often described
+as a network share. If you mount an rclone remote using the default, fixed drive mode
+and experience unexpected program errors, freezes or other issues, consider mounting
+as a network drive instead.
+
+When mounting as a fixed disk drive you can either mount to an unused drive letter,
+or to a path representing a **nonexistent** subdirectory of an **existing** parent
+directory or drive. Using the special value `*` will tell rclone to
+automatically assign the next available drive letter, starting with Z: and moving backward.
+Examples:
+
+ rclone nfsmount remote:path/to/files *
+ rclone nfsmount remote:path/to/files X:
+ rclone nfsmount remote:path/to/files C:\path\parent\mount
+ rclone nfsmount remote:path/to/files X:
+
+Option `--volname` can be used to set a custom volume name for the mounted
+file system. The default is to use the remote name and path.
+
+To mount as network drive, you can add option `--network-mode`
+to your nfsmount command. Mounting to a directory path is not supported in
+this mode, it is a limitation Windows imposes on junctions, so the remote must always
+be mounted to a drive letter.
+
+ rclone nfsmount remote:path/to/files X: --network-mode
+
+A volume name specified with `--volname` will be used to create the network share path.
+A complete UNC path, such as `\\cloud\remote`, optionally with path
+`\\cloud\remote\madeup\path`, will be used as is. Any other
+string will be used as the share part, after a default prefix `\\server\`.
+If no volume name is specified then `\\server\share` will be used.
+You must make sure the volume name is unique when you are mounting more than one drive,
+or else the mount command will fail. The share name will be treated as the volume label for
+the mapped drive, shown in Windows Explorer etc, while the complete
+`\\server\share` will be reported as the remote UNC path by
+`net use` etc, just like a normal network drive mapping.
+
+If you specify a full network share UNC path with `--volname`, this will implicitly
+set the `--network-mode` option, so the following two examples have same result:
+
+ rclone nfsmount remote:path/to/files X: --network-mode
+ rclone nfsmount remote:path/to/files X: --volname \\server\share
+
+You may also specify the network share UNC path as the mountpoint itself. Then rclone
+will automatically assign a drive letter, same as with `*` and use that as
+mountpoint, and instead use the UNC path specified as the volume name, as if it were
+specified with the `--volname` option. This will also implicitly set
+the `--network-mode` option. This means the following two examples have same result:
+
+ rclone nfsmount remote:path/to/files \\cloud\remote
+ rclone nfsmount remote:path/to/files * --volname \\cloud\remote
+
+There is yet another way to enable network mode, and to set the share path,
+and that is to pass the "native" libfuse/WinFsp option directly:
+`--fuse-flag --VolumePrefix=\server\share`. Note that the path
+must be with just a single backslash prefix in this case.
+
+
+*Note:* In previous versions of rclone this was the only supported method.
+
+[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
+
+See also [Limitations](#limitations) section below.
+
+### Windows filesystem permissions
+
+The FUSE emulation layer on Windows must convert between the POSIX-based
+permission model used in FUSE, and the permission model used in Windows,
+based on access-control lists (ACL).
+
+The mounted filesystem will normally get three entries in its access-control list (ACL),
+representing permissions for the POSIX permission scopes: Owner, group and others.
+By default, the owner and group will be taken from the current user, and the built-in
+group "Everyone" will be used to represent others. The user/group can be customized
+with FUSE options "UserName" and "GroupName",
+e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`.
+The permissions on each entry will be set according to [options](#options)
+`--dir-perms` and `--file-perms`, which take a value in traditional Unix
+[numeric notation](https://en.wikipedia.org/wiki/File-system_permissions#Numeric_notation).
+
+The default permissions correspond to `--file-perms 0666 --dir-perms 0777`,
+i.e. read and write permissions to everyone. This means you will not be able
+to start any programs from the mount. To be able to do that you must add
+execute permissions, e.g. `--file-perms 0777 --dir-perms 0777` to add it
+to everyone. If the program needs to write files, chances are you will
+have to enable [VFS File Caching](#vfs-file-caching) as well (see also
+[limitations](#limitations)). Note that the default write permission has
+some restrictions for accounts other than the owner, specifically it lacks
+the "write extended attributes", as explained next.
+
+The mapping of permissions is not always trivial, and the result you see in
+Windows Explorer may not be exactly like you expected. For example, when setting
+a value that includes write access for the group or others scope, this will be
+mapped to individual permissions "write attributes", "write data" and
+"append data", but not "write extended attributes". Windows will then show this
+as basic permission "Special" instead of "Write", because "Write" also covers
+the "write extended attributes" permission. When setting digit 0 for group or
+others, to indicate no permissions, they will still get individual permissions
+"read attributes", "read extended attributes" and "read permissions". This is
+done for compatibility reasons, e.g. to allow users without additional
+permissions to be able to read basic metadata about files like in Unix.
+
+WinFsp 2021 (version 1.9) introduced a new FUSE option "FileSecurity",
+that allows the complete specification of file security descriptors using
+[SDDL](https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format).
+With this you get detailed control of the resulting permissions, compared
+to use of the POSIX permissions described above, and no additional permissions
+will be added automatically for compatibility with Unix. Some example use
+cases follow.
+
+If you set POSIX permissions for only allowing access to the owner,
+using `--file-perms 0600 --dir-perms 0700`, the user group and the built-in
+"Everyone" group will still be given some special permissions, as described
+above. Some programs may then (incorrectly) interpret this as the file being
+accessible by everyone, for example an SSH client may warn about "unprotected
+private key file". You can work around this by specifying
+`-o FileSecurity="D:P(A;;FA;;;OW)"`, which sets file all access (FA) to the
+owner (OW), and nothing else.
+
+When setting write permissions then, except for the owner, this does not
+include the "write extended attributes" permission, as mentioned above.
+This may prevent applications from writing to files, giving permission denied
+error instead. To set working write permissions for the built-in "Everyone"
+group, similar to what it gets by default but with the addition of the
+"write extended attributes", you can specify
+`-o FileSecurity="D:P(A;;FRFW;;;WD)"`, which sets file read (FR) and file
+write (FW) to everyone (WD). If file execute (FX) is also needed, then change
+to `-o FileSecurity="D:P(A;;FRFWFX;;;WD)"`, or set file all access (FA) to
+get full access permissions, including delete, with
+`-o FileSecurity="D:P(A;;FA;;;WD)"`.
+
+### Windows caveats
+
+Drives created as Administrator are not visible to other accounts,
+not even an account that was elevated to Administrator with the
+User Account Control (UAC) feature. A result of this is that if you mount
+to a drive letter from a Command Prompt run as Administrator, and then try
+to access the same drive from Windows Explorer (which does not run as
+Administrator), you will not be able to see the mounted drive.
+
+If you don't need to access the drive from applications running with
+administrative privileges, the easiest way around this is to always
+create the mount from a non-elevated command prompt.
+
+To make mapped drives available to the user account that created them
+regardless if elevated or not, there is a special Windows setting called
+[linked connections](https://docs.microsoft.com/en-us/troubleshoot/windows-client/networking/mapped-drives-not-available-from-elevated-command#detail-to-configure-the-enablelinkedconnections-registry-entry)
+that can be enabled.
+
+It is also possible to make a drive mount available to everyone on the system,
+by running the process creating it as the built-in SYSTEM account.
+There are several ways to do this: One is to use the command-line
+utility [PsExec](https://docs.microsoft.com/en-us/sysinternals/downloads/psexec),
+from Microsoft's Sysinternals suite, which has option `-s` to start
+processes as the SYSTEM account. Another alternative is to run the mount
+command from a Windows Scheduled Task, or a Windows Service, configured
+to run as the SYSTEM account. A third alternative is to use the
+[WinFsp.Launcher infrastructure](https://github.com/winfsp/winfsp/wiki/WinFsp-Service-Architecture).
+Read more in the [install documentation](https://rclone.org/install/).
+Note that when running rclone as another user, it will not use
+the configuration file from your profile unless you tell it to
+with the [`--config`](https://rclone.org/docs/#config-config-file) option.
+Note also that it is now the SYSTEM account that will have the owner
+permissions, and other accounts will have permissions according to the
+group or others scopes. As mentioned above, these will then not get the
+"write extended attributes" permission, and this may prevent writing to
+files. You can work around this with the FileSecurity option, see
+example above.
+
+Note that mapping to a directory path, instead of a drive letter,
+does not suffer from the same limitations.
+
+## Mounting on macOS
+
+Mounting on macOS can be done either via [built-in NFS server](https://rclone.org/commands/rclone_serve_nfs/), [macFUSE](https://osxfuse.github.io/)
+(also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
+FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
+which "mounts" via an NFSv4 local server.
+
+#### Unicode Normalization
+
+It is highly recommended to keep the default of `--no-unicode-normalization=false`
+for all `mount` and `serve` commands on macOS. For details, see [vfs-case-sensitivity](https://rclone.org/commands/rclone_mount/#vfs-case-sensitivity).
+
+### NFS mount
+
+This method spins up an NFS server using [serve nfs](https://rclone.org/commands/rclone_serve_nfs/) command and mounts
+it to the specified mountpoint. If you run this in background mode using `--daemon`, you will need to
+send a SIGTERM signal to the rclone process using the `kill` command to stop the mount.
+
+Note that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the `nfsmount` caching handler.
+This should not be set too low or you may experience errors when trying to access files. The default is 1000000,
+but consider lowering this limit if the server's system resource usage causes problems.
+
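+For example, lowering the handle cache on a memory-constrained machine (the
+value below is only illustrative):
+
+    rclone nfsmount remote:path /path/to/local/mount --nfs-cache-handle-limit 100000
+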
+### macFUSE Notes
+
+If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) from
+the website, rclone will locate the macFUSE libraries without any further intervention.
+If however, macFUSE is installed using the [macports](https://www.macports.org/) package manager,
+the following additional steps are required.
+
+ sudo mkdir /usr/local/lib
+ cd /usr/local/lib
+ sudo ln -s /opt/local/lib/libfuse.2.dylib
+
+### FUSE-T Limitations, Caveats, and Notes
+
+There are some limitations, caveats, and notes about how it works. These are current as
+of FUSE-T version 1.0.14.
+
+#### ModTime update on read
+
+As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):
+
+> File access and modification times cannot be set separately as it seems to be an
+> issue with the NFS client which always modifies both. Can be reproduced with
+> 'touch -m' and 'touch -a' commands
+
+This means that viewing files with various tools, notably macOS Finder, will cause rclone
+to update the modification time of the file. This may make rclone upload a full new copy
+of the file.
+
+#### Read Only mounts
+
+When mounting with `--read-only`, attempts to write to files will fail *silently* as
+opposed to with a clear warning as in macFUSE.
+
+## Limitations
+
+Without the use of `--vfs-cache-mode` this can only write files
+sequentially, it can only seek when reading. This means that many
+applications won't work with their files on an rclone mount without
+`--vfs-cache-mode writes` or `--vfs-cache-mode full`.
+See the [VFS File Caching](#vfs-file-caching) section for more info.
+When using NFS mount on macOS, if you don't specify `--vfs-cache-mode`
+the mount point will be read-only.
+
+The bucket-based remotes (e.g. Swift, S3, Google Cloud Storage, B2)
+do not support the concept of empty directories, so empty
+directories will have a tendency to disappear once they fall out of
+the directory cache.
+
+When `rclone mount` is invoked on Unix with `--daemon` flag, the main rclone
+program will wait for the background mount to become ready or until the timeout
+specified by the `--daemon-wait` flag. On Linux it can check mount status using
+ProcFS so the flag in fact sets **maximum** time to wait, while the real wait
+can be less. On macOS / BSD the time to wait is constant and the check is
+performed only at the end. We advise you to set wait time on macOS reasonably.
+
+Only supported on Linux, FreeBSD, OS X and Windows at the moment.
+
+## rclone nfsmount vs rclone sync/copy
+
+File systems expect things to be 100% reliable, whereas cloud storage
+systems are a long way from 100% reliable. The rclone sync/copy
+commands cope with this with lots of retries. However rclone nfsmount
+can't use retries in the same way without making local copies of the
+uploads. Look at the [VFS File Caching](#vfs-file-caching)
+for solutions to make nfsmount more reliable.
+
+## Attribute caching
+
+You can use the flag `--attr-timeout` to set the time the kernel caches
+the attributes (size, modification time, etc.) for directory entries.
+
+The default is `1s` which caches files just long enough to avoid
+too many callbacks to rclone from the kernel.
+
+In theory 0s should be the correct value for filesystems which can
+change outside the control of the kernel. However this causes quite a
+few problems such as
+[rclone using too much memory](https://github.com/rclone/rclone/issues/2157),
+[rclone not serving files to samba](https://forum.rclone.org/t/rclone-1-39-vs-1-40-mount-issue/5112)
+and [excessive time listing directories](https://github.com/rclone/rclone/issues/2095#issuecomment-371141147).
+
+The kernel can cache the info about a file for the time given by
+`--attr-timeout`. You may see corruption if the remote file changes
+length during this window. It will show up as either a truncated file
+or a file with garbage on the end. With `--attr-timeout 1s` this is
+very unlikely but not impossible. The higher you set `--attr-timeout`
+the more likely it is. The default setting of "1s" is the lowest
+setting which mitigates the problems above.
+
+If you set it higher (`10s` or `1m` say) then the kernel will call
+back to rclone less often making it more efficient, however there is
+more chance of the corruption issue above.
+
+If files don't change on the remote outside of the control of rclone
+then there is no chance of corruption.
+
+This is the same as setting the attr_timeout option in mount.fuse.
+
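+For example, if nothing changes the remote outside of this mount, a longer
+timeout (the value here is only illustrative) reduces kernel callbacks at
+little risk:
+
+    rclone nfsmount remote:path /path/to/local/mount --attr-timeout 10s
+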
+## Filters
+
+Note that all the rclone filters can be used to select a subset of the
+files to be visible in the mount.
+
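+For instance, to make only MP4 files visible in the mount (the pattern is just
+an example; any of the standard filter flags apply):
+
+    rclone nfsmount remote:path /path/to/local/mount --include "*.mp4"
+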
+## systemd
+
+When running rclone nfsmount as a systemd service, it is possible
+to use Type=notify. In this case the service will enter the started state
+after the mountpoint has been successfully set up.
+Units having the rclone nfsmount service specified as a requirement
+will see all files and folders immediately in this mode.
+
+Note that systemd runs mount units without any environment variables including
+`PATH` or `HOME`. This means that tilde (`~`) expansion will not work
+and you should provide `--config` and `--cache-dir` explicitly as absolute
+paths via rclone arguments.
+Since mounting requires the `fusermount` program, rclone will use the fallback
+PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
+is present on this PATH.
+
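+A minimal sketch of such a service unit (the paths, remote name and flags are
+examples and will need adjusting for your system):
+
+```
+# /etc/systemd/system/mnt-data-nfsmount.service (example)
+[Unit]
+Description=rclone nfsmount for /mnt/data
+After=network-online.target
+
+[Service]
+Type=notify
+ExecStart=/usr/bin/rclone nfsmount sftp1:subdir /mnt/data --config=/etc/rclone.conf --cache-dir=/var/cache/rclone --vfs-cache-mode=writes
+
+[Install]
+WantedBy=multi-user.target
+```
+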
+## Rclone as Unix mount helper
+
+The core Unix program `/bin/mount` normally takes the `-t FSTYPE` argument
+then runs the `/sbin/mount.FSTYPE` helper program passing it mount options
+as `-o key=val,...` or `--opt=...`. Automount (classic or systemd) behaves
+in a similar way.
+
+rclone by default expects GNU-style flags `--key val`. To run it as a mount
+helper you should symlink rclone binary to `/sbin/mount.rclone` and optionally
+`/usr/bin/rclonefs`, e.g. `ln -s /usr/bin/rclone /sbin/mount.rclone`.
+rclone will detect it and translate command-line arguments appropriately.
+
+Now you can run classic mounts like this:
+```
+mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
+```
+
+or create systemd mount units:
+```
+# /etc/systemd/system/mnt-data.mount
+[Unit]
+Description=Mount for /mnt/data
+[Mount]
+Type=rclone
+What=sftp1:subdir
+Where=/mnt/data
+Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
+```
+
+optionally accompanied by systemd automount unit
+```
+# /etc/systemd/system/mnt-data.automount
+[Unit]
+Description=AutoMount for /mnt/data
+[Automount]
+Where=/mnt/data
+TimeoutIdleSec=600
+[Install]
+WantedBy=multi-user.target
+```
+
+or add in `/etc/fstab` a line like
+```
+sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
+```
+
+or use classic Automountd.
+Remember to provide explicit `config=...,cache-dir=...` as a workaround for
+mount units being run without `HOME`.
+
+Rclone in the mount helper mode will split `-o` argument(s) by comma, replace `_`
+by `-` and prepend `--` to get the command-line flags. Options containing commas
+or spaces can be wrapped in single or double quotes. Any inner quotes inside outer
+quotes of the same type should be doubled.
+
+Mount option syntax includes a few extra options treated specially:
+
+- `env.NAME=VALUE` will set an environment variable for the mount process.
+ This helps with Automountd and Systemd.mount which don't allow setting
+ custom environment for mount helpers.
+ Typically you will use `env.HTTPS_PROXY=proxy.host:3128` or `env.HOME=/root`
+- `command=cmount` can be used to run `cmount` or any other rclone command
+ rather than the default `mount`.
+- `args2env` will pass mount options to the mount helper running in background
+  via environment variables instead of command line arguments. This allows
+  hiding secrets from commands such as `ps` or `pgrep`.
+- `vv...` will be transformed into appropriate `--verbose=N`
+- standard mount options like `x-systemd.automount`, `_netdev`, `nosuid` and alike
+ are intended only for Automountd and ignored by rclone.
+
+## VFS - Virtual File System
+
+This command uses the VFS layer. This adapts the cloud storage objects
+that rclone uses into something which looks much more like a disk
+filing system.
+
+Cloud storage objects have lots of properties which aren't like disk
+files - you can't extend them or write to the middle of them, so the
+VFS layer has to deal with that. Because there is no one right way of
+doing this there are various options explained below.
+
+The VFS layer also implements a directory cache - this caches info
+about files and directories (but not the data) in memory.
+
+## VFS Directory Cache
+
+Using the `--dir-cache-time` flag, you can control how long a
+directory should be considered up to date and not refreshed from the
+backend. Changes made through the VFS will appear immediately or
+invalidate the cache.
+
+ --dir-cache-time duration Time to cache directory entries for (default 5m0s)
+ --poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
+
+However, changes made directly on the cloud storage by the web
+interface or a different copy of rclone will only be picked up once
+the directory cache expires if the backend configured does not support
+polling for changes. If the backend supports polling, changes will be
+picked up within the polling interval.
+
+You can send a `SIGHUP` signal to rclone for it to flush all
+directory caches, regardless of how old they are. Assuming only one
+rclone instance is running, you can reset the cache like this:
+
+ kill -SIGHUP $(pidof rclone)
+
+If you configure rclone with a [remote control](/rc) then you can use
+rclone rc to flush the whole directory cache:
+
+ rclone rc vfs/forget
+
+Or individual files or directories:
+
+ rclone rc vfs/forget file=path/to/file dir=path/to/dir
+
+## VFS File Buffering
+
+The `--buffer-size` flag determines the amount of memory,
+that will be used to buffer data in advance.
+
+Each open file will try to keep the specified amount of data in memory
+at all times. The buffered data is bound to one open file and won't be
+shared.
+
+This flag is an upper limit for the used memory per open file. The
+buffer will only use memory for data that is downloaded but not
+yet read. If the buffer is empty, only a small amount of memory will
+be used.
+
+The maximum memory used by rclone for buffering can be up to
+`--buffer-size * open files`.
+
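+As an illustration, capping the per-file read buffer at 32 MiB:
+
+    rclone nfsmount remote:path /path/to/local/mount --buffer-size 32M
+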
+## VFS File Caching
+
+These flags control the VFS file caching options. File caching is
+necessary to make the VFS layer appear compatible with a normal file
+system. It can be disabled at the cost of some compatibility.
+
+For example you'll need to enable VFS caching if you want to read and
+write simultaneously to a file. See below for more details.
+
+Note that the VFS cache is separate from the cache backend and you may
+find that you need one or the other or both.
+
+ --cache-dir string Directory rclone will use for caching.
+ --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+ --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+ --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
+ --vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
+
+If run with `-vv` rclone will print the location of the file cache. The
+files are stored in the user cache file area which is OS dependent but
+can be controlled with `--cache-dir` or setting the appropriate
+environment variable.
+
+The cache has 4 different modes selected by `--vfs-cache-mode`.
+The higher the cache mode the more compatible rclone becomes at the
+cost of using disk space.
+
+Note that files are written back to the remote only when they are
+closed and if they haven't been accessed for `--vfs-write-back`
+seconds. If rclone is quit or dies with files that haven't been
+uploaded, these will be uploaded next time rclone is run with the same
+flags.
+
+If using `--vfs-cache-max-size` or `--vfs-cache-min-free-space` note
+that the cache may exceed these quotas for two reasons. Firstly
+because it is only checked every `--vfs-cache-poll-interval`. Secondly
+because open files cannot be evicted from the cache. When
+`--vfs-cache-max-size` or `--vfs-cache-min-free-space` is exceeded,
+rclone will attempt to evict the least accessed files from the cache
+first. rclone will start with files that haven't been accessed for the
+longest. This cache flushing strategy is efficient and more relevant
+files are likely to remain cached.
+
+The `--vfs-cache-max-age` will evict files from the cache
+after the set time since last access has passed. The default value of
+1 hour will start evicting files from cache that haven't been accessed
+for 1 hour. When a cached file is accessed the 1 hour timer is reset to 0
+and will wait for 1 more hour before evicting. Specify the time with
+standard notation, s, m, h, d, w.
+
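+For example, a full cache bounded by both size and age (the values are
+illustrative):
+
+    rclone nfsmount remote:path /path/to/local/mount --vfs-cache-mode full --vfs-cache-max-size 20G --vfs-cache-max-age 12h
+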
+You **should not** run two copies of rclone using the same VFS cache
+with the same or overlapping remotes if using `--vfs-cache-mode > off`.
+This can potentially cause data corruption if you do. You can work
+around this by giving each rclone its own cache hierarchy with
+`--cache-dir`. You don't need to worry about this if the remotes in
+use don't overlap.
+
+### --vfs-cache-mode off
+
+In this mode (the default) the cache will read directly from the remote and write
+directly to the remote without caching anything on disk.
+
+This will mean some operations are not possible
+
+ * Files can't be opened for both read AND write
+ * Files opened for write can't be seeked
+ * Existing files opened for write must have O_TRUNC set
+ * Files open for read with O_TRUNC will be opened write only
+ * Files open for write only will behave as if O_TRUNC was supplied
+ * Open modes O_APPEND, O_TRUNC are ignored
+ * If an upload fails it can't be retried
+
+### --vfs-cache-mode minimal
+
+This is very similar to "off" except that files opened for read AND
+write will be buffered to disk. This means that files opened for
+write will be a lot more compatible, but uses the minimal disk space.
+
+These operations are not possible
+
+ * Files opened for write only can't be seeked
+ * Existing files opened for write must have O_TRUNC set
+ * Files opened for write only will ignore O_APPEND, O_TRUNC
+ * If an upload fails it can't be retried
+
+### --vfs-cache-mode writes
+
+In this mode files opened for read only are still read directly from
+the remote, write only and read/write files are buffered to disk
+first.
+
+This mode should support all normal file system operations.
+
+If an upload fails it will be retried at exponentially increasing
+intervals up to 1 minute.
+
+### --vfs-cache-mode full
+
+In this mode all reads and writes are buffered to and from disk. When
+data is read from the remote this is buffered to disk as well.
+
+In this mode the files in the cache will be sparse files and rclone
+will keep track of which bits of the files it has downloaded.
+
+So if an application only reads the starts of each file, then rclone
+will only buffer the start of the file. These files will appear to be
+their full size in the cache, but they will be sparse files with only
+the data that has been downloaded present in them.
+
+This mode should support all normal file system operations and is
+otherwise identical to `--vfs-cache-mode` writes.
+
+When reading a file rclone will read `--buffer-size` plus
+`--vfs-read-ahead` bytes ahead. The `--buffer-size` is buffered in memory
+whereas the `--vfs-read-ahead` is buffered on disk.
+
+When using this mode it is recommended that `--buffer-size` is not set
+too large and `--vfs-read-ahead` is set large if required.
+
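+For instance, a small memory buffer combined with a larger on-disk read-ahead
+(the sizes are only an example):
+
+    rclone nfsmount remote:path /path/to/local/mount --vfs-cache-mode full --buffer-size 32M --vfs-read-ahead 512M
+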
+**IMPORTANT** not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache
+directory is on a filesystem which doesn't support sparse files and it
+will log an ERROR message if one is detected.
+
+### Fingerprinting
+
+Various parts of the VFS use fingerprinting to see if a local file
+copy has changed relative to a remote file. Fingerprints are made
+from:
+
+- size
+- modification time
+- hash
+
+where available on an object.
+
+On some backends some of these attributes are slow to read (they take
+an extra API call per object, or extra work per object).
+
+For example `hash` is slow with the `local` and `sftp` backends as
+they have to read the entire file and hash it, and `modtime` is slow
+with the `s3`, `swift`, `ftp` and `qingstor` backends because they
+need to do an extra API call to fetch it.
+
+If you use the `--vfs-fast-fingerprint` flag then rclone will not
+include the slow operations in the fingerprint. This makes the
+fingerprinting less accurate but much faster and will improve the
+opening time of cached files.
+
+If you are running a vfs cache over `local`, `s3` or `swift` backends
+then using this flag is recommended.
+
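+For example, with a cached mount over an S3 remote (the remote name `s3:` is
+just an example):
+
+    rclone nfsmount s3:bucket/path /path/to/local/mount --vfs-cache-mode full --vfs-fast-fingerprint
+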
+Note that if you change the value of this flag, the fingerprints of
+the files in the cache may be invalidated and the files will need to
+be downloaded again.
+
+## VFS Chunked Reading
+
+When rclone reads files from a remote it reads them in chunks. This
+means that rather than requesting the whole file rclone reads the
+chunk specified. This can reduce the used download quota for some
+remotes by requesting only chunks from the remote that are actually
+read, at the cost of an increased number of requests.
+
+These flags control the chunking:
+
+ --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
+ --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+
+Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
+and then double the size for each read. When `--vfs-read-chunk-size-limit` is
+specified, and greater than `--vfs-read-chunk-size`, the chunk size for each
+open file will get doubled only until the specified value is reached. If the
+value is "off", which is the default, the limit is disabled and the chunk size
+will grow indefinitely.
+
+With `--vfs-read-chunk-size 100M` and `--vfs-read-chunk-size-limit 0`
+the following parts will be downloaded: 0-100M, 100M-200M, 200M-300M, 300M-400M and so on.
+When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
+0-100M, 100M-300M, 300M-700M, 700M-1200M, 1200M-1700M and so on.
+
+Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
+
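+Expressed as flags, the second example above would be:
+
+    rclone nfsmount remote:path /path/to/local/mount --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M
+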
+## VFS Performance
+
+These flags may be used to enable/disable features of the VFS for
+performance or other reasons. See also the [chunked reading](#vfs-chunked-reading)
+feature.
+
+In particular S3 and Swift benefit hugely from the `--no-modtime` flag
+(or use `--use-server-modtime` for a slightly different effect) as each
+read of the modification time takes a transaction.
+
+ --no-checksum Don't compare checksums on up/download.
+ --no-modtime Don't read/write the modification time (can speed things up).
+ --no-seek Don't allow seeking in files.
+ --read-only Only allow read-only access.
+
+Sometimes rclone is delivered reads or writes out of order. Rather
+than seeking rclone will wait a short time for the in sequence read or
+write to come in. These flags only come into effect when not using an
+on disk cache file.
+
+ --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
+ --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
+
+When using VFS write caching (`--vfs-cache-mode` with value writes or full),
+the global flag `--transfers` can be set to adjust the number of parallel uploads of
+modified files from the cache (the related global flag `--checkers` has no effect on the VFS).
+
+ --transfers int Number of file transfers to run in parallel (default 4)
+
+## VFS Case Sensitivity
+
+Linux file systems are case-sensitive: two files can differ only
+by case, and the exact case must be used when opening a file.
+
+File systems in modern Windows are case-insensitive but case-preserving:
+although existing files can be opened using any case, the exact case used
+to create the file is preserved and available for programs to query.
+It is not allowed for two files in the same directory to differ only by case.
+
+Usually file systems on macOS are case-insensitive. It is possible to make macOS
+file systems case-sensitive but that is not the default.
+
+The `--vfs-case-insensitive` VFS flag controls how rclone handles these
+two cases. If its value is "false", rclone passes file names to the remote
+as-is. If the flag is "true" (or appears without a value on the
+command line), rclone may perform a "fixup" as explained below.
+
+The user may specify a file name to open/delete/rename/etc with a case
+different than what is stored on the remote. If an argument refers
+to an existing file with exactly the same name, then the case of the existing
+file on the disk will be used. However, if a file name with exactly the same
+name is not found but a name differing only by case exists, rclone will
+transparently fixup the name. This fixup happens only when an existing file
+is requested. Case sensitivity of file names created anew by rclone is
+controlled by the underlying remote.
+
+Note that case sensitivity of the operating system running rclone (the target)
+may differ from case sensitivity of a file system presented by rclone (the source).
+The flag controls whether "fixup" is performed to satisfy the target.
+
+If the flag is not provided on the command line, then its default value depends
+on the operating system where rclone runs: "true" on Windows and macOS, "false"
+otherwise. If the flag is provided without a value, then it is "true".
+
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario, hiding the
+duplicates, and logging an error, similar to how this is handled in `rclone
+sync`.
+
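+As an example (illustrative only), a macOS user who wants such NFC/NFD
+duplicates hidden rather than shown twice could add the flag to the mount:
+
+```
+rclone nfsmount remote:path /path/to/mountpoint --vfs-block-norm-dupes
+```
+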
+## VFS Disk Options
+
+This flag allows you to manually set the statistics about the filing system.
+It can be useful when those statistics cannot be read correctly automatically.
+
+ --vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
+
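+For example (the value is purely illustrative), to report a fixed total size
+of 256 GiB to the operating system:
+
+```
+rclone nfsmount remote:path /path/to/mountpoint --vfs-disk-space-total-size 256G
+```
+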
+## Alternate report of used bytes
+
+Some backends, most notably S3, do not report the amount of bytes used.
+If you need this information to be available when running `df` on the
+filesystem, then pass the flag `--vfs-used-is-size` to rclone.
+With this flag set, instead of relying on the backend to report this
+information, rclone will scan the whole remote similar to `rclone size`
+and compute the total used space itself.
+
+_WARNING._ Contrary to `rclone size`, this flag ignores filters so that the
+result is accurate. However, this is very inefficient and may cost lots of API
+calls resulting in extra charges. Use it as a last resort and only with caching.
+
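+As a sketch (illustrative only), to have `df` report the space actually used,
+combined with caching as recommended above:
+
+```
+rclone nfsmount remote:path /path/to/mountpoint --vfs-cache-mode full --vfs-used-is-size
+```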
+
+```
+rclone nfsmount remote:path /path/to/mountpoint [flags]
+```
+
+## Options
+
+```
+ --addr string IPaddress:Port or :Port to bind server to
+ --allow-non-empty Allow mounting over a non-empty directory (not supported on Windows)
+ --allow-other Allow access to other users (not supported on Windows)
+ --allow-root Allow access to root user (not supported on Windows)
+ --async-read Use asynchronous reads (not supported on Windows) (default true)
+ --attr-timeout Duration Time for which file/directory attributes are cached (default 1s)
+ --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)
+ --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s)
+ --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s)
+ --debug-fuse Debug the FUSE internals - needs -v
+ --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows)
+ --devname string Set the device name - default is remote:path
+ --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
+ --dir-perms FileMode Directory permissions (default 0777)
+ --file-perms FileMode File permissions (default 0666)
+ --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)
+ --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
+ -h, --help help for nfsmount
+ --max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
+ --network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
+ --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000)
+ --no-checksum Don't compare checksums on up/download
+ --no-modtime Don't read/write the modification time (can speed things up)
+ --no-seek Don't allow seeking in files
+ --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true)
+ --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only)
+ -o, --option stringArray Option for libfuse/WinFsp (repeat if required)
+ --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
+ --read-only Only allow read-only access
+ --sudo Use sudo to run the mount command as root.
+ --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
+ --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+ --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+ --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+ --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
+ --vfs-case-insensitive If a file name not found, find a case insensitive match
+ --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
+ --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
+ --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
+ --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
+ --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+ --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
+ --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
+ --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
+ --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
+ --volname string Set the volume name (supported on Windows and OSX only)
+ --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)
+```
+
+
+## Filter Options
+
+Flags for filtering directory listings.
+
+```
+ --delete-excluded Delete files on dest excluded from sync
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --ignore-case Ignore case in filters (case insensitive)
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --metadata-exclude stringArray Exclude metadatas matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray Include metadatas matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+```
+
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.
+
+# SEE ALSO
+
+* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
+
# rclone obscure
Obscure password for use in the rclone config file.
@@ -7052,6 +8123,28 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario, hiding the
+duplicates, and logging an error, similar to how this is handled in `rclone
+sync`.
+
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
@@ -7097,6 +8190,7 @@ rclone serve dlna remote:path [flags]
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -7109,7 +8203,7 @@ rclone serve dlna remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -7506,6 +8600,28 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario, hiding the
+duplicates, and logging an error, similar to how this is handled in `rclone
+sync`.
+
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
@@ -7569,6 +8685,7 @@ rclone serve docker [flags]
--socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -7581,7 +8698,7 @@ rclone serve docker [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -7962,6 +9079,28 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario, hiding the
+duplicates, and logging an error, similar to how this is handled in `rclone
+sync`.
+
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
@@ -7994,7 +9133,7 @@ together, if `--auth-proxy` is set the authorized keys option will be
ignored.
There is an example program
-[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
+[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py)
in the rclone source code.
The program's job is to take a `user` and `pass` on the input and turn
@@ -8091,6 +9230,7 @@ rclone serve ftp remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication (default "anonymous")
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -8103,7 +9243,7 @@ rclone serve ftp remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -8583,6 +9723,28 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario, hiding the
+duplicates, and logging an error, similar to how this is handled in `rclone
+sync`.
+
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
@@ -8615,7 +9777,7 @@ together, if `--auth-proxy` is set the authorized keys option will be
ignored.
There is an example program
-[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
+[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py)
in the rclone source code.
The program's job is to take a `user` and `pass` on the input and turn
@@ -8721,6 +9883,7 @@ rclone serve http remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -8733,7 +9896,7 @@ rclone serve http remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -8794,7 +9957,9 @@ NFS mount over local network, you need to specify the listening address and port
Modifying files through NFS protocol requires VFS caching. Usually you will need to specify `--vfs-cache-mode`
in order to be able to write to the mountpoint (full is recommended). If you don't specify VFS cache mode,
-the mount will be read-only.
+the mount will be read-only. Note also that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the caching handler.
+This should not be set too low or you may experience errors when trying to access files. The default is `1000000`, but consider lowering this limit if
+the server's system resource usage causes problems.
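+
+For example (the value is only illustrative), a resource-constrained server
+could lower the limit like this:
+
+    rclone serve nfs remote: --addr :2049 --nfs-cache-handle-limit 100000
+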
To serve NFS over the network use following command:
@@ -9121,6 +10286,28 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario, hiding the
+duplicates, and logging an error, similar to how this is handled in `rclone
+sync`.
+
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
@@ -9155,6 +10342,7 @@ rclone serve nfs remote:path [flags]
--file-perms FileMode File permissions (default 0666)
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for nfs
+ --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
--no-seek Don't allow seeking in files
@@ -9162,6 +10350,7 @@ rclone serve nfs remote:path [flags]
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -9174,7 +10363,7 @@ rclone serve nfs remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -9894,6 +11083,28 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario, hiding the
+duplicates, and logging an error, similar to how this is handled in `rclone
+sync`.
+
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
@@ -9948,6 +11159,7 @@ rclone serve s3 remote:path [flags]
--server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -9960,7 +11172,7 @@ rclone serve s3 remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -10371,6 +11583,28 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario, hiding the
+duplicates, and logging an error, similar to how this is handled in `rclone
+sync`.
+
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
@@ -10403,7 +11637,7 @@ together, if `--auth-proxy` is set the authorized keys option will be
ignored.
There is an example program
-[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
+[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py)
in the rclone source code.
The program's job is to take a `user` and `pass` on the input and turn
@@ -10500,6 +11734,7 @@ rclone serve sftp remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -10512,7 +11747,7 @@ rclone serve sftp remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -11021,6 +12256,28 @@ If the flag is not provided on the command line, then its default value depends
on the operating system where rclone runs: "true" on Windows and macOS, "false"
otherwise. If the flag is provided without a value, then it is "true".
+The `--no-unicode-normalization` flag controls whether a similar "fixup" is
+performed for filenames that differ but are [canonically
+equivalent](https://en.wikipedia.org/wiki/Unicode_equivalence) with respect to
+unicode. Unicode normalization can be particularly helpful for users of macOS,
+which prefers form NFD instead of the NFC used by most other platforms. It is
+therefore highly recommended to keep the default of `false` on macOS, to avoid
+encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the `--vfs-block-norm-dupes`
+flag allows hiding these duplicates. This comes with a performance tradeoff, as
+rclone will have to scan the entire directory for duplicates when listing a
+directory. For this reason, it is recommended to leave this disabled if not
+needed. However, macOS users may wish to consider using it, as otherwise, if a
+remote directory contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the mount,
+and both will appear to be editable, however, editing either version will
+actually result in only the NFD version getting edited under the hood.
+`--vfs-block-norm-dupes` prevents this confusion by detecting this scenario, hiding the
+duplicates, and logging an error, similar to how this is handled in `rclone
+sync`.
+
## VFS Disk Options
This flag allows you to manually set the statistics about the filing system.
@@ -11053,7 +12310,7 @@ together, if `--auth-proxy` is set the authorized keys option will be
ignored.
There is an example program
-[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/test_proxy.py)
+[bin/test_proxy.py](https://github.com/rclone/rclone/blob/master/bin/test_proxy.py)
in the rclone source code.
The program's job is to take a `user` and `pass` on the input and turn
@@ -11161,6 +12418,7 @@ rclone serve webdav remote:path [flags]
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -11173,7 +12431,7 @@ rclone serve webdav remote:path [flags]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -11978,18 +13236,21 @@ This can be used when scripting to make aged backups efficiently, e.g.
## Metadata support {#metadata}
-Metadata is data about a file which isn't the contents of the file.
-Normally rclone only preserves the modification time and the content
-(MIME) type where possible.
+Metadata is data about a file (or directory) which isn't the contents
+of the file (or directory). Normally rclone only preserves the
+modification time and the content (MIME) type where possible.
-Rclone supports preserving all the available metadata on files (not
-directories) when using the `--metadata` or `-M` flag.
+Rclone supports preserving all the available metadata on files and
+directories when using the `--metadata` or `-M` flag.
Exactly what metadata is supported and what that support means depends
on the backend. Backends that support metadata have a metadata section
in their docs and are listed in the [features table](https://rclone.org/overview/#features)
(Eg [local](https://rclone.org/local/#metadata), [s3](/s3/#metadata))
+Some backends don't support metadata, some only support metadata on
+files and some support metadata on both files and directories.
+
Rclone only supports a one-time sync of metadata. This means that
metadata will be synced from the source object to the destination
object only when the source object has changed and needs to be
@@ -12010,6 +13271,14 @@ The [--metadata-mapper](#metadata-mapper) flag can be used to pass the
name of a program in which can transform metadata when it is being
copied from source to destination.
+Rclone supports `--metadata-set` and `--metadata-mapper` when doing
+server side `Move` and server side `Copy`, but not when doing server
+side `DirMove` (renaming a directory) as this would involve recursing
+into the directory. Note that you can disable `DirMove` with
+`--disable DirMove` and rclone will revert to using `Move` for
+each individual object where `--metadata-set` and `--metadata-mapper`
+are supported.
+
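+For example, a sketch of a server-side move that also sets metadata, falling
+back to per-object `Move` by disabling `DirMove` (names and values are
+illustrative):
+
+```
+rclone move -M --metadata-set "description=archived" --disable DirMove remote:src remote:dst
+```
+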
### Types of metadata
Metadata is divided into two type. System metadata and User metadata.
@@ -12639,6 +13908,26 @@ triggering follow-on actions if data was copied, or skipping if not.
NB: Enabling this option turns a usually non-fatal error into a potentially
fatal one - please check and adjust your scripts accordingly!
+### --fix-case ###
+
+Normally, a sync to a case insensitive dest (such as macOS / Windows) will
+not result in a matching filename if the source and dest filenames have
+casing differences but are otherwise identical. For example, syncing `hello.txt`
+to `HELLO.txt` will normally result in the dest filename remaining `HELLO.txt`.
+If `--fix-case` is set, then `HELLO.txt` will be renamed to `hello.txt`
+to match the source.
+
+NB:
+- directory names with incorrect casing will also be fixed
+- `--fix-case` will be ignored if `--immutable` is set
+- using `--local-case-sensitive` instead is not advisable;
+it will cause `HELLO.txt` to get deleted!
+- the old dest filename must not be excluded by filters.
+Be especially careful with [`--files-from`](https://rclone.org/filtering/#files-from-read-list-of-source-file-names),
+which does not respect [`--ignore-case`](https://rclone.org/filtering/#ignore-case-make-searches-case-insensitive)!
+- on remotes that do not support server-side move, `--fix-case` will require
+downloading the file and re-uploading it. To avoid this, do not use `--fix-case`.
+
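+A minimal example (paths are placeholders):
+
+```
+rclone sync --fix-case /path/to/src remote:dst
+```
+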
### --fs-cache-expire-duration=TIME
When using rclone via the API rclone caches created remotes for 5
@@ -13072,10 +14361,10 @@ some context for the `Metadata` which may be important.
- `SrcFsType` is the name of the source backend.
- `DstFs` is the config string for the remote that the object is being copied to
- `DstFsType` is the name of the destination backend.
-- `Remote` is the path of the file relative to the root.
-- `Size`, `MimeType`, `ModTime` are attributes of the file.
+- `Remote` is the path of the object relative to the root.
+- `Size`, `MimeType`, `ModTime` are attributes of the object.
- `IsDir` is `true` if this is a directory (not yet implemented).
-- `ID` is the source `ID` of the file if known.
+- `ID` is the source `ID` of the object if known.
- `Metadata` is the backend specific metadata as described in the backend docs.
```json
@@ -13145,7 +14434,7 @@ json.dump(o, sys.stdout, indent="\t")
```
You can find this example (slightly expanded) in the rclone source code at
-[bin/test_metadata_mapper.py](https://github.com/rclone/rclone/blob/master/test_metadata_mapper.py).
+[bin/test_metadata_mapper.py](https://github.com/rclone/rclone/blob/master/bin/test_metadata_mapper.py).
If you want to see the input to the metadata mapper and the output
returned from it in the log you can use `-vv --dump mapper`.
@@ -13205,7 +14494,7 @@ use multiple threads to transfer the file (default 256M).
Capable backends are marked in the
[overview](https://rclone.org/overview/#optional-features) as `MultithreadUpload`. (They
-need to implement either the `OpenWriterAt` or `OpenChunkedWriter`
+need to implement either the `OpenWriterAt` or `OpenChunkWriter`
internal interfaces). These include include, `local`, `s3`,
`azureblob`, `b2`, `oracleobjectstorage` and `smb` at the time of
writing.
@@ -13318,6 +14607,11 @@ files if they are incorrect as it would normally.
This can be used if the remote is being synced with another tool also
(e.g. the Google Drive client).
+### --no-update-dir-modtime ###
+
+When using this flag, rclone won't update modification times of remote
+directories if they are incorrect as it would normally.
+
### --order-by string ###
The `--order-by` flag controls the order in which files in the backlog
@@ -14415,7 +15709,7 @@ For more help and alternate methods see: https://rclone.org/remote_setup/
Execute the following on the machine with the web browser (same rclone
version recommended):
- rclone authorize "amazon cloud drive"
+ rclone authorize "dropbox"
Then paste the result below:
result>
@@ -14424,7 +15718,7 @@ result>
Then on your main desktop machine
```
-rclone authorize "amazon cloud drive"
+rclone authorize "dropbox"
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
@@ -15024,7 +16318,7 @@ E.g. for an alternative `filter-file.txt`:
- *
Files `file1.jpg`, `file3.png` and `file2.avi` are listed whilst
-`secret17.jpg` and files without the suffix .jpg` or `.png` are excluded.
+`secret17.jpg` and files without the suffix `.jpg` or `.png` are excluded.
E.g. for an alternative `filter-file.txt`:
@@ -16001,6 +17295,26 @@ See the [config password](https://rclone.org/commands/rclone_config_password/) c
**Authentication is required for this call.**
+### config/paths: Reads the config file path and other important paths. {#config-paths}
+
+Returns a JSON object with the following keys:
+
+- config: path to config file
+- cache: path to root of cache directory
+- temp: path to root of temporary directory
+
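+For example, it can be called from the command line (here using `--loopback`
+so no rc server is needed):
+
+    rclone rc --loopback config/paths
+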
+Eg
+
+ {
+ "cache": "/home/USER/.cache/rclone",
+ "config": "/home/USER/.rclone.conf",
+ "temp": "/tmp"
+ }
+
+See the [config paths](https://rclone.org/commands/rclone_config_paths/) command for more information on the above.
+
+**Authentication is required for this call.**
+
### config/providers: Shows how providers are configured in the config file. {#config-providers}
Returns a JSON object:
@@ -16786,6 +18100,50 @@ This command does not have a command line equivalent so use this instead:
rclone rc --loopback operations/fsinfo fs=remote:
+### operations/hashsum: Produces a hashsum file for all the objects in the path. {#operations-hashsum}
+
+Produces a hash file for all the objects in the path using the hash
+named. The output is in the same format as the standard
+md5sum/sha1sum tool.
+
+This takes the following parameters:
+
+- fs - a remote name string e.g. "drive:" for the source, "/" for local filesystem
+ - this can point to a file and just that file will be returned in the listing.
+- hashType - type of hash to be used
+- download - check by downloading rather than with hash (boolean)
+- base64 - output the hashes in base64 rather than hex (boolean)
+
+If you supply the download flag, it will download the data from the
+remote and create the hash on the fly. This can be useful for remotes
+that don't support the given hash or if you really want to check all
+the data.
+
+Note that if you wish to supply a checkfile to check hashes against
+the current files then you should use operations/check instead of
+operations/hashsum.
+
+Returns:
+
+- hashsum - array of strings of the hashes
+- hashType - type of hash used
+
+Example:
+
+ $ rclone rc --loopback operations/hashsum fs=bin hashType=MD5 download=true base64=true
+ {
+ "hashType": "md5",
+ "hashsum": [
+ "WTSVLpuiXyJO_kGzJerRLg== backend-versions.sh",
+ "v1b_OlWCJO9LtNq3EIKkNQ== bisect-go-rclone.sh",
+        "VHbmHzHh4taXzgag8BAIKQ== bisect-rclone.sh"
+ ]
+ }
+
+See the [hashsum](https://rclone.org/commands/rclone_hashsum/) command for more information on the above.
+
+**Authentication is required for this call.**
+
### operations/list: List the given remote and path in JSON format {#operations-list}
This takes the following parameters:
@@ -17152,7 +18510,9 @@ This takes the following parameters
- ignoreListingChecksum - Do not use checksums for listings
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
Use at your own risk!
-- workdir - server directory for history files (default: /home/ncw/.cache/rclone/bisync)
+- workdir - server directory for history files (default: `~/.cache/rclone/bisync`)
+- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
- noCleanup - retain working files
See [bisync command help](https://rclone.org/commands/rclone_bisync/)
@@ -17552,7 +18912,6 @@ Here is an overview of the major features of each cloud storage system.
| ---------------------------- |:-----------------:|:-------:|:----------------:|:---------------:|:---------:|:--------:|
| 1Fichier | Whirlpool | - | No | Yes | R | - |
| Akamai Netstorage | MD5, SHA256 | R/W | No | No | R | - |
-| Amazon Drive | MD5 | - | Yes | No | R | - |
| Amazon S3 (or S3 compatible) | MD5 | R/W | No | No | R/W | RWU |
| Backblaze B2 | SHA1 | R/W | No | No | R/W | - |
| Box | SHA1 | R/W | Yes | No | - | - |
@@ -17561,7 +18920,7 @@ Here is an overview of the major features of each cloud storage system.
| Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
| FTP | - | R/W ¹⁰ | No | No | - | - |
| Google Cloud Storage | MD5 | R/W | No | No | R/W | - |
-| Google Drive | MD5, SHA1, SHA256 | R/W | No | Yes | R/W | - |
+| Google Drive | MD5, SHA1, SHA256 | DR/W | No | Yes | R/W | DRWU |
| Google Photos | - | - | No | Yes | R | - |
| HDFS | - | R/W | No | No | - | - |
| HiDrive | HiDrive ¹² | R/W | No | No | - | - |
@@ -17575,7 +18934,7 @@ Here is an overview of the major features of each cloud storage system.
| Memory | MD5 | R/W | No | No | - | - |
| Microsoft Azure Blob Storage | MD5 | R/W | No | No | R/W | - |
| Microsoft Azure Files Storage | MD5 | R/W | Yes | No | R/W | - |
-| Microsoft OneDrive | QuickXorHash ⁵ | R/W | Yes | No | R | - |
+| Microsoft OneDrive | QuickXorHash ⁵ | DR/W | Yes | No | R | DRW |
| OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - |
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
@@ -17587,7 +18946,7 @@ Here is an overview of the major features of each cloud storage system.
| QingStor | MD5 | - ⁹ | No | No | R/W | - |
| Quatrix by Maytech | - | R/W | No | No | - | - |
| Seafile | - | - | No | No | - | - |
-| SFTP | MD5, SHA1 ² | R/W | Depends | No | - | - |
+| SFTP | MD5, SHA1 ² | DR/W | Depends | No | - | - |
| Sia | - | - | No | No | - | - |
| SMB | - | R/W | Yes | No | - | - |
| SugarSync | - | - | No | No | - | - |
@@ -17596,7 +18955,7 @@ Here is an overview of the major features of each cloud storage system.
| WebDAV | MD5, SHA1 ³ | R ⁴ | Depends | No | - | - |
| Yandex Disk | MD5 | R/W | No | No | R | - |
| Zoho WorkDrive | - | - | No | No | - | - |
-| The local filesystem | All | R/W | Depends | No | - | RWU |
+| The local filesystem | All | DR/W | Depends | No | - | DRWU |
¹ Dropbox supports [its own custom
hash](https://www.dropbox.com/developers/reference/content-hash).
@@ -17650,13 +19009,21 @@ systems they must support a common hash type.
Almost all cloud storage systems store some sort of timestamp
on objects, but several of them not something that is appropriate
to use for syncing. E.g. some backends will only write a timestamp
-that represent the time of the upload. To be relevant for syncing
+that represents the time of the upload. To be relevant for syncing
it should be able to store the modification time of the source
object. If this is not the case, rclone will only check the file
size by default, though can be configured to check the file hash
(with the `--checksum` flag). Ideally it should also be possible to
change the timestamp of an existing file without having to re-upload it.
+| Key | Explanation |
+|-----|-------------|
+| `-` | ModTimes not supported - times likely the upload time |
+| `R` | ModTimes supported on files but can't be changed without re-upload |
+| `R/W` | Read and Write ModTimes fully supported on files |
+| `DR` | ModTimes supported on files and directories but can't be changed without re-upload |
+| `DR/W` | Read and Write ModTimes fully supported on files and directories |
+
Storage systems with a `-` in the ModTime column, means the
modification read on objects is not the modification time of the
file when uploaded. It is most likely the time the file was uploaded,
@@ -17678,6 +19045,9 @@ in a `mount` will be silently ignored.
Storage systems with `R/W` (for read/write) in the ModTime column,
means they do also support modtime-only operations.
+For storage systems with `D` in the ModTime column, the symbols
+following the `D` apply to directories as well as files.
+
### Case Insensitive ###
If a cloud storage systems is case sensitive then it is possible to
@@ -17990,9 +19360,12 @@ The levels of metadata support are
| Key | Explanation |
|-----|-------------|
-| `R` | Read only System Metadata |
-| `RW` | Read and write System Metadata |
-| `RWU` | Read and write System Metadata and read and write User Metadata |
+| `R` | Read only System Metadata on files only |
+| `RW` | Read and write System Metadata on files only |
+| `RWU` | Read and write System Metadata and read and write User Metadata on files only |
+| `DR` | Read only System Metadata on files and directories |
+| `DRW` | Read and write System Metadata on files and directories |
+| `DRWU` | Read and write System Metadata and read and write User Metadata on files and directories |
See [the metadata docs](https://rclone.org/docs/#metadata) for more info.
@@ -18005,7 +19378,6 @@ upon backend-specific capabilities.
| ---------------------------- |:-----:|:----:|:----:|:-------:|:-------:|:-----:|:------------:|:------------------|:------------:|:-----:|:--------:|
| 1Fichier | No | Yes | Yes | No | No | No | No | No | Yes | No | Yes |
| Akamai Netstorage | Yes | No | No | No | No | Yes | Yes | No | No | No | Yes |
-| Amazon Drive | Yes | No | Yes | Yes | No | No | No | No | No | No | Yes |
| Amazon S3 (or S3 compatible) | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
| Backblaze B2 | No | Yes | No | No | Yes | Yes | Yes | Yes | Yes | No | No |
| Box | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
@@ -18019,6 +19391,7 @@ upon backend-specific capabilities.
| HDFS | Yes | No | Yes | Yes | No | No | Yes | No | No | Yes | Yes |
| HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes |
| HTTP | No | No | No | No | No | No | No | No | No | No | Yes |
+| ImageKit | Yes | Yes | Yes | No | No | No | No | No | No | No | Yes |
| Internet Archive | No | Yes | No | No | Yes | Yes | No | No | Yes | Yes | No |
| Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes |
| Koofr | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes |
@@ -18048,7 +19421,7 @@ upon backend-specific capabilities.
| WebDAV | Yes | Yes | Yes | Yes | No | No | Yes ³ | No | No | Yes | Yes |
| Yandex Disk | Yes | Yes | Yes | Yes | Yes | No | Yes | No | Yes | Yes | Yes |
| Zoho WorkDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes |
-| The local filesystem | Yes | No | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |
+| The local filesystem | No | No | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes |
¹ Note Swift implements this in order to delete directory markers but
it doesn't actually have a quicker way of deleting files other than
@@ -18167,7 +19540,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -18181,6 +19554,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -18201,6 +19575,7 @@ Flags just used for `rclone sync`.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
+ --fix-case Force rename of case insensitive dest to match source
--ignore-errors Delete even if there are I/O errors
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
@@ -18256,7 +19631,7 @@ General networking and HTTP stuff.
--tpslimit float Limit HTTP transactions per second to this
--tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
--use-cookies Enable session cookiejar
- --user-agent string Set the user-agent to a specified string (default "rclone/v1.65.0")
+ --user-agent string Set the user-agent to a specified string (default "rclone/v1.66.0")
```
@@ -18440,14 +19815,7 @@ Flags to control the Remote Control API.
Backend only flags. These can be set in the config file also.
```
- --acd-auth-url string Auth server URL
- --acd-client-id string OAuth Client Id
- --acd-client-secret string OAuth Client Secret
- --acd-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
- --acd-token string OAuth Access Token as a JSON blob
- --acd-token-url string Token server url
- --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
+ --alias-description string Description of the remote
--alias-remote string Remote or path to alias
--azureblob-access-tier string Access tier of blob: hot, cool, cold or archive
--azureblob-account string Azure Storage Account Name
@@ -18458,6 +19826,8 @@ Backend only flags. These can be set in the config file also.
--azureblob-client-id string The ID of the client in use
--azureblob-client-secret string One of the service principal's client secrets
--azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
+ --azureblob-delete-snapshots string Set to specify how to deal with snapshots on blob deletion
+ --azureblob-description string Description of the remote
--azureblob-directory-markers Upload an empty object with a trailing slash when a new directory is created
--azureblob-disable-checksum Don't store MD5 checksum with object metadata
--azureblob-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
@@ -18488,6 +19858,7 @@ Backend only flags. These can be set in the config file also.
--azurefiles-client-secret string One of the service principal's client secrets
--azurefiles-client-send-certificate-chain Send the certificate chain when using certificate auth
--azurefiles-connection-string string Azure Files Connection String
+ --azurefiles-description string Description of the remote
--azurefiles-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8,Dot)
--azurefiles-endpoint string Endpoint for the service
--azurefiles-env-auth Read credentials from runtime (environment variables, CLI or MSI)
@@ -18507,8 +19878,9 @@ Backend only flags. These can be set in the config file also.
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
--b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
+ --b2-description string Description of the remote
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
- --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
+ --b2-download-auth-duration Duration Time before the public link authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
--b2-download-url string Custom endpoint for downloads
--b2-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--b2-endpoint string Endpoint for the service
@@ -18527,6 +19899,7 @@ Backend only flags. These can be set in the config file also.
--box-client-id string OAuth Client Id
--box-client-secret string OAuth Client Secret
--box-commit-retries int Max number of times to try committing a multipart file (default 100)
+ --box-description string Description of the remote
--box-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
--box-impersonate string Impersonate this user ID when using a service account
--box-list-chunk int Size of listing chunk 1-1000 (default 1000)
@@ -18543,6 +19916,7 @@ Backend only flags. These can be set in the config file also.
--cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
+ --cache-description string Description of the remote
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
--cache-plex-password string The password of the Plex user (obscured)
@@ -18556,15 +19930,19 @@ Backend only flags. These can be set in the config file also.
--cache-workers int How many workers should run in parallel to download chunks (default 4)
--cache-writes Cache file data on writes through the FS
--chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
+ --chunker-description string Description of the remote
--chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
--chunker-hash-type string Choose how chunker handles hash sums (default "md5")
--chunker-remote string Remote to chunk/unchunk
+ --combine-description string Description of the remote
--combine-upstreams SpaceSepList Upstreams for combining
+ --compress-description string Description of the remote
--compress-level int GZIP compression level (-2 to 9) (default -1)
--compress-mode string Compression mode (default "gzip")
--compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
--compress-remote string Remote to compress
-L, --copy-links Follow symlinks and copy the pointed to item
+ --crypt-description string Description of the remote
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
--crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
--crypt-filename-encryption string How to encrypt the filenames (default "standard")
@@ -18575,6 +19953,7 @@ Backend only flags. These can be set in the config file also.
--crypt-remote string Remote to encrypt/decrypt
--crypt-server-side-across-configs Deprecated: use --server-side-across-configs instead
--crypt-show-mapping For all files listed show how the names encrypt
+ --crypt-strict-names If set, this will raise an error when crypt comes across a filename that can't be decrypted
--crypt-suffix string If this is set it will override the default suffix of ".bin" (default ".bin")
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs
@@ -18584,6 +19963,7 @@ Backend only flags. These can be set in the config file also.
--drive-client-id string Google Application Client Id
--drive-client-secret string OAuth Client Secret
--drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
+ --drive-description string Description of the remote
--drive-disable-http2 Disable drive using http2 (default true)
--drive-encoding Encoding The encoding for the backend (default InvalidUtf8)
--drive-env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars)
@@ -18632,6 +20012,7 @@ Backend only flags. These can be set in the config file also.
--dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
--dropbox-client-id string OAuth Client Id
--dropbox-client-secret string OAuth Client Secret
+ --dropbox-description string Description of the remote
--dropbox-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
--dropbox-impersonate string Impersonate this user when using a business account
--dropbox-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
@@ -18641,10 +20022,12 @@ Backend only flags. These can be set in the config file also.
--dropbox-token-url string Token server url
--fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
--fichier-cdn Set if you wish to use CDN download links
+ --fichier-description string Description of the remote
--fichier-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
--fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
--fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
--fichier-shared-folder string If you want to download a shared folder, add this parameter
+ --filefabric-description string Description of the remote
--filefabric-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--filefabric-permanent-token string Permanent Authentication Token
--filefabric-root-folder-id string ID of the root folder
@@ -18655,6 +20038,7 @@ Backend only flags. These can be set in the config file also.
--ftp-ask-password Allow asking for FTP password when needed
--ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
+ --ftp-description string Description of the remote
--ftp-disable-epsv Disable using EPSV even if server advertises support
--ftp-disable-mlsd Disable using MLSD even if server advertises support
--ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
@@ -18680,6 +20064,7 @@ Backend only flags. These can be set in the config file also.
--gcs-client-id string OAuth Client Id
--gcs-client-secret string OAuth Client Secret
--gcs-decompress If set this will decompress gzip encoded objects
+ --gcs-description string Description of the remote
--gcs-directory-markers Upload an empty object with a trailing slash when a new directory is created
--gcs-encoding Encoding The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gcs-endpoint string Endpoint for the service
@@ -18700,6 +20085,7 @@ Backend only flags. These can be set in the config file also.
--gphotos-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
--gphotos-client-id string OAuth Client Id
--gphotos-client-secret string OAuth Client Secret
+ --gphotos-description string Description of the remote
--gphotos-encoding Encoding The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gphotos-include-archived Also view and download archived media
--gphotos-read-only Set to make the Google Photos backend read only
@@ -18708,10 +20094,12 @@ Backend only flags. These can be set in the config file also.
--gphotos-token string OAuth Access Token as a JSON blob
--gphotos-token-url string Token server url
--hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
+ --hasher-description string Description of the remote
--hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
--hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
--hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
--hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
+ --hdfs-description string Description of the remote
--hdfs-encoding Encoding The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
--hdfs-namenode CommaSepList Hadoop name nodes and ports
--hdfs-service-principal-name string Kerberos service principal name for the namenode
@@ -18720,6 +20108,7 @@ Backend only flags. These can be set in the config file also.
--hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
--hidrive-client-id string OAuth Client Id
--hidrive-client-secret string OAuth Client Secret
+ --hidrive-description string Description of the remote
--hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
--hidrive-encoding Encoding The encoding for the backend (default Slash,Dot)
--hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
@@ -18730,10 +20119,12 @@ Backend only flags. These can be set in the config file also.
--hidrive-token-url string Token server url
--hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
--hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
+ --http-description string Description of the remote
--http-headers CommaSepList Set HTTP headers for all transactions
--http-no-head Don't use HEAD requests
--http-no-slash Set this if the site doesn't end directories with /
--http-url string URL of HTTP host to connect to
+ --imagekit-description string Description of the remote
--imagekit-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Dollar,Question,Hash,Percent,BackSlash,Del,Ctl,InvalidUtf8,Dot,SquareBracket)
--imagekit-endpoint string You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)
--imagekit-only-signed Restrict unsigned image URLs If you have configured Restrict unsigned image URLs in your dashboard settings, set this to true
@@ -18742,6 +20133,7 @@ Backend only flags. These can be set in the config file also.
--imagekit-upload-tags string Tags to add to the uploaded files, e.g. "tag1,tag2"
--imagekit-versions Include old versions in directory listings
--internetarchive-access-key-id string IAS3 Access Key
+ --internetarchive-description string Description of the remote
--internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
--internetarchive-encoding Encoding The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
--internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
@@ -18751,6 +20143,7 @@ Backend only flags. These can be set in the config file also.
--jottacloud-auth-url string Auth server URL
--jottacloud-client-id string OAuth Client Id
--jottacloud-client-secret string OAuth Client Secret
+ --jottacloud-description string Description of the remote
--jottacloud-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
@@ -18759,6 +20152,7 @@ Backend only flags. These can be set in the config file also.
--jottacloud-token-url string Token server url
--jottacloud-trashed-only Only show files that are in the trash
    --jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fails (default 10Mi)
+ --koofr-description string Description of the remote
--koofr-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--koofr-endpoint string The Koofr API endpoint to use
--koofr-mountid string Mount ID of the mount to use
@@ -18766,10 +20160,12 @@ Backend only flags. These can be set in the config file also.
--koofr-provider string Choose your storage provider
--koofr-setmtime Does the backend support setting modification time (default true)
--koofr-user string Your user name
+ --linkbox-description string Description of the remote
--linkbox-token string Token from https://www.linkbox.to/admin/account
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-case-insensitive Force the filesystem to report itself as case insensitive
--local-case-sensitive Force the filesystem to report itself as case sensitive
+ --local-description string Description of the remote
--local-encoding Encoding The encoding for the backend (default Slash,Dot)
--local-no-check-updated Don't check to see if the files change during upload
--local-no-preallocate Disable preallocation of disk space for transferred files
@@ -18782,6 +20178,7 @@ Backend only flags. These can be set in the config file also.
--mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
--mailru-client-id string OAuth Client Id
--mailru-client-secret string OAuth Client Secret
+ --mailru-description string Description of the remote
--mailru-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--mailru-pass string Password (obscured)
--mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
@@ -18792,12 +20189,15 @@ Backend only flags. These can be set in the config file also.
--mailru-token-url string Token server url
--mailru-user string User name (usually email)
--mega-debug Output more debug from Mega
+ --mega-description string Description of the remote
--mega-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
--mega-hard-delete Delete files permanently rather than putting them into the trash
--mega-pass string Password (obscured)
--mega-use-https Use HTTPS for transfers
--mega-user string User name
+ --memory-description string Description of the remote
--netstorage-account string Set the NetStorage account name
+ --netstorage-description string Description of the remote
--netstorage-host string Domain+path of NetStorage host to connect to
--netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
--netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
@@ -18809,6 +20209,7 @@ Backend only flags. These can be set in the config file also.
--onedrive-client-id string OAuth Client Id
--onedrive-client-secret string OAuth Client Secret
--onedrive-delta If set rclone will use delta listing to implement recursive listings
+ --onedrive-description string Description of the remote
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
--onedrive-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
@@ -18818,6 +20219,7 @@ Backend only flags. These can be set in the config file also.
--onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
--onedrive-link-type string Set the type of the links created by the link command (default "view")
--onedrive-list-chunk int Size of listing chunk (default 1000)
+ --onedrive-metadata-permissions Bits Control whether permissions should be read or written in metadata (default off)
--onedrive-no-versions Remove all versions on modifying operations
--onedrive-region string Choose national cloud region for OneDrive (default "global")
--onedrive-root-folder-id string ID of the root folder
@@ -18831,6 +20233,7 @@ Backend only flags. These can be set in the config file also.
--oos-config-profile string Profile name inside the oci config file (default "Default")
--oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
--oos-copy-timeout Duration Timeout for copy (default 1m0s)
+ --oos-description string Description of the remote
--oos-disable-checksum Don't store MD5 checksum with object metadata
--oos-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
--oos-endpoint string Endpoint for Object storage API
@@ -18849,12 +20252,14 @@ Backend only flags. These can be set in the config file also.
--oos-upload-concurrency int Concurrency for multipart uploads (default 10)
--oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
+ --opendrive-description string Description of the remote
--opendrive-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
--opendrive-password string Password (obscured)
--opendrive-username string Username
--pcloud-auth-url string Auth server URL
--pcloud-client-id string OAuth Client Id
--pcloud-client-secret string OAuth Client Secret
+ --pcloud-description string Description of the remote
--pcloud-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
--pcloud-password string Your pcloud password (obscured)
@@ -18865,6 +20270,7 @@ Backend only flags. These can be set in the config file also.
--pikpak-auth-url string Auth server URL
--pikpak-client-id string OAuth Client Id
--pikpak-client-secret string OAuth Client Secret
+ --pikpak-description string Description of the remote
--pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
--pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
--pikpak-pass string Pikpak password (obscured)
@@ -18877,11 +20283,13 @@ Backend only flags. These can be set in the config file also.
--premiumizeme-auth-url string Auth server URL
--premiumizeme-client-id string OAuth Client Id
--premiumizeme-client-secret string OAuth Client Secret
+ --premiumizeme-description string Description of the remote
--premiumizeme-encoding Encoding The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--premiumizeme-token string OAuth Access Token as a JSON blob
--premiumizeme-token-url string Token server url
--protondrive-2fa string The 2FA code
--protondrive-app-version string The app version string (default "macos-drive@1.0.0-alpha.1+rclone")
+ --protondrive-description string Description of the remote
--protondrive-enable-caching Caches the files and folders metadata to reduce API calls (default true)
--protondrive-encoding Encoding The encoding for the backend (default Slash,LeftSpace,RightSpace,InvalidUtf8,Dot)
--protondrive-mailbox-password string The mailbox password of your two-password proton account (obscured)
@@ -18892,12 +20300,14 @@ Backend only flags. These can be set in the config file also.
--putio-auth-url string Auth server URL
--putio-client-id string OAuth Client Id
--putio-client-secret string OAuth Client Secret
+ --putio-description string Description of the remote
--putio-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--putio-token string OAuth Access Token as a JSON blob
--putio-token-url string Token server url
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
--qingstor-connection-retries int Number of connection retries (default 3)
+ --qingstor-description string Description of the remote
--qingstor-encoding Encoding The encoding for the backend (default Slash,Ctl,InvalidUtf8)
    --qingstor-endpoint string Enter an endpoint URL to connect to the QingStor API
--qingstor-env-auth Get QingStor credentials from runtime
@@ -18906,18 +20316,21 @@ Backend only flags. These can be set in the config file also.
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--qingstor-zone string Zone to connect to
--quatrix-api-key string API key for accessing Quatrix account
+ --quatrix-description string Description of the remote
--quatrix-effective-upload-time string Wanted upload time for one chunk (default "4s")
--quatrix-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--quatrix-hard-delete Delete files permanently rather than putting them into the trash
--quatrix-host string Host name of Quatrix account
--quatrix-maximal-summary-chunk-size SizeSuffix The maximal summary for all chunks. It should not be less than 'transfers'*'minimal_chunk_size' (default 95.367Mi)
--quatrix-minimal-chunk-size SizeSuffix The minimal size for one chunk (default 9.537Mi)
+ --quatrix-skip-project-folders Skip project folders in operations
--s3-access-key-id string AWS Access Key ID
--s3-acl string Canned ACL used when creating buckets and storing or copying objects
--s3-bucket-acl string Canned ACL used when creating buckets
--s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
--s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
--s3-decompress If set this will decompress gzip encoded objects
+ --s3-description string Description of the remote
--s3-directory-markers Upload an empty object with a trailing slash when a new directory is created
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-disable-http2 Disable usage of http2 for S3 backends
@@ -18952,19 +20365,22 @@ Backend only flags. These can be set in the config file also.
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
--s3-storage-class string The storage class to use when storing new objects in S3
--s3-sts-endpoint string Endpoint for STS
- --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
+ --s3-upload-concurrency int Concurrency for multipart uploads and copies (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
--s3-use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header (default unset)
--s3-use-already-exists Tristate Set if rclone should report BucketAlreadyExists errors on bucket creation (default unset)
+ --s3-use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support)
--s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
--s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset)
--s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
--s3-v2-auth If true use v2 authentication
--s3-version-at Time Show file versions as they were at the specified time (default off)
+ --s3-version-deleted Show deleted file markers when using versions
--s3-versions Include old versions in directory listings
--seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
--seafile-create-library Should rclone create a library if it doesn't exist
+ --seafile-description string Description of the remote
--seafile-encoding Encoding The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
--seafile-library string Name of the library
--seafile-library-key string Library password (for encrypted libraries only) (obscured)
@@ -18976,6 +20392,7 @@ Backend only flags. These can be set in the config file also.
--sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
--sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
--sftp-copy-is-hardlink Set to enable server side copies using hardlinks
+ --sftp-description string Description of the remote
--sftp-disable-concurrent-reads If set don't use concurrent reads
--sftp-disable-concurrent-writes If set don't use concurrent writes
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
@@ -19010,6 +20427,7 @@ Backend only flags. These can be set in the config file also.
--sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
--sharefile-client-id string OAuth Client Id
--sharefile-client-secret string OAuth Client Secret
+ --sharefile-description string Description of the remote
--sharefile-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
--sharefile-endpoint string Endpoint for API calls
--sharefile-root-folder-id string ID of the root folder
@@ -19018,10 +20436,12 @@ Backend only flags. These can be set in the config file also.
--sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
--sia-api-password string Sia Daemon API Password (obscured)
--sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
+ --sia-description string Description of the remote
--sia-encoding Encoding The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
--sia-user-agent string Siad User Agent (default "Sia-Agent")
--skip-links Don't warn about skipped symlinks
--smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
+ --smb-description string Description of the remote
--smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
--smb-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
--smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
@@ -19033,6 +20453,7 @@ Backend only flags. These can be set in the config file also.
--smb-user string SMB username (default "$USER")
--storj-access-grant string Access grant
--storj-api-key string API key
+ --storj-description string Description of the remote
--storj-passphrase string Encryption passphrase
--storj-provider string Choose an authentication method (default "existing")
--storj-satellite-address string Satellite address (default "us1.storj.io")
@@ -19041,6 +20462,7 @@ Backend only flags. These can be set in the config file also.
--sugarsync-authorization string Sugarsync authorization
--sugarsync-authorization-expiry string Sugarsync authorization expiry
--sugarsync-deleted-id string Sugarsync deleted folder id
+ --sugarsync-description string Description of the remote
--sugarsync-encoding Encoding The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
--sugarsync-hard-delete Permanently delete files if true
--sugarsync-private-access-key string Sugarsync Private Access Key
@@ -19054,6 +20476,7 @@ Backend only flags. These can be set in the config file also.
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
+ --swift-description string Description of the remote
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
@@ -19073,17 +20496,21 @@ Backend only flags. These can be set in the config file also.
--union-action-policy string Policy to choose upstream on ACTION category (default "epall")
--union-cache-time int Cache time of usage and free space (in seconds) (default 120)
--union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
+ --union-description string Description of the remote
--union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
--union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
--union-upstreams string List of space separated upstreams
--uptobox-access-token string Your access token
+ --uptobox-description string Description of the remote
--uptobox-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
--uptobox-private Set to make uploaded files private
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-bearer-token-command string Command to run to get a bearer token
+ --webdav-description string Description of the remote
--webdav-encoding string The encoding for the backend
--webdav-headers CommaSepList Set HTTP headers for all transactions
--webdav-nextcloud-chunk-size SizeSuffix Nextcloud upload chunk size (default 10Mi)
+ --webdav-owncloud-exclude-shares Exclude ownCloud shares
--webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
--webdav-pass string Password (obscured)
--webdav-url string URL of http host to connect to
@@ -19092,6 +20519,7 @@ Backend only flags. These can be set in the config file also.
--yandex-auth-url string Auth server URL
--yandex-client-id string OAuth Client Id
--yandex-client-secret string OAuth Client Secret
+ --yandex-description string Description of the remote
--yandex-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-hard-delete Delete files permanently rather than putting them into the trash
--yandex-token string OAuth Access Token as a JSON blob
@@ -19099,6 +20527,7 @@ Backend only flags. These can be set in the config file also.
--zoho-auth-url string Auth server URL
--zoho-client-id string OAuth Client Id
--zoho-client-secret string OAuth Client Secret
+ --zoho-description string Description of the remote
--zoho-encoding Encoding The encoding for the backend (default Del,Ctl,InvalidUtf8)
--zoho-region string Zoho region to connect to
--zoho-token string OAuth Access Token as a JSON blob
@@ -19661,23 +21090,34 @@ docker volume inspect my_vol
If docker refuses to remove the volume, you should find containers
or swarm services that use it and stop them first.
+## Bisync
+`bisync` is **in beta** and is considered an **advanced command**, so use with care.
+Make sure you have read and understood the entire [manual](https://rclone.org/bisync) (especially the [Limitations](#limitations) section) before using, or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
+
## Getting started {#getting-started}
- [Install rclone](https://rclone.org/install/) and set up your remotes.
- Bisync will create its working directory
- at `~/.cache/rclone/bisync` on Linux
+ at `~/.cache/rclone/bisync` on Linux, `/Users/yourusername/Library/Caches/rclone/bisync` on Mac,
or `C:\Users\MyLogin\AppData\Local\rclone\bisync` on Windows.
Make sure that this location is writable.
- Run bisync with the `--resync` flag, specifying the paths
to the local and remote sync directory roots.
-- For successive sync runs, leave off the `--resync` flag.
+- For successive sync runs, leave off the `--resync` flag. (**Important!**)
- Consider using a [filters file](#filtering) for excluding
unnecessary files and directories from the sync.
- Consider setting up the [--check-access](#check-access) feature
for safety.
-- On Linux, consider setting up a [crontab entry](#cron). bisync can
+- On Linux or Mac, consider setting up a [crontab entry](#cron). bisync can
safely run in concurrent cron jobs thanks to lock files it maintains.
+For example, your first command might look like this:
+
+```
+rclone bisync remote1:path1 remote2:path2 --create-empty-src-dirs --compare size,modtime,checksum --slow-hash-sync-only --resilient -MvP --drive-skip-gdocs --fix-case --resync --dry-run
+```
+If all looks good, run it again without `--dry-run`. After that, remove `--resync` as well.
+
Here is a typical run log (with timestamps removed for clarity):
```
@@ -19736,36 +21176,36 @@ Positional arguments:
Type 'rclone listremotes' for list of configured remotes.
Optional Flags:
- --check-access Ensure expected `RCLONE_TEST` files are found on
- both Path1 and Path2 filesystems, else abort.
- --check-filename FILENAME Filename for `--check-access` (default: `RCLONE_TEST`)
- --check-sync CHOICE Controls comparison of final listings:
- `true | false | only` (default: true)
- If set to `only`, bisync will only compare listings
- from the last run but skip actual sync.
- --filters-file PATH Read filtering patterns from a file
- --max-delete PERCENT Safety check on maximum percentage of deleted files allowed.
- If exceeded, the bisync run will abort. (default: 50%)
- --force Bypass `--max-delete` safety check and run the sync.
- Consider using with `--verbose`
- --create-empty-src-dirs Sync creation and deletion of empty directories.
- (Not compatible with --remove-empty-dirs)
- --remove-empty-dirs Remove empty directories at the final cleanup step.
- -1, --resync Performs the resync run.
- Warning: Path1 files may overwrite Path2 versions.
- Consider using `--verbose` or `--dry-run` first.
- --ignore-listing-checksum Do not use checksums for listings
- (add --ignore-checksum to additionally skip post-copy checksum checks)
- --resilient Allow future runs to retry after certain less-serious errors,
- instead of requiring --resync. Use at your own risk!
- --localtime Use local time in listings (default: UTC)
- --no-cleanup Retain working files (useful for troubleshooting and testing).
- --workdir PATH Use custom working directory (useful for testing).
- (default: `~/.cache/rclone/bisync`)
- -n, --dry-run Go through the motions - No files are copied/deleted.
- -v, --verbose Increases logging verbosity.
- May be specified more than once for more details.
- -h, --help help for bisync
+ --backup-dir1 string --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+ --backup-dir2 string --backup-dir for Path2. Must be a non-overlapping path on the same remote.
+ --check-access Ensure expected RCLONE_TEST files are found on both Path1 and Path2 filesystems, else abort.
+ --check-filename string Filename for --check-access (default: RCLONE_TEST)
+ --check-sync string Controls comparison of final listings: true|false|only (default: true) (default "true")
+ --compare string Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')
+ --conflict-loser ConflictLoserAction Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): , num, pathname, delete (default: num)
+ --conflict-resolve string Automatically resolve conflicts by preferring the version that is: none, path1, path2, newer, older, larger, smaller (default: none) (default "none")
+ --conflict-suffix string Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')
+ --create-empty-src-dirs Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
+ --download-hash Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)
+ --filters-file string Read filtering patterns from a file
+ --force Bypass --max-delete safety check and run the sync. Consider using with --verbose
+ -h, --help help for bisync
+ --ignore-listing-checksum Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)
+ --max-lock Duration Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m) (default 0s)
+ --no-cleanup Retain working files (useful for troubleshooting and testing).
+ --no-slow-hash Ignore listing checksums only on backends where they are slow
+ --recover Automatically recover from interruptions without requiring --resync.
+ --remove-empty-dirs Remove ALL empty directories at the final cleanup step.
+ --resilient Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!
+ -1, --resync Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.
+ --resync-mode string During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.) (default "none")
+ --retries int Retry operations this many times if they fail (requires --resilient). (default 3)
+ --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
+ --slow-hash-sync-only Ignore slow checksums for listings and deltas, but still consider them during sync calls.
+ --workdir string Use custom working dir - useful for testing. (default: {WORKDIR})
+ --max-delete PERCENT Safety check on maximum percentage of deleted files allowed. If exceeded, the bisync run will abort. (default: 50%)
+ -n, --dry-run Go through the motions - No files are copied/deleted.
+ -v, --verbose Increases logging verbosity. May be specified more than once for more details.
```
Arbitrary rclone flags may be specified on the
@@ -19799,22 +21239,16 @@ as the last step in the process.
## Command-line flags
-#### --resync
+### --resync
This will effectively make both Path1 and Path2 filesystems contain a
-matching superset of all files. Path2 files that do not exist in Path1 will
+matching superset of all files. By default, Path2 files that do not exist in Path1 will
be copied to Path1, and the process will then copy the Path1 tree to Path2.
-The `--resync` sequence is roughly equivalent to:
+The `--resync` sequence is roughly equivalent to the following (but see [`--resync-mode`](#resync-mode) for other options):
```
-rclone copy Path2 Path1 --ignore-existing
-rclone copy Path1 Path2
-```
-Or, if using `--create-empty-src-dirs`:
-```
-rclone copy Path2 Path1 --ignore-existing
-rclone copy Path1 Path2 --create-empty-src-dirs
-rclone copy Path2 Path1 --create-empty-src-dirs
+rclone copy Path2 Path1 --ignore-existing [--create-empty-src-dirs]
+rclone copy Path1 Path2 [--create-empty-src-dirs]
```
The base directories on both Path1 and Path2 filesystems must exist
@@ -19822,13 +21256,10 @@ or bisync will fail. This is required for safety - that bisync can verify
that both paths are valid.
When using `--resync`, a newer version of a file on the Path2 filesystem
-will be overwritten by the Path1 filesystem version.
-(Note that this is [NOT entirely symmetrical](https://github.com/rclone/rclone/issues/5681#issuecomment-938761815).)
+will (by default) be overwritten by the Path1 filesystem version.
+(Note that this is [NOT entirely symmetrical](https://github.com/rclone/rclone/issues/5681#issuecomment-938761815), and more symmetrical options can be specified with the [`--resync-mode`](#resync-mode) flag.)
Carefully evaluate deltas using [--dry-run](https://rclone.org/flags/#non-backend-flags).
-[//]: # (I reverted a recent change in the above paragraph, as it was incorrect.
-https://github.com/rclone/rclone/commit/dd72aff98a46c6e20848ac7ae5f7b19d45802493 )
-
For a resync run, one of the paths may be empty (no files in the path tree).
The resync run should result in files on both paths, else a normal non-resync
run will fail.
@@ -19838,7 +21269,100 @@ For a non-resync run, either path being empty (no files in the tree) fails with
This is a safety check that an unexpected empty path does not result in
deleting **everything** in the other path.
-#### --check-access
+Note that `--resync` implies `--resync-mode path1` unless a different
+[`--resync-mode`](#resync-mode) is explicitly specified.
+It is not necessary to use both the `--resync` and `--resync-mode` flags --
+either one is sufficient without the other.
+
+**Note:** `--resync` (including `--resync-mode`) should only be used under three specific (rare) circumstances:
+1. It is your _first_ bisync run (between these two paths)
+2. You've just made changes to your bisync settings (such as editing the contents of your `--filters-file`)
+3. There was an error on the prior run, and as a result, bisync now requires `--resync` to recover
+
+The rest of the time, you should _omit_ `--resync`. This is because `--resync` will only _copy_ (not _sync_) each side to the other.
+Therefore, if you included `--resync` for every bisync run, it would never be possible to delete a file --
+the deleted file would always keep reappearing at the end of every run (because it's being copied from the other side where it still exists).
+Similarly, renaming a file would always result in a duplicate copy (both old and new name) on both sides.
+
+If you find that frequent interruptions from #3 are an issue, rather than
+automatically running `--resync`, the recommended alternative is to use the
+[`--resilient`](#resilient), [`--recover`](#recover), and
+[`--conflict-resolve`](#conflict-resolve) flags, (along with [Graceful
+Shutdown](#graceful-shutdown) mode, when needed) for a very robust
+"set-it-and-forget-it" bisync setup that can automatically bounce back from
+almost any interruption it might encounter. Consider adding something like the
+following:
+
+```
+--resilient --recover --max-lock 2m --conflict-resolve newer
+```
+
+### --resync-mode CHOICE {#resync-mode}
+
+In the event that a file differs on both sides during a `--resync`,
+`--resync-mode` controls which version will overwrite the other. The supported
+options are similar to [`--conflict-resolve`](#conflict-resolve). For all of
+the following options, the version that is kept is referred to as the "winner",
+and the version that is overwritten (deleted) is referred to as the "loser".
+The options are named after the "winner":
+
+- `path1` - (the default) - the version from Path1 is unconditionally
+considered the winner (regardless of `modtime` and `size`, if any). This can be
+useful if one side is more trusted or up-to-date than the other, at the time of
+the `--resync`.
+- `path2` - same as `path1`, except the path2 version is considered the winner.
+- `newer` - the newer file (by `modtime`) is considered the winner, regardless
+of which side it came from. This may result in having a mix of some winners
+from Path1, and some winners from Path2. (The implementation is analogous to
+running `rclone copy --update` in both directions.)
+- `older` - same as `newer`, except the older file is considered the winner,
+and the newer file is considered the loser.
+- `larger` - the larger file (by `size`) is considered the winner (regardless
+of `modtime`, if any). This can be a useful option for remotes without
+`modtime` support, or with the kinds of files (such as logs) that tend to grow
+but not shrink, over time.
+- `smaller` - the smaller file (by `size`) is considered the winner (regardless
+of `modtime`, if any).
+
+For all of the above options, note the following:
+- If either of the underlying remotes lacks support for the chosen method, it
+will be ignored and will fall back to the default of `path1`. (For example, if
+`--resync-mode newer` is set, but one of the paths uses a remote that doesn't
+support `modtime`.)
+- If a winner can't be determined because the chosen method's attribute is
+missing or equal, it will be ignored, and bisync will instead try to determine
+whether the files differ by looking at the other `--compare` methods in effect.
+(For example, if `--resync-mode newer` is set, but the Path1 and Path2 modtimes
+are identical, bisync will compare the sizes.) If bisync concludes that they
+differ, preference is given to whichever is the "source" at that moment. (In
+practice, this gives a slight advantage to Path2, as the 2to1 copy comes before
+the 1to2 copy.) If the files _do not_ differ, nothing is copied (as both sides
+are already correct).
+- These options apply only to files that exist on both sides (with the same
+name and relative path). Files that exist *only* on one side and not the other
+are *always* copied to the other, during `--resync` (this is one of the main
+differences between resync and non-resync runs).
+- `--conflict-resolve`, `--conflict-loser`, and `--conflict-suffix` do not
+apply during `--resync`, and unlike these flags, nothing is renamed during
+`--resync`. When a file differs on both sides during `--resync`, one version
+always overwrites the other (much like in `rclone copy`.) (Consider using
+[`--backup-dir`](#backup-dir1-and-backup-dir2) to retain a backup of the losing
+version.)
+- Unlike for `--conflict-resolve`, `--resync-mode none` is not a valid option
+(or rather, it will be interpreted as "no resync", unless `--resync` has also
+been specified, in which case it will be ignored.)
+- Winners and losers are decided at the individual file-level only (there is
+not currently an option to pick an entire winning directory atomically,
+although the `path1` and `path2` options typically produce a similar result.)
+- To maintain backward-compatibility, the `--resync` flag implies
+`--resync-mode path1` unless a different `--resync-mode` is explicitly
+specified. Similarly, all `--resync-mode` options (except `none`) imply
+`--resync`, so it is not necessary to use both the `--resync` and
+`--resync-mode` flags simultaneously -- either one is sufficient without the
+other.
+
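+For example, to let the newer version win on either side during the initial
+resync, a first run might look like the following sketch (remote names and
+paths are illustrative):
+
+```
+rclone bisync remote1:path1 remote2:path2 --create-empty-src-dirs --resync-mode newer --dry-run
+```
+
+Since every `--resync-mode` except `none` implies `--resync`, the `--resync`
+flag itself can be omitted here. Drop `--dry-run` once the planned changes
+look correct.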
+
+### --check-access
Access check files are an additional safety measure against data loss.
bisync will ensure it can find matching `RCLONE_TEST` files in the same places
@@ -19867,7 +21391,7 @@ bisync assuming a bunch of deleted files if the linked-to tree should not be
accessible.
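+
+In practice, the matching check files can be created up front with ordinary
+rclone commands; a minimal sketch, with illustrative remote names and paths:
+
+```
+rclone touch remote1:path1/RCLONE_TEST
+rclone copyto remote1:path1/RCLONE_TEST remote2:path2/RCLONE_TEST
+```
+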
See also the [--check-filename](#check-filename) flag.
-#### --check-filename
+### --check-filename
Name of the file(s) used in access health validation.
The default `--check-filename` is `RCLONE_TEST`.
@@ -19875,7 +21399,154 @@ One or more files having this filename must exist, synchronized between your
source and destination filesets, in order for `--check-access` to succeed.
See [--check-access](#check-access) for additional details.
-#### --max-delete
+### --compare
+
+As of `v1.66`, bisync fully supports comparing based on any combination of
+size, modtime, and checksum (lifting the prior restriction on backends without
+modtime support.)
+
+By default (without the `--compare` flag), bisync inherits the same comparison
+options as `sync`
+(that is: `size` and `modtime` by default, unless modified with flags such as
+[`--checksum`](https://rclone.org/docs/#c-checksum) or [`--size-only`](/docs/#size-only).)
+
+If the `--compare` flag is set, it will override these defaults. This can be
+useful if you wish to compare based on combinations not currently supported in
+`sync`, such as comparing all three of `size` AND `modtime` AND `checksum`
+simultaneously (or just `modtime` AND `checksum`).
+
+`--compare` takes a comma-separated list, with the currently supported values
+being `size`, `modtime`, and `checksum`. For example, if you want to compare
+size and checksum, but not modtime, you would do:
+```
+--compare size,checksum
+```
+
+Or if you want to compare all three:
+```
+--compare size,modtime,checksum
+```
+
+`--compare` overrides any conflicting flags. For example, if you set the
+conflicting flags `--compare checksum --size-only`, `--size-only` will be
+ignored, and bisync will compare checksum and not size. To avoid confusion, it
+is recommended to use _either_ `--compare` or the normal `sync` flags, but not
+both.
+
+If `--compare` includes `checksum` and both remotes support checksums but have
+no hash types in common with each other, checksums will be considered _only_
+for comparisons within the same side (to determine what has changed since the
+prior sync), but not for comparisons against the opposite side. If one side
+supports checksums and the other does not, checksums will only be considered on
+the side that supports them.
+
+When comparing with `checksum` and/or `size` without `modtime`, bisync cannot
+determine whether a file is `newer` or `older` -- only whether it is `changed`
+or `unchanged`. (If it is `changed` on both sides, bisync still does the
+standard equality-check to avoid declaring a sync conflict unless it absolutely
+has to.)
+
+It is recommended to do a `--resync` when changing `--compare` settings, as
+otherwise your prior listing files may not contain the attributes you wish to
+compare (for example, they will not have stored checksums if you were not
+previously comparing checksums.)
+
+### --ignore-listing-checksum
+
+When `--checksum` or `--compare checksum` is set, bisync will retrieve (or
+generate) checksums (for backends that support them) when creating the listings
+for both paths, and store the checksums in the listing files.
+`--ignore-listing-checksum` will disable this behavior, which may speed things
+up considerably, especially on backends (such as [local](https://rclone.org/local/)) where hashes
+must be computed on the fly instead of retrieved. Please note the following:
+
+* As of `v1.66`, `--ignore-listing-checksum` is now automatically set when
+neither `--checksum` nor `--compare checksum` are in use (as the checksums
+would not be used for anything.)
+* `--ignore-listing-checksum` is NOT the same as
+[`--ignore-checksum`](https://rclone.org/docs/#ignore-checksum),
+and you may wish to use one or the other, or both. In a nutshell:
+`--ignore-listing-checksum` controls whether checksums are considered when
+scanning for diffs,
+while `--ignore-checksum` controls whether checksums are considered during the
+copy/sync operations that follow,
+if there ARE diffs.
+* Unless `--ignore-listing-checksum` is passed, bisync currently computes
+hashes for one path
+*even when there's no common hash with the other path*
+(for example, a [crypt](https://rclone.org/crypt/#modification-times-and-hashes) remote.)
+This can still be beneficial, as the hashes will still be used to detect
+changes within the same side
+(if `--checksum` or `--compare checksum` is set), even if they can't be used to
+compare against the opposite side.
+* If you wish to ignore listing checksums _only_ on remotes where they are slow
+to compute, consider using
+[`--no-slow-hash`](#no-slow-hash) (or
+[`--slow-hash-sync-only`](#slow-hash-sync-only)) instead of
+`--ignore-listing-checksum`.
+* If `--ignore-listing-checksum` is used simultaneously with `--compare
+checksum` (or `--checksum`), checksums will be ignored for bisync deltas,
+but still considered during the sync operations that follow (if deltas are
+detected based on modtime and/or size.)
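+
+For example, the following sketch (remote names are illustrative) keeps
+checksums in play for the sync operations while skipping them when building
+the listings, per the last point above:
+
+```
+rclone bisync remote1:path1 remote2:path2 --compare size,modtime,checksum --ignore-listing-checksum
+```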
+
+### --no-slow-hash
+
+On some remotes (notably `local`), checksums can dramatically slow down a
+bisync run, because hashes cannot be stored and need to be computed in
+real-time when they are requested. On other remotes (such as `drive`), they add
+practically no time at all. The `--no-slow-hash` flag will automatically skip
+checksums on remotes where they are slow, while still comparing them on others
+(assuming [`--compare`](#compare) includes `checksum`.) This can be useful when one of your
+bisync paths is slow but you still want to check checksums on the other, for a more
+robust sync.
+
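+For example, if Path1 is a local filesystem (where hashes are slow to compute)
+and Path2 is a remote that stores hashes, a sketch (with placeholder paths)
+might look like:
+
+```
+rclone bisync /local/path remote:path --compare size,modtime,checksum --no-slow-hash
+```
+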
+### --slow-hash-sync-only
+
+Same as [`--no-slow-hash`](#no-slow-hash), except slow hashes are still
+considered during sync calls. They are still NOT considered for determining
+deltas, nor are they included in listings. They are also skipped during
+`--resync`. The main use case for this flag is when you have a large number of
+files, but relatively few of them change from run to run -- so you don't want
+to check your entire tree every time (it would take too long), but you still
+want to consider checksums for the smaller group of files for which a `modtime`
+or `size` change was detected. Keep in mind that this speed savings comes with
+a safety trade-off: if a file's content were to change without a change to its
+`modtime` or `size`, bisync would not detect it, and it would not be synced.
+
+`--slow-hash-sync-only` is only useful if both remotes share a common hash
+type (if they don't, bisync will automatically fall back to `--no-slow-hash`.)
+Both `--no-slow-hash` and `--slow-hash-sync-only` have no effect without
+`--compare checksum` (or `--checksum`).
+
+### --download-hash
+
+If `--download-hash` is set, bisync will use best efforts to obtain an MD5
+checksum by downloading and computing on-the-fly, when checksums are not
+otherwise available (for example, a remote that doesn't support them.) Note
+that since rclone has to download the entire file, this may dramatically slow
+down your bisync runs, and is also likely to use a lot of data, so it is
+probably not practical for bisync paths with a large total file size. However,
+it can be a good option for syncing small-but-important files with maximum
+accuracy (for example, a source code repo on a `crypt` remote.) An additional
+advantage over methods like [`cryptcheck`](https://rclone.org/commands/rclone_cryptcheck/) is
+that the original file is not required for comparison (for example,
+`--download-hash` can be used to bisync two different crypt remotes with
+different passwords.)
+
+When `--download-hash` is set, bisync still looks for more efficient checksums
+first, and falls back to downloading only when none are found. It takes
+priority over conflicting flags such as `--no-slow-hash`. `--download-hash` is
+not suitable for [Google Docs](#gdocs) and other files of unknown size, as
+their checksums would change from run to run (due to small variances in the
+internals of the generated export file.) Therefore, bisync automatically skips
+`--download-hash` for files with a size less than 0.
+
+See also: [`Hasher`](https://rclone.org/hasher/) backend,
+[`cryptcheck`](https://rclone.org/commands/rclone_cryptcheck/) command, [`rclone check
+--download`](https://rclone.org/commands/rclone_check/) option,
+[`md5sum`](https://rclone.org/commands/rclone_md5sum/) command
+
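+For example, to bisync two crypt remotes with different passwords while still
+comparing checksums, a sketch (where `secret1:` and `secret2:` are placeholder
+crypt remotes) might look like:
+
+```
+rclone bisync secret1:path secret2:path --compare size,modtime,checksum --download-hash
+```
+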
+### --max-delete
As a safety check, if greater than the `--max-delete` percent of files were
deleted on either the Path1 or Path2 filesystem, then bisync will abort with
@@ -19893,7 +21564,7 @@ to bypass the check.
Also see the [all files changed](#all-files-changed) check.
-#### --filters-file {#filters-file}
+### --filters-file {#filters-file}
By using rclone filter features you can exclude file types or directory
sub-trees from the sync.
@@ -19917,7 +21588,153 @@ of the current filters file and compares it to the hash stored in the `.md5` fil
If they don't match, the run aborts with a critical error and thus forces you
to do a `--resync`, likely avoiding a disaster.
-#### --check-sync
+### --conflict-resolve CHOICE {#conflict-resolve}
+
+In bisync, a "conflict" is a file that is *new* or *changed* on *both sides*
+(relative to the prior run) AND is *not currently identical* on both sides.
+`--conflict-resolve` controls how bisync handles such a scenario. The currently
+supported options are:
+
+- `none` - (the default) - do not attempt to pick a winner, keep and rename
+both files according to [`--conflict-loser`](#conflict-loser) and
+[`--conflict-suffix`](#conflict-suffix) settings. For example, with the default
+settings, `file.txt` on Path1 is renamed `file.txt.conflict1` and `file.txt` on
+Path2 is renamed `file.txt.conflict2`. Both are copied to the opposite path
+during the run, so both sides end up with a copy of both files. (As `none` is
+the default, it is not necessary to specify `--conflict-resolve none` -- you
+can just omit the flag.)
+- `newer` - the newer file (by `modtime`) is considered the winner and is
+copied without renaming. The older file (the "loser") is handled according to
+`--conflict-loser` and `--conflict-suffix` settings (either renamed or
+deleted.) For example, if `file.txt` on Path1 is newer than `file.txt` on
+Path2, the result on both sides (with other default settings) will be `file.txt`
+(winner from Path1) and `file.txt.conflict1` (loser from Path2).
+- `older` - same as `newer`, except the older file is considered the winner,
+and the newer file is considered the loser.
+- `larger` - the larger file (by `size`) is considered the winner (regardless
+of `modtime`, if any).
+- `smaller` - the smaller file (by `size`) is considered the winner (regardless
+of `modtime`, if any).
+- `path1` - the version from Path1 is unconditionally considered the winner
+(regardless of `modtime` and `size`, if any). This can be useful if one side is
+usually more trusted or up-to-date than the other.
+- `path2` - same as `path1`, except the path2 version is considered the
+winner.
+
+For all of the above options, note the following:
+- If either of the underlying remotes lacks support for the chosen method, it
+will be ignored and fall back to `none`. (For example, if `--conflict-resolve
+newer` is set, but one of the paths uses a remote that doesn't support
+`modtime`.)
+- If a winner can't be determined because the chosen method's attribute is
+missing or equal, it will be ignored and fall back to `none`. (For example, if
+`--conflict-resolve newer` is set, but the Path1 and Path2 modtimes are
+identical, even if the sizes may differ.)
+- If the file's content is currently identical on both sides, it is not
+considered a "conflict", even if new or changed on both sides since the prior
+sync. (For example, if you made a change on one side and then synced it to the
+other side by other means.) Therefore, none of the conflict resolution flags
+apply in this scenario.
+- The conflict resolution flags do not apply during a `--resync`, as there is
+no "prior run" to speak of (but see [`--resync-mode`](#resync-mode) for similar
+options.)
+
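+For example, to automatically keep the most recently modified version of a
+conflicted file and delete the other, a sketch (with placeholder paths; use
+with care, per the notes above) might look like:
+
+```
+rclone bisync Path1 Path2 --conflict-resolve newer --conflict-loser delete
+```
+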
+### --conflict-loser CHOICE {#conflict-loser}
+
+`--conflict-loser` determines what happens to the "loser" of a sync conflict
+(when [`--conflict-resolve`](#conflict-resolve) determines a winner) or to both
+files (when there is no winner.) The currently supported options are:
+
+- `num` - (the default) - auto-number the conflicts by automatically appending
+the next available number to the `--conflict-suffix`, in chronological order.
+For example, with the default settings, the first conflict for `file.txt` will
+be renamed `file.txt.conflict1`. If `file.txt.conflict1` already exists,
+`file.txt.conflict2` will be used instead (etc., up to a maximum of
+9223372036854775807 conflicts.)
+- `pathname` - rename the conflicts according to which side they came from,
+which was the default behavior prior to `v1.66`. For example, with
+`--conflict-suffix path`, `file.txt` from Path1 will be renamed
+`file.txt.path1`, and `file.txt` from Path2 will be renamed `file.txt.path2`.
+If two non-identical suffixes are provided (ex. `--conflict-suffix
+cloud,local`), the trailing digit is omitted. Importantly, note that with
+`pathname`, there is no auto-numbering beyond `2`, so if `file.txt.path2`
+somehow already exists, it will be overwritten. Using a dynamic date variable
+in your `--conflict-suffix` (see below) is one possible way to avoid this. Note
+also that conflicts-of-conflicts are possible, if the original conflict is not
+manually resolved -- for example, if for some reason you edited
+`file.txt.path1` on both sides, and those edits were different, the result
+would be `file.txt.path1.path1` and `file.txt.path1.path2` (in addition to
+`file.txt.path2`.)
+- `delete` - keep the winner only and delete the loser, instead of renaming it.
+If a winner cannot be determined (see `--conflict-resolve` for details on how
+this could happen), `delete` is ignored and the default `num` is used instead
+(i.e. both versions are kept and renamed, and neither is deleted.) `delete` is
+inherently the most destructive option, so use it only with care.
+
+For all of the above options, note that if a winner cannot be determined (see
+`--conflict-resolve` for details on how this could happen), or if
+`--conflict-resolve` is not in use, *both* files will be renamed.
+
+### --conflict-suffix STRING[,STRING] {#conflict-suffix}
+
+`--conflict-suffix` controls the suffix that is appended when bisync renames a
+[`--conflict-loser`](#conflict-loser) (default: `conflict`).
+`--conflict-suffix` will accept either one string or two comma-separated
+strings to assign different suffixes to Path1 vs. Path2. This may be helpful
+later in identifying the source of the conflict. (For example,
+`--conflict-suffix dropboxconflict,laptopconflict`)
+
+With `--conflict-loser num`, a number is always appended to the suffix. With
+`--conflict-loser pathname`, a number is appended only when one suffix is
+specified (or when two identical suffixes are specified.) i.e. with
+`--conflict-loser pathname`, all of the following would produce exactly the
+same result:
+
+```
+--conflict-suffix path
+--conflict-suffix path,path
+--conflict-suffix path1,path2
+```
+
+Suffixes may be as short as 1 character. By default, the suffix is appended
+after any other extensions (ex. `file.jpg.conflict1`), however, this can be
+changed with the [`--suffix-keep-extension`](https://rclone.org/docs/#suffix-keep-extension) flag
+(i.e. to instead result in `file.conflict1.jpg`).
+
+`--conflict-suffix` supports several *dynamic date variables* when enclosed in
+curly braces as globs. This can be helpful to track the date and/or time that
+each conflict was handled by bisync. For example:
+
+```
+--conflict-suffix {DateOnly}-conflict
+// result: myfile.txt.2006-01-02-conflict1
+```
+
+All of the formats described [here](https://pkg.go.dev/time#pkg-constants) and
+[here](https://pkg.go.dev/time#example-Time.Format) are supported, but take
+care to ensure that your chosen format does not use any characters that are
+illegal on your remotes (for example, macOS does not allow colons in
+filenames, and slashes are also best avoided as they are often interpreted as
+directory separators.) To address this particular issue, an additional
+`{MacFriendlyTime}` (or just `{mac}`) option is supported, which results in
+`2006-01-02 0304PM`.
+
+Note that `--conflict-suffix` is entirely separate from rclone's main
+[`--suffix`](https://rclone.org/docs/#suffix-suffix) flag. This is intentional, as users may wish
+to use both flags simultaneously, if also using
+[`--backup-dir`](#backup-dir1-and-backup-dir2).
+
+Finally, note that the default in bisync prior to `v1.66` was to rename
+conflicts with `..path1` and `..path2` (with two periods, and `path` instead of
+`conflict`.) Bisync now defaults to a single dot instead of a double dot, but
+additional dots can be added by including them in the specified suffix string.
+For example, for behavior equivalent to the previous default, use:
+
+```
+[--conflict-resolve none] --conflict-loser pathname --conflict-suffix .path
+```
+
+### --check-sync
Enabled by default, the check-sync function checks that all of the same
files exist in both the Path1 and Path2 history listings. This _check-sync_
@@ -19934,59 +21751,183 @@ sync run times for very large numbers of files.
The check may be run manually with `--check-sync=only`. It runs only the
integrity check and terminates without actually synching.
-See also: [Concurrent modifications](#concurrent-modifications)
+Note that currently, `--check-sync` **only checks listing snapshots and NOT the
+actual files on the remotes.** Note also that the listing snapshots will not
+know about any changes that happened during or after the latest bisync run, as
+those will be discovered on the next run. Therefore, while listings should
+always match _each other_ at the end of a bisync run, it is _expected_ that
+they will not match the underlying remotes, nor will the remotes match each
+other, if there were changes during or after the run. This is normal, and any
+differences will be detected and synced on the next run.
+For a robust integrity check of the current state of the remotes (as opposed
+to just their listing snapshots), consider using [`check`](commands/rclone_check/)
+(or [`cryptcheck`](https://rclone.org/commands/rclone_cryptcheck/), if at least
+one path is a `crypt` remote) instead of `--check-sync`, keeping in mind that
+differences are expected if files changed during or after your last bisync run.
-#### --ignore-listing-checksum
+For example, a possible sequence could look like this:
-By default, bisync will retrieve (or generate) checksums (for backends that support them)
-when creating the listings for both paths, and store the checksums in the listing files.
-`--ignore-listing-checksum` will disable this behavior, which may speed things up considerably,
-especially on backends (such as [local](https://rclone.org/local/)) where hashes must be computed on the fly instead of retrieved.
-Please note the following:
+1. Normally scheduled bisync run:
-* While checksums are (by default) generated and stored in the listing files,
-they are NOT currently used for determining diffs (deltas).
-It is anticipated that full checksum support will be added in a future version.
-* `--ignore-listing-checksum` is NOT the same as [`--ignore-checksum`](https://rclone.org/docs/#ignore-checksum),
-and you may wish to use one or the other, or both. In a nutshell:
-`--ignore-listing-checksum` controls whether checksums are considered when scanning for diffs,
-while `--ignore-checksum` controls whether checksums are considered during the copy/sync operations that follow,
-if there ARE diffs.
-* Unless `--ignore-listing-checksum` is passed, bisync currently computes hashes for one path
-*even when there's no common hash with the other path*
-(for example, a [crypt](https://rclone.org/crypt/#modification-times-and-hashes) remote.)
-* If both paths support checksums and have a common hash,
-AND `--ignore-listing-checksum` was not specified when creating the listings,
-`--check-sync=only` can be used to compare Path1 vs. Path2 checksums (as of the time the previous listings were created.)
-However, `--check-sync=only` will NOT include checksums if the previous listings
-were generated on a run using `--ignore-listing-checksum`. For a more robust integrity check of the current state,
-consider using [`check`](commands/rclone_check/)
-(or [`cryptcheck`](https://rclone.org/commands/rclone_cryptcheck/), if at least one path is a `crypt` remote.)
+```
+rclone bisync Path1 Path2 -MPc --check-access --max-delete 10 --filters-file /path/to/filters.txt -v --no-cleanup --ignore-listing-checksum --disable ListR --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient
+```
-#### --resilient
+2. Periodic independent integrity check (perhaps scheduled nightly or weekly):
+
+```
+rclone check -MvPc Path1 Path2 --filter-from /path/to/filters.txt
+```
+
+3. If diffs are found, you have some choices to correct them.
+If one side is more up-to-date and you want to make the other side match it, you could run:
+
+```
+rclone sync Path1 Path2 --filter-from /path/to/filters.txt --create-empty-src-dirs -MPc -v
+```
+(or switch Path1 and Path2 to make Path2 the source-of-truth)
+
+Or, if neither side is totally up-to-date, you could run a `--resync` to bring them back into agreement
+(but remember that this could cause deleted files to re-appear.)
+
+Note also that `rclone check` does not currently include empty directories,
+so if you want to know if any empty directories are out of sync,
+consider alternatively running the above `rclone sync` command with `--dry-run` added.
+
+See also: [Concurrent modifications](#concurrent-modifications), [`--resilient`](#resilient)
+
+### --resilient
***Caution: this is an experimental feature. Use at your own risk!***
-By default, most errors or interruptions will cause bisync to abort and
-require [`--resync`](#resync) to recover. This is a safety feature,
-to prevent bisync from running again until a user checks things out.
-However, in some cases, bisync can go too far and enforce a lockout when one isn't actually necessary,
-like for certain less-serious errors that might resolve themselves on the next run.
-When `--resilient` is specified, bisync tries its best to recover and self-correct,
-and only requires `--resync` as a last resort when a human's involvement is absolutely necessary.
-The intended use case is for running bisync as a background process (such as via scheduled [cron](#cron)).
+By default, most errors or interruptions will cause bisync to abort and
+require [`--resync`](#resync) to recover. This is a safety feature, to prevent
+bisync from running again until a user checks things out. However, in some
+cases, bisync can go too far and enforce a lockout when one isn't actually
+necessary, like for certain less-serious errors that might resolve themselves
+on the next run. When `--resilient` is specified, bisync tries its best to
+recover and self-correct, and only requires `--resync` as a last resort when a
+human's involvement is absolutely necessary. The intended use case is for
+running bisync as a background process (such as via scheduled [cron](#cron)).
-When using `--resilient` mode, bisync will still report the error and abort,
-however it will not lock out future runs -- allowing the possibility of retrying at the next normally scheduled time,
-without requiring a `--resync` first. Examples of such retryable errors include
-access test failures, missing listing files, and filter change detections.
-These safety features will still prevent the *current* run from proceeding --
-the difference is that if conditions have improved by the time of the *next* run,
-that next run will be allowed to proceed.
-Certain more serious errors will still enforce a `--resync` lockout, even in `--resilient` mode, to prevent data loss.
+When using `--resilient` mode, bisync will still report the error and abort,
+however it will not lock out future runs -- allowing the possibility of
+retrying at the next normally scheduled time, without requiring a `--resync`
+first. Examples of such retryable errors include access test failures, missing
+listing files, and filter change detections. These safety features will still
+prevent the *current* run from proceeding -- the difference is that if
+conditions have improved by the time of the *next* run, that next run will be
+allowed to proceed. Certain more serious errors will still enforce a
+`--resync` lockout, even in `--resilient` mode, to prevent data loss.
-Behavior of `--resilient` may change in a future version.
+Behavior of `--resilient` may change in a future version. (See also:
+[`--recover`](#recover), [`--max-lock`](#max-lock), [Graceful
+Shutdown](#graceful-shutdown))
+
+### --recover
+
+If `--recover` is set, in the event of a sudden interruption or other
+un-graceful shutdown, bisync will attempt to automatically recover on the next
+run, instead of requiring `--resync`. Bisync is able to recover robustly by
+keeping one "backup" listing at all times, representing the state of both paths
+after the last known successful sync. Bisync can then compare the current state
+with this snapshot to determine which changes it needs to retry. Changes that
+were synced after this snapshot (during the run that was later interrupted)
+will appear to bisync as if they are "new or changed on both sides", but in
+most cases this is not a problem, as bisync will simply do its usual "equality
+check" and learn that no action needs to be taken on these files, since they
+are already identical on both sides.
+
+In the rare event that a file is synced successfully during a run that later
+aborts, and then that same file changes AGAIN before the next run, bisync will
+think it is a sync conflict, and handle it accordingly. (From bisync's
+perspective, the file has changed on both sides since the last trusted sync,
+and the files on either side are not currently identical.) Therefore,
+`--recover` carries with it a slightly increased chance of having conflicts --
+though in practice this is pretty rare, as the conditions required to cause it
+are quite specific. This risk can be reduced by using bisync's ["Graceful
+Shutdown"](#graceful-shutdown) mode (triggered by sending `SIGINT` or
+`Ctrl+C`), when you have the choice, instead of forcing a sudden termination.
+
+`--recover` and `--resilient` are similar, but distinct -- the main difference
+is that `--resilient` is about _retrying_, while `--recover` is about
+_recovering_. Most users will probably want both. `--resilient` allows retrying
+when bisync has chosen to abort itself due to safety features such as failing
+`--check-access` or detecting a filter change. `--resilient` does not cover
+external interruptions such as a user shutting down their computer in the
+middle of a sync -- that is what `--recover` is for.
+
+### --max-lock
+
+Bisync uses [lock files](#lock-file) as a safety feature to prevent
+interference from other bisync runs while it is running. Bisync normally
+removes these lock files at the end of a run, but if bisync is abruptly
+interrupted, these files will be left behind. By default, they will lock out
+all future runs, until the user has a chance to manually check things out and
+remove the lock. As an alternative, `--max-lock` can be used to make them
+automatically expire after a certain period of time, so that future runs are
+not locked out forever, and auto-recovery is possible. `--max-lock` can be any
+duration `2m` or greater (or `0` to disable). If set, lock files older than
+this will be considered "expired", and future runs will be allowed to disregard
+them and proceed. (Note that the `--max-lock` duration must be set by the
+process that left the lock file -- not the later one interpreting it.)
+
+If set, bisync will also "renew" these lock files every `--max-lock minus one
+minute` throughout a run, for extra safety. (For example, with `--max-lock 5m`,
+bisync would renew the lock file (for another 5 minutes) every 4 minutes until
+the run has completed.) In other words, it should not be possible for a lock
+file to pass its expiration time while the process that created it is still
+running -- and you can therefore be reasonably sure that any _expired_ lock
+file you may find was left there by an interrupted run, not one that is still
+running and just taking a while.
+
+If `--max-lock` is `0` or not set, the default is that lock files will never
+expire, and will block future runs (of these same two bisync paths)
+indefinitely.
+
+For maximum resilience from disruptions, consider setting a relatively short
+duration like `--max-lock 2m` along with [`--resilient`](#resilient) and
+[`--recover`](#recover), and a relatively frequent [cron schedule](#cron). The
+result will be a very robust "set-it-and-forget-it" bisync run that can
+automatically bounce back from almost any interruption it might encounter,
+without requiring the user to get involved and run a `--resync`. (See also:
+[Graceful Shutdown](#graceful-shutdown) mode)
+
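+A sketch of such a "set-it-and-forget-it" invocation (with placeholder paths,
+to be run on a schedule) might look like:
+
+```
+rclone bisync Path1 Path2 --resilient --recover --max-lock 2m -v
+```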
+
+### --backup-dir1 and --backup-dir2
+
+As of `v1.66`, [`--backup-dir`](https://rclone.org/docs/#backup-dir-dir) is supported in bisync.
+Because `--backup-dir` must be a non-overlapping path on the same remote,
+Bisync has introduced new `--backup-dir1` and `--backup-dir2` flags to support
+separate backup-dirs for `Path1` and `Path2` (bisyncing between different
+remotes with `--backup-dir` would not otherwise be possible.) `--backup-dir1`
+and `--backup-dir2` can use different remotes from each other, but
+`--backup-dir1` must use the same remote as `Path1`, and `--backup-dir2` must
+use the same remote as `Path2`. Each backup directory must not overlap its
+respective bisync Path without being excluded by a filter rule.
+
+The standard `--backup-dir` will also work, if both paths use the same remote
+(but note that deleted files from both paths would be mixed together in the
+same dir). If either `--backup-dir1` or `--backup-dir2` is set, it will
+override `--backup-dir`.
+
+Example:
+```
+rclone bisync /Users/someuser/some/local/path/Bisync gdrive:Bisync --backup-dir1 /Users/someuser/some/local/path/BackupDir --backup-dir2 gdrive:BackupDir --suffix -2023-08-26 --suffix-keep-extension --check-access --max-delete 10 --filters-file /Users/someuser/some/local/path/bisync_filters.txt --no-cleanup --ignore-listing-checksum --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient -MvP --drive-skip-gdocs --fix-case
+```
+
+In this example, if the user deletes a file in
+`/Users/someuser/some/local/path/Bisync`, bisync will propagate the delete to
+the other side by moving the corresponding file from `gdrive:Bisync` to
+`gdrive:BackupDir`. If the user deletes a file from `gdrive:Bisync`, bisync
+moves it from `/Users/someuser/some/local/path/Bisync` to
+`/Users/someuser/some/local/path/BackupDir`.
+
+In the event of a [rename due to a sync conflict](#conflict-loser), the
+rename is not considered a delete, unless a previous conflict with the same
+name already exists and would get overwritten.
+
+See also: [`--suffix`](https://rclone.org/docs/#suffix-suffix),
+[`--suffix-keep-extension`](https://rclone.org/docs/#suffix-keep-extension)
## Operation
@@ -20005,7 +21946,8 @@ On each successive run it will:
- Lock file prevents multiple simultaneous runs when taking a while.
This can be particularly useful if bisync is run by cron scheduler.
- Handle change conflicts non-destructively by creating
- `..path1` and `..path2` file versions.
+ `.conflict1`, `.conflict2`, etc. file versions, according to
+ [`--conflict-resolve`](#conflict-resolve), [`--conflict-loser`](#conflict-loser), and [`--conflict-suffix`](#conflict-suffix) settings.
- File system access health check using `RCLONE_TEST` files
(see the `--check-access` flag).
- Abort on excessive deletes - protects against a failed listing
@@ -20032,8 +21974,8 @@ Path1 deleted | File no longer exists on Path1 | File is deleted
Type | Description | Result | Implementation
--------------------------------|---------------------------------------|------------------------------------|-----------------------
Path1 new/changed AND Path2 new/changed AND Path1 == Path2 | File is new/changed on Path1 AND new/changed on Path2 AND Path1 version is currently identical to Path2 | No change | None
-Path1 new AND Path2 new | File is new on Path1 AND new on Path2 (and Path1 version is NOT identical to Path2) | Files renamed to _Path1 and _Path2 | `rclone copy` _Path2 file to Path1, `rclone copy` _Path1 file to Path2
-Path2 newer AND Path1 changed | File is newer on Path2 AND also changed (newer/older/size) on Path1 (and Path1 version is NOT identical to Path2) | Files renamed to _Path1 and _Path2 | `rclone copy` _Path2 file to Path1, `rclone copy` _Path1 file to Path2
+Path1 new AND Path2 new | File is new on Path1 AND new on Path2 (and Path1 version is NOT identical to Path2) | Conflicts handled according to [`--conflict-resolve`](#conflict-resolve) & [`--conflict-loser`](#conflict-loser) settings | default: `rclone copy` renamed `Path2.conflict2` file to Path1, `rclone copy` renamed `Path1.conflict1` file to Path2
+Path2 newer AND Path1 changed | File is newer on Path2 AND also changed (newer/older/size) on Path1 (and Path1 version is NOT identical to Path2) | Conflicts handled according to [`--conflict-resolve`](#conflict-resolve) & [`--conflict-loser`](#conflict-loser) settings | default: `rclone copy` renamed `Path2.conflict2` file to Path1, `rclone copy` renamed `Path1.conflict1` file to Path2
Path2 newer AND Path1 deleted | File is newer on Path2 AND also deleted on Path1 | Path2 version survives | `rclone copy` Path2 to Path1
Path2 deleted AND Path1 changed | File is deleted on Path2 AND changed (newer/older/size) on Path1 | Path1 version survives |`rclone copy` Path1 to Path2
Path1 deleted AND Path2 changed | File is deleted on Path1 AND changed (newer/older/size) on Path2 | Path2 version survives | `rclone copy` Path2 to Path1
@@ -20044,7 +21986,7 @@ Now, when bisync comes to a file that it wants to rename (because it is new/chan
it first checks whether the Path1 and Path2 versions are currently *identical*
(using the same underlying function as [`check`](commands/rclone_check/).)
If bisync concludes that the files are identical, it will skip them and move on.
-Otherwise, it will create renamed `..Path1` and `..Path2` duplicates, as before.
+Otherwise, it will create renamed duplicates, as before.
This behavior also [improves the experience of renaming directories](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=Renamed%20directories),
as a `--resync` is no longer required, so long as the same change has been made on both sides.
@@ -20061,19 +22003,12 @@ before you commit to the changes.
### Modification times
-Bisync relies on file timestamps to identify changed files and will
-_refuse_ to operate if backend lacks the modification time support.
-
+By default, bisync compares files by modification time and size.
If you or your application should change the content of a file
-without changing the modification time then bisync will _not_
+without changing the modification time and size, then bisync will _not_
notice the change, and thus will not copy it to the other side.
-
-Note that on some cloud storage systems it is not possible to have file
-timestamps that match _precisely_ between the local and other filesystems.
-
-Bisync's approach to this problem is by tracking the changes on each side
-_separately_ over time with a local database of files in that side then
-applying the resulting changes on the other side.
+As an alternative, consider comparing by checksum (if your remotes support it).
+See [`--compare`](#compare) for details.
### Error handling {#error-handling}
@@ -20097,7 +22032,8 @@ typically at `${HOME}/.cache/rclone/bisync/` on Linux.
Some errors are considered temporary and re-running the bisync is not blocked.
The _critical return_ blocks further bisync runs.
-See also: [`--resilient`](#resilient)
+See also: [`--resilient`](#resilient), [`--recover`](#recover),
+[`--max-lock`](#max-lock), [Graceful Shutdown](#graceful-shutdown)
### Lock file
@@ -20109,6 +22045,8 @@ Delete the lock file as part of debugging the situation.
The lock file effectively blocks follow-on (e.g., scheduled by _cron_) runs
when the prior invocation is taking a long time.
The lock file contains _PID_ of the blocking process, which may help in debug.
+Lock files can be set to automatically expire after a certain amount of time,
+using the [`--max-lock`](#max-lock) flag.
**Note**
that while concurrent bisync runs are allowed, _be very cautious_
@@ -20122,6 +22060,32 @@ lest there be replicated files, deleted files and general mayhem.
- `1` for a non-critical failing run (a rerun may be successful),
- `2` for a critically aborted run (requires a `--resync` to recover).
+### Graceful Shutdown
+
+Bisync has a "Graceful Shutdown" mode which is activated by sending `SIGINT` or
+pressing `Ctrl+C` during a run. Once triggered, bisync will use best efforts to
+exit cleanly before the timer runs out. If bisync is in the middle of
+transferring files, it will attempt to cleanly empty its queue by finishing
+what it has started but not taking more. If it cannot do so within 30 seconds,
+it will cancel the in-progress transfers at that point and then give itself a
+maximum of 60 seconds to wrap up, save its state for next time, and exit. With
+the `-vP` flags you will see constant status updates and a final confirmation
+of whether or not the graceful shutdown was successful.
+
+At any point during the "Graceful Shutdown" sequence, a second `SIGINT` or
+`Ctrl+C` will trigger an immediate, un-graceful exit, which will leave things
+in a messier state. Usually a robust recovery will still be possible if using
+[`--recover`](#recover) mode, otherwise you will need to do a `--resync`.
+
+If you plan to use Graceful Shutdown mode, it is recommended to use
+[`--resilient`](#resilient) and [`--recover`](#recover), and it is important to
+NOT use [`--inplace`](https://rclone.org/docs/#inplace), otherwise you risk leaving
+partially-written files on one side, which may be confused for real files on
+the next run. Note also that in the event of an abrupt interruption, a [lock
+file](#lock-file) will be left behind to block concurrent runs. You will need
+to delete it before you can proceed with the next run (or wait for it to
+expire on its own, if using `--max-lock`.)
+
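+For example, to trigger a graceful shutdown of a bisync run started in another
+terminal, one could send it a `SIGINT` (a sketch, assuming a single
+`rclone bisync` process is running):
+
+```
+kill -INT $(pgrep -f 'rclone bisync')
+```
+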
## Limitations
### Supported backends
@@ -20134,62 +22098,39 @@ Bisync is considered _BETA_ and has been tested with the following backends:
- S3
- SFTP
- Yandex Disk
+- Crypt
It has not been fully tested with other services yet.
If it works, or sorta works, please let us know and we'll update the list.
Run the test suite to check for proper operation as described below.
-First release of `rclone bisync` requires that underlying backend supports
-the modification time feature and will refuse to run otherwise.
-This limitation will be lifted in a future `rclone bisync` release.
+The first release of `rclone bisync` required both underlying backends to support
+modification times, and refused to run otherwise.
+This limitation has been lifted as of `v1.66`, as bisync now supports comparing
+checksum and/or size instead of (or in addition to) modtime.
+See [`--compare`](#compare) for details.
### Concurrent modifications
-When using **Local, FTP or SFTP** remotes rclone does not create _temporary_
+When using **Local, FTP or SFTP** remotes with [`--inplace`](https://rclone.org/docs/#inplace), rclone does not create _temporary_
files at the destination when copying, and thus if the connection is lost
the created file may be corrupt, which will likely propagate back to the
original path on the next sync, resulting in data loss.
-This will be solved in a future release, there is no workaround at the moment.
+It is therefore recommended to _omit_ `--inplace`.
Files that **change during** a bisync run may result in data loss.
-This has been seen in a highly dynamic environment, where the filesystem
-is getting hammered by running processes during the sync.
-The currently recommended solution is to sync at quiet times or [filter out](#filtering)
-unnecessary directories and files.
-
-As an [alternative approach](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=scans%2C%20to%20avoid-,errors%20if%20files%20changed%20during%20sync,-Given%20the%20number),
-consider using `--check-sync=false` (and possibly `--resilient`) to make bisync more forgiving
-of filesystems that change during the sync.
-Be advised that this may cause bisync to miss events that occur during a bisync run,
-so it is a good idea to supplement this with a periodic independent integrity check,
-and corrective sync if diffs are found. For example, a possible sequence could look like this:
-
-1. Normally scheduled bisync run:
-
-```
-rclone bisync Path1 Path2 -MPc --check-access --max-delete 10 --filters-file /path/to/filters.txt -v --check-sync=false --no-cleanup --ignore-listing-checksum --disable ListR --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient
-```
-
-2. Periodic independent integrity check (perhaps scheduled nightly or weekly):
-
-```
-rclone check -MvPc Path1 Path2 --filter-from /path/to/filters.txt
-```
-
-3. If diffs are found, you have some choices to correct them.
-If one side is more up-to-date and you want to make the other side match it, you could run:
-
-```
-rclone sync Path1 Path2 --filter-from /path/to/filters.txt --create-empty-src-dirs -MPc -v
-```
-(or switch Path1 and Path2 to make Path2 the source-of-truth)
-
-Or, if neither side is totally up-to-date, you could run a `--resync` to bring them back into agreement
-(but remember that this could cause deleted files to re-appear.)
-
-*Note also that `rclone check` does not currently include empty directories,
-so if you want to know if any empty directories are out of sync,
-consider alternatively running the above `rclone sync` command with `--dry-run` added.
+Prior to `rclone v1.66`, this was commonly seen in highly dynamic environments, where the filesystem
+was getting hammered by running processes during the sync.
+As of `rclone v1.66`, bisync was redesigned to use a "snapshot" model,
+greatly reducing the risks from changes during a sync.
+Changes that are not detected during the current sync will now be detected during the following sync,
+and will no longer cause the entire run to throw a critical error.
+There is additionally a mechanism to mark files as needing to be internally rechecked next time, for added safety.
+It should therefore no longer be necessary to sync only at quiet times --
+however, note that an error can still occur if a file happens to change at the exact moment it's
+being read/written by bisync (same as would happen in `rclone sync`.)
+(See also: [`--ignore-checksum`](https://rclone.org/docs/#ignore-checksum),
+[`--local-no-check-updated`](https://rclone.org/local/#local-no-check-updated))
### Empty directories
@@ -20209,11 +22150,17 @@ and use `--resync` when you need to switch.
### Renamed directories
-Renaming a folder on the Path1 side results in deleting all files on
+By default, renaming a folder on the Path1 side results in deleting all files on
the Path2 side and then copying all files again from Path1 to Path2.
Bisync sees this as all files in the old directory name as deleted and all
files in the new directory name as new.
-Currently, the most effective and efficient method of renaming a directory
+
+A recommended solution is to use [`--track-renames`](https://rclone.org/docs/#track-renames),
+which is now supported in bisync as of `rclone v1.66`.
+Note that `--track-renames` is not available during `--resync`,
+as `--resync` does not delete anything (`--track-renames` only supports `sync`, not `copy`.)
+
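+For example (a sketch with placeholder paths):
+
+```
+rclone bisync Path1 Path2 --track-renames
+```
+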
+Otherwise, the most effective and efficient method of renaming a directory
is to rename it to the same name on both sides. (As of `rclone v1.64`,
a `--resync` is no longer required after doing so, as bisync will automatically
detect that Path1 and Path2 are in agreement.)
@@ -20227,25 +22174,20 @@ and there is also a [known issue concerning Google Drive users with many empty d
For now, the recommended way to avoid using `--fast-list` is to add `--disable ListR`
to all bisync commands. The default behavior may change in a future version.
-### Overridden Configs
+### Case (and unicode) sensitivity {#case-sensitivity}
-When rclone detects an overridden config, it adds a suffix like `{ABCDE}` on the fly
-to the internal name of the remote. Bisync follows suit by including this suffix in its listing filenames.
-However, this suffix does not necessarily persist from run to run, especially if different flags are provided.
-So if next time the suffix assigned is `{FGHIJ}`, bisync will get confused,
-because it's looking for a listing file with `{FGHIJ}`, when the file it wants has `{ABCDE}`.
-As a result, it throws
-`Bisync critical error: cannot find prior Path1 or Path2 listings, likely due to critical error on prior run`
-and refuses to run again until the user runs a `--resync` (unless using `--resilient`).
-The best workaround at the moment is to set any backend-specific flags in the [config file](https://rclone.org/commands/rclone_config/)
-instead of specifying them with command flags. (You can still override them as needed for other rclone commands.)
+As of `v1.66`, case and unicode form differences no longer cause critical errors,
+and normalization (when comparing between filesystems) is handled according to the same flags and defaults as `rclone sync`.
+See the following options (all of which are supported by bisync) to control this behavior more granularly:
+- [`--fix-case`](https://rclone.org/docs/#fix-case)
+- [`--ignore-case-sync`](https://rclone.org/docs/#ignore-case-sync)
+- [`--no-unicode-normalization`](https://rclone.org/docs/#no-unicode-normalization)
+- [`--local-unicode-normalization`](https://rclone.org/local/#local-unicode-normalization) and
+[`--local-case-sensitive`](https://rclone.org/local/#local-case-sensitive) (caution: these are normally not what you want.)
-### Case sensitivity
-
-Synching with **case-insensitive** filesystems, such as Windows or `Box`,
-can result in file name conflicts. This will be fixed in a future release.
-The near-term workaround is to make sure that files on both sides
-don't have spelling case differences (`Smile.jpg` vs. `smile.jpg`).
+Note that in the (probably rare) event that `--fix-case` is used AND a file is new/changed on both sides
+AND the checksums match AND the filename case does not match, the Path1 filename is considered the winner,
+for the purposes of `--fix-case` (Path2 will be renamed to match it).
## Windows support {#windows}
@@ -20526,23 +22468,58 @@ files are generating complaints. If the error is
consider using the flag
[--drive-acknowledge-abuse](https://rclone.org/drive/#drive-acknowledge-abuse).
-### Google Doc files
+### Google Docs (and other files of unknown size) {#gdocs}
-Google docs exist as virtual files on Google Drive and cannot be transferred
-to other filesystems natively. While it is possible to export a Google doc to
-a normal file (with `.xlsx` extension, for example), it is not possible
-to import a normal file back into a Google document.
+As of `v1.66`, [Google Docs](https://rclone.org/drive/#import-export-of-google-documents)
+(including Google Sheets, Slides, etc.) are now supported in bisync, subject to
+the same options, defaults, and limitations as in `rclone sync`. When bisyncing
+drive with non-drive backends, the drive -> non-drive direction is controlled
+by [`--drive-export-formats`](https://rclone.org/drive/#drive-export-formats) (default
+`"docx,xlsx,pptx,svg"`) and the non-drive -> drive direction is controlled by
+[`--drive-import-formats`](https://rclone.org/drive/#drive-import-formats) (default none.)
-Bisync's handling of Google Doc files is to flag them in the run log output
-for user's attention and ignore them for any file transfers, deletes, or syncs.
-They will show up with a length of `-1` in the listings.
-This bisync run is otherwise successful:
+For example, with the default export/import formats, a Google Sheet on the
+drive side will be synced to an `.xlsx` file on the non-drive side. In the
+reverse direction, `.xlsx` files with filenames that match an existing Google
+Sheet will be synced to that Google Sheet, while `.xlsx` files that do NOT
+match an existing Google Sheet will be copied to drive as normal `.xlsx` files
+(without conversion to Sheets, although the Google Drive web browser UI may
+still give you the option to open it as one.)
-```
-2021/05/11 08:23:15 INFO : Synching Path1 "/path/to/local/tree/base/" with Path2 "GDrive:"
-2021/05/11 08:23:15 INFO : ...path2.lst-new: Ignoring incorrect line: "- -1 - - 2018-07-29T08:49:30.136000000+0000 GoogleDoc.docx"
-2021/05/11 08:23:15 INFO : Bisync successful
-```
+If `--drive-import-formats` is set (it's not, by default), then all of the
+specified formats will be converted to Google Docs, if there is no existing
+Google Doc with a matching name. Caution: such conversion can be quite lossy,
+and in most cases it's probably not what you want!
+
+To bisync Google Docs as URL shortcut links (in a manner similar to "Drive for
+Desktop"), use: `--drive-export-formats url` (or
+[alternatives](https://rclone.org/drive/#exportformats:~:text=available%20Google%20Documents.-,Extension,macOS,-Standard%20options).)
+
+Note that these link files cannot be edited on the non-drive side -- you will
+get errors if you try to sync an edited link file back to drive. They CAN be
+deleted (it will result in deleting the corresponding Google Doc.) If you
+create a `.url` file on the non-drive side that does not match an existing
+Google Doc, bisyncing it will just result in copying the literal `.url` file
+over to drive (no Google Doc will be created.) So, as a general rule of thumb,
+think of them as read-only placeholders on the non-drive side, and make all
+your changes on the drive side.
+
+Likewise, even with other export-formats, it is best to only move/rename Google
+Docs on the drive side. This is because otherwise, bisync will interpret this
+as a file deleted and another created, and accordingly, it will delete the
+Google Doc and create a new file at the new path. (Whether or not that new file
+is a Google Doc depends on `--drive-import-formats`.)
+
+Lastly, take note that all Google Docs on the drive side have a size of `-1`
+and no checksum. Therefore, they cannot be reliably synced with the
+`--checksum` or `--size-only` flags. (To be exact: they will still get
+created/deleted, and bisync's delta engine will notice changes and queue them
+for syncing, but the underlying sync function will consider them identical and
+skip them.) To work around this, use the default (modtime and size) instead of
+`--checksum` or `--size-only`.
+
+To ignore Google Docs entirely, use
+[`--drive-skip-gdocs`](https://rclone.org/drive/#drive-skip-gdocs).
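+
+For example, to bisync a local folder with a drive remote while keeping Google
+Docs as URL shortcut links on the local side, a sketch (where `gdrive:` and
+the local path are placeholders) might look like:
+
+```
+rclone bisync /local/path gdrive:path --drive-export-formats url
+```
+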
## Usage examples
@@ -20920,6 +22897,30 @@ about _Unison_ and synchronization in general.
## Changelog
+### `v1.66`
+* Copies and deletes are now handled in one operation instead of two
+* `--track-renames` and `--backup-dir` are now supported
+* Partial uploads known issue on `local`/`ftp`/`sftp` has been resolved (unless using `--inplace`)
+* Final listings are now generated from sync results, to avoid needing to re-list
+* Bisync is now much more resilient to changes that happen during a bisync run, and far less prone to critical errors / undetected changes
+* Bisync is now capable of rolling a file listing back in cases of uncertainty, essentially marking the file as needing to be rechecked next time.
+* A few basic terminal colors are now supported, controllable with [`--color`](https://rclone.org/docs/#color-when) (`AUTO`|`NEVER`|`ALWAYS`)
+* Initial listing snapshots of Path1 and Path2 are now generated concurrently, using the same "march" infrastructure as `check` and `sync`,
+for performance improvements and less [risk of error](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=4.%20Listings%20should%20alternate%20between%20paths%20to%20minimize%20errors).
+* Fixed handling of unicode normalization and case insensitivity, support for [`--fix-case`](https://rclone.org/docs/#fix-case), [`--ignore-case-sync`](/docs/#ignore-case-sync), [`--no-unicode-normalization`](/docs/#no-unicode-normalization)
+* `--resync` is now much more efficient (especially for users of `--create-empty-src-dirs`)
+* Google Docs (and other files of unknown size) are now supported (with the same options as in `sync`)
+* Equality checks before a sync conflict rename now fall back to `cryptcheck` (when possible) or `--download`,
+instead of `--size-only`, when `check` is not available.
+* Bisync no longer fails to find the correct listing file when configs are overridden with backend-specific flags.
+* Bisync now fully supports comparing based on any combination of size, modtime, and checksum, lifting the prior restriction on backends without modtime support.
+* Bisync now supports a "Graceful Shutdown" mode to cleanly cancel a run early without requiring `--resync`.
+* New `--recover` flag allows robust recovery in the event of interruptions, without requiring `--resync`.
+* A new `--max-lock` setting allows lock files to automatically renew and expire, for better automatic recovery when a run is interrupted.
+* Bisync now supports auto-resolving sync conflicts and customizing rename behavior with new [`--conflict-resolve`](#conflict-resolve), [`--conflict-loser`](#conflict-loser), and [`--conflict-suffix`](#conflict-suffix) flags.
+* A new [`--resync-mode`](#resync-mode) flag allows more control over which version of a file gets kept during a `--resync`.
+* Bisync now supports [`--retries`](https://rclone.org/docs/#retries-int) and [`--retries-sleep`](/docs/#retries-sleep-time) (when [`--resilient`](#resilient) is set.)
+
### `v1.64`
* Fixed an [issue](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=1.%20Dry%20runs%20are%20not%20completely%20dry)
causing dry runs to inadvertently commit filter changes
@@ -21282,6 +23283,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot
+#### --fichier-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_FICHIER_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -21401,341 +23413,22 @@ Properties:
- Type: string
- Required: true
-
-
-# Amazon Drive
-
-Amazon Drive, formerly known as Amazon Cloud Drive, is a cloud storage
-service run by Amazon for consumers.
-
-## Status
-
-**Important:** rclone supports Amazon Drive only if you have your own
-set of API keys. Unfortunately the [Amazon Drive developer
-program](https://developer.amazon.com/amazon-drive) is now closed to
-new entries so if you don't already have your own set of keys you will
-not be able to use rclone with Amazon Drive.
-
-For the history on why rclone no longer has a set of Amazon Drive API
-keys see [the forum](https://forum.rclone.org/t/rclone-has-been-banned-from-amazon-drive/2314).
-
-If you happen to know anyone who works at Amazon then please ask them
-to re-instate rclone into the Amazon Drive developer program - thanks!
-
-## Configuration
-
-The initial setup for Amazon Drive involves getting a token from
-Amazon which you need to do in your browser. `rclone config` walks
-you through it.
-
-The configuration process for Amazon Drive may involve using an [oauth
-proxy](https://github.com/ncw/oauthproxy). This is used to keep the
-Amazon credentials out of the source code. The proxy runs in Google's
-very secure App Engine environment and doesn't store any credentials
-which pass through it.
-
-Since rclone doesn't currently have its own Amazon Drive credentials
-so you will either need to have your own `client_id` and
-`client_secret` with Amazon Drive, or use a third-party oauth proxy
-in which case you will need to enter `client_id`, `client_secret`,
-`auth_url` and `token_url`.
-
-Note also if you are not using Amazon's `auth_url` and `token_url`,
-(ie you filled in something for those) then if setting up on a remote
-machine you can only use the [copying the config method of
-configuration](https://rclone.org/remote_setup/#configuring-by-copying-the-config-file)
-- `rclone authorize` will not work.
-
-Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
-```
-No remotes found, make a new one?
-n) New remote
-r) Rename remote
-c) Copy remote
-s) Set configuration password
-q) Quit config
-n/r/c/s/q> n
-name> remote
-Type of storage to configure.
-Choose a number from below, or type in your own value
-[snip]
-XX / Amazon Drive
- \ "amazon cloud drive"
-[snip]
-Storage> amazon cloud drive
-Amazon Application Client Id - required.
-client_id> your client ID goes here
-Amazon Application Client Secret - required.
-client_secret> your client secret goes here
-Auth server URL - leave blank to use Amazon's.
-auth_url> Optional auth URL
-Token server url - leave blank to use Amazon's.
-token_url> Optional token URL
-Remote config
-Make sure your Redirect URL is set to "http://127.0.0.1:53682/" in your custom config.
-Use web browser to automatically authenticate rclone with remote?
- * Say Y if the machine running rclone has a web browser you can use
- * Say N if running rclone on a (remote) machine without web browser access
-If not sure try Y. If Y failed, try N.
-y) Yes
-n) No
-y/n> y
-If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
-Log in and authorize rclone for access
-Waiting for code...
-Got code
---------------------
-[remote]
-client_id = your client ID goes here
-client_secret = your client secret goes here
-auth_url = Optional auth URL
-token_url = Optional token URL
-token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","refresh_token":"xxxxxxxxxxxxxxxxxx","expiry":"2015-09-06T16:07:39.658438471+01:00"}
---------------------
-y) Yes this is OK
-e) Edit this remote
-d) Delete this remote
-y/e/d> y
-```
-
-See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a
-machine with no Internet browser available.
-
-Note that rclone runs a webserver on your local machine to collect the
-token as returned from Amazon. This only runs from the moment it
-opens your browser to the moment you get back the verification
-code. This is on `http://127.0.0.1:53682/` and this it may require
-you to unblock it temporarily if you are running a host firewall.
-
-Once configured you can then use `rclone` like this,
-
-List directories in top level of your Amazon Drive
-
- rclone lsd remote:
-
-List all the files in your Amazon Drive
-
- rclone ls remote:
-
-To copy a local directory to an Amazon Drive directory called backup
-
- rclone copy /home/source remote:backup
-
-### Modification times and hashes
-
-Amazon Drive doesn't allow modification times to be changed via
-the API so these won't be accurate or used for syncing.
-
-It does support the MD5 hash algorithm, so for a more accurate sync,
-you can use the `--checksum` flag.
-
-### Restricted filename characters
-
-| Character | Value | Replacement |
-| --------- |:-----:|:-----------:|
-| NUL | 0x00 | ␀ |
-| / | 0x2F | / |
-
-Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
-as they can't be used in JSON strings.
-
-### Deleting files
-
-Any files you delete with rclone will end up in the trash. Amazon
-don't provide an API to permanently delete files, nor to empty the
-trash, so you will have to do that with one of Amazon's apps or via
-the Amazon Drive website. As of November 17, 2016, files are
-automatically deleted by Amazon from the trash after 30 days.
-
-### Using with non `.com` Amazon accounts
-
-Let's say you usually use `amazon.co.uk`. When you authenticate with
-rclone it will take you to an `amazon.com` page to log in. Your
-`amazon.co.uk` email and password should work here just fine.
-
-
-### Standard options
-
-Here are the Standard options specific to amazon cloud drive (Amazon Drive).
-
-#### --acd-client-id
-
-OAuth Client Id.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_id
-- Env Var: RCLONE_ACD_CLIENT_ID
-- Type: string
-- Required: false
-
-#### --acd-client-secret
-
-OAuth Client Secret.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_secret
-- Env Var: RCLONE_ACD_CLIENT_SECRET
-- Type: string
-- Required: false
-
### Advanced options
-Here are the Advanced options specific to amazon cloud drive (Amazon Drive).
+Here are the Advanced options specific to alias (Alias for an existing remote).
-#### --acd-token
+#### --alias-description
-OAuth Access Token as a JSON blob.
+Description of the remote
Properties:
-- Config: token
-- Env Var: RCLONE_ACD_TOKEN
+- Config: description
+- Env Var: RCLONE_ALIAS_DESCRIPTION
- Type: string
- Required: false
-#### --acd-auth-url
-Auth server URL.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: auth_url
-- Env Var: RCLONE_ACD_AUTH_URL
-- Type: string
-- Required: false
-
-#### --acd-token-url
-
-Token server url.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: token_url
-- Env Var: RCLONE_ACD_TOKEN_URL
-- Type: string
-- Required: false
-
-#### --acd-checkpoint
-
-Checkpoint for internal polling (debug).
-
-Properties:
-
-- Config: checkpoint
-- Env Var: RCLONE_ACD_CHECKPOINT
-- Type: string
-- Required: false
-
-#### --acd-upload-wait-per-gb
-
-Additional time per GiB to wait after a failed complete upload to see if it appears.
-
-Sometimes Amazon Drive gives an error when a file has been fully
-uploaded but the file appears anyway after a little while. This
-happens sometimes for files over 1 GiB in size and nearly every time for
-files bigger than 10 GiB. This parameter controls the time rclone waits
-for the file to appear.
-
-The default value for this parameter is 3 minutes per GiB, so by
-default it will wait 3 minutes for every GiB uploaded to see if the
-file appears.
-
-You can disable this feature by setting it to 0. This may cause
-conflict errors as rclone retries the failed upload but the file will
-most likely appear correctly eventually.
-
-These values were determined empirically by observing lots of uploads
-of big files for a range of file sizes.
-
-Upload with the "-v" flag to see more info about what rclone is doing
-in this situation.
-
-Properties:
-
-- Config: upload_wait_per_gb
-- Env Var: RCLONE_ACD_UPLOAD_WAIT_PER_GB
-- Type: Duration
-- Default: 3m0s
-
-#### --acd-templink-threshold
-
-Files >= this size will be downloaded via their tempLink.
-
-Files this size or more will be downloaded via their "tempLink". This
-is to work around a problem with Amazon Drive which blocks downloads
-of files bigger than about 10 GiB. The default for this is 9 GiB which
-shouldn't need to be changed.
-
-To download files above this threshold, rclone requests a "tempLink"
-which downloads the file through a temporary URL directly from the
-underlying S3 storage.
-
-Properties:
-
-- Config: templink_threshold
-- Env Var: RCLONE_ACD_TEMPLINK_THRESHOLD
-- Type: SizeSuffix
-- Default: 9Gi
-
-#### --acd-encoding
-
-The encoding for the backend.
-
-See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_ACD_ENCODING
-- Type: Encoding
-- Default: Slash,InvalidUtf8,Dot
-
-
-
-## Limitations
-
-Note that Amazon Drive is case insensitive so you can't have a
-file called "Hello.doc" and one called "hello.doc".
-
-Amazon Drive has rate limiting so you may notice errors in the
-sync (429 errors). rclone will automatically retry the sync up to 3
-times by default (see `--retries` flag) which should hopefully work
-around this problem.
-
-Amazon Drive has an internal limit of file sizes that can be uploaded
-to the service. This limit is not officially published, but all files
-larger than this will fail.
-
-At the time of writing (Jan 2016) is in the area of 50 GiB per file.
-This means that larger files are likely to fail.
-
-Unfortunately there is no way for rclone to see that this failure is
-because of file size, so it will retry the operation, as any other
-failure. To avoid this problem, use `--max-size 50000M` option to limit
-the maximum size of uploaded files. Note that `--max-size` does not split
-files into segments, it only ignores files over this size.
-
-`rclone about` is not supported by the Amazon Drive backend. Backends without
-this capability cannot determine free space for an rclone mount or
-use policy `mfs` (most free space) as a member of an rclone union
-remote.
-
-See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
# Amazon S3 Storage Providers
@@ -21817,7 +23510,7 @@ name> remote
Type of storage to configure.
Choose a number from below, or type in your own value
[snip]
-XX / Amazon S3 Compliant Storage Providers including AWS, Ceph, ChinaMobile, ArvanCloud, Dreamhost, IBM COS, Liara, Minio, and Tencent COS
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
[snip]
Storage> s3
@@ -22323,6 +24016,7 @@ permissions are required to be available on the bucket being written to:
* `GetObject`
* `PutObject`
* `PutObjectACL`
+* `CreateBucket` (unless using [s3-no-check-bucket](#s3-no-check-bucket))
When using the `lsd` subcommand, the `ListAllMyBuckets` permission is required.
@@ -22364,6 +24058,7 @@ Notes on above:
that `USER_NAME` has been created.
2. The Resource entry must include both resource ARNs, as one implies
the bucket and the other implies the bucket's objects.
+3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exists, the `"arn:aws:s3:::BUCKET_NAME"` resource doesn't have to be included (see the sketch below).
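+
+For example, assuming a remote named `remote` and an existing bucket
+`BUCKET_NAME`, a copy that skips the bucket check and never attempts bucket
+creation might look like this sketch:
+
+    rclone copy /path/to/files remote:BUCKET_NAME/path --s3-no-check-bucket
+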
For reference, [here's an Ansible script](https://gist.github.com/ebridges/ebfc9042dd7c756cd101cfa807b7ae2b)
that will generate one or more buckets that will work with `rclone sync`.
@@ -23097,10 +24792,10 @@ Properties:
#### --s3-upload-concurrency
-Concurrency for multipart uploads.
+Concurrency for multipart uploads and copies.
This is the number of chunks of the same file that are uploaded
-concurrently.
+concurrently for multipart uploads and copies.
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
@@ -23149,6 +24844,19 @@ Properties:
- Type: bool
- Default: false
+#### --s3-use-dual-stack
+
+If true use AWS S3 dual-stack endpoint (IPv6 support).
+
+See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html)
+
+Properties:
+
+- Config: use_dual_stack
+- Env Var: RCLONE_S3_USE_DUAL_STACK
+- Type: bool
+- Default: false
+
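+For example, a listing over the dual-stack (IPv6) endpoint might look like
+this sketch, assuming a remote named `remote`:
+
+    rclone lsd remote: --s3-use-dual-stack
+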
#### --s3-use-accelerate-endpoint
If true use the AWS S3 accelerated endpoint.
@@ -23453,6 +25161,25 @@ Properties:
- Type: Time
- Default: off
+#### --s3-version-deleted
+
+Show deleted file markers when using versions.
+
+This shows deleted file markers in the listing when using versions. These will appear
+as 0 size files. The only operation which can be performed on them is deletion.
+
+Deleting a delete marker will reveal the previous version.
+
+Deleted files will always show with a timestamp.
+
+
+Properties:
+
+- Config: version_deleted
+- Env Var: RCLONE_S3_VERSION_DELETED
+- Type: bool
+- Default: false
+
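+For example, a listing that shows old versions together with delete markers
+(as 0 size files) might look like this sketch, assuming a remote named
+`remote` and a bucket named `bucket`:
+
+    rclone lsl remote:bucket --s3-versions --s3-version-deleted
+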
#### --s3-decompress
If set this will decompress gzip encoded objects.
@@ -23603,6 +25330,17 @@ Properties:
- Type: Tristate
- Default: unset
+#### --s3-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_S3_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
User metadata is stored as x-amz-meta- keys. S3 metadata keys are case insensitive and are always returned in lower case.
@@ -24183,10 +25921,10 @@ Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
-Storage> 5
+Storage> s3
Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
@@ -24307,18 +26045,11 @@ To configure access to IBM COS S3, follow the steps below:
3. Select "s3" storage.
```
Choose a number from below, or type in your own value
- 1 / Alias for an existing remote
- \ "alias"
- 2 / Amazon Drive
- \ "amazon cloud drive"
- 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, Liara, ArvanCloud, Minio, IBM COS)
- \ "s3"
- 4 / Backblaze B2
- \ "b2"
[snip]
- 23 / HTTP
- \ "http"
-Storage> 3
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
+ \ "s3"
+[snip]
+Storage> s3
```
4. Select IBM COS as the S3 Storage Provider.
@@ -24478,7 +26209,7 @@ Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
-XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -24584,7 +26315,7 @@ Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
-XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -24822,15 +26553,8 @@ name> qiniu
```
Choose a number from below, or type in your own value
- 1 / 1Fichier
- \ (fichier)
- 2 / Akamai NetStorage
- \ (netstorage)
- 3 / Alias for an existing remote
- \ (alias)
- 4 / Amazon Drive
- \ (amazon cloud drive)
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
+[snip]
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -25095,7 +26819,7 @@ Choose `s3` backend
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
-XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -25396,7 +27120,7 @@ Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
[snip]
- 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
[snip]
Storage> s3
@@ -25506,7 +27230,7 @@ Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
...
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
...
Storage> s3
@@ -25762,15 +27486,8 @@ name> leviia
```
Choose a number from below, or type in your own value
- 1 / 1Fichier
- \ (fichier)
- 2 / Akamai NetStorage
- \ (netstorage)
- 3 / Alias for an existing remote
- \ (alias)
- 4 / Amazon Drive
- \ (amazon cloud drive)
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
+[snip]
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -25983,7 +27700,7 @@ Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
- X / Amazon S3 Compliant Storage Providers including AWS, ...Linode, ...and others
+XX / Amazon S3 Compliant Storage Providers including AWS, ...Linode, ...and others
\ (s3)
[snip]
Storage> s3
@@ -26228,13 +27945,8 @@ name> cos
```
Choose a number from below, or type in your own value
-1 / 1Fichier
- \ "fichier"
- 2 / Alias for an existing remote
- \ "alias"
- 3 / Amazon Drive
- \ "amazon cloud drive"
- 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS
+[snip]
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
[snip]
Storage> s3
@@ -26642,7 +28354,7 @@ Type of storage to configure.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, GCS, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
+XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
Storage> s3
@@ -27293,9 +29005,12 @@ Properties:
#### --b2-download-auth-duration
-Time before the authorization token will expire in s or suffix ms|s|m|h|d.
+Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
+
+This is used in combination with "rclone link" for making files
+accessible to the public and sets the duration before the download
+authorization token will expire.
-The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.
Properties:
@@ -27371,6 +29086,17 @@ Properties:
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+#### --b2-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_B2_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the b2 backend.
@@ -27918,6 +29644,17 @@ Properties:
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
+#### --box-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_BOX_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -28624,6 +30361,17 @@ Properties:
- Type: Duration
- Default: 1s
+#### --cache-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_CACHE_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the cache backend.
@@ -29120,6 +30868,17 @@ Properties:
- If meta format is set to "none", rename transactions will always be used.
- This method is EXPERIMENTAL, don't use on production systems.
+#### --chunker-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_CHUNKER_DESCRIPTION
+- Type: string
+- Required: false
+
# Citrix ShareFile
@@ -29421,6 +31180,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot
+#### --sharefile-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SHAREFILE_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -30011,6 +31781,22 @@ Properties:
- Type: bool
- Default: false
+#### --crypt-strict-names
+
+If set, this will raise an error when crypt comes across a filename that can't be decrypted.
+
+(By default, rclone will just log a NOTICE and continue as normal.)
+This can happen if encrypted and unencrypted files are stored in the same
+directory (which is not recommended.) It may also indicate a more serious
+problem that should be investigated.
+
+Properties:
+
+- Config: strict_names
+- Env Var: RCLONE_CRYPT_STRICT_NAMES
+- Type: bool
+- Default: false
+
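+For example, a listing that fails loudly on undecryptable names might look
+like this sketch, assuming a crypt remote named `secret`:
+
+    rclone ls secret: --crypt-strict-names
+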
#### --crypt-filename-encoding
How to encode the encrypted filename to text string.
@@ -30048,6 +31834,17 @@ Properties:
- Type: string
- Default: ".bin"
+#### --crypt-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_CRYPT_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -30216,7 +32013,7 @@ encoding is modified in two ways:
* we strip the padding character `=`
`base32` is used rather than the more efficient `base64` so rclone can be
-used on case insensitive remotes (e.g. Windows, Amazon Drive).
+used on case insensitive remotes (e.g. Windows, Box, Dropbox, Onedrive etc).
### Key derivation
@@ -30386,6 +32183,17 @@ Properties:
- Type: SizeSuffix
- Default: 20Mi
+#### --compress-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_COMPRESS_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -30544,6 +32352,21 @@ Properties:
- Type: SpaceSepList
- Default:
+### Advanced options
+
+Here are the Advanced options specific to combine (Combine several remotes into one).
+
+#### --combine-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_COMBINE_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -31001,6 +32824,17 @@ Properties:
- Type: Duration
- Default: 10m0s
+#### --dropbox-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_DROPBOX_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -31324,6 +33158,17 @@ Properties:
- Type: Encoding
- Default: Slash,Del,Ctl,InvalidUtf8,Dot
+#### --filefabric-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_FILEFABRIC_DESCRIPTION
+- Type: string
+- Required: false
+
# FTP
@@ -31775,6 +33620,17 @@ Properties:
- "Ctl,LeftPeriod,Slash"
- VsFTPd can't handle file names starting with dot
+#### --ftp-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_FTP_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -32523,6 +34379,17 @@ Properties:
- Type: Encoding
- Default: Slash,CrLf,InvalidUtf8,Dot
+#### --gcs-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_GCS_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -33916,10 +35783,23 @@ Properties:
- "true"
- Get GCP IAM credentials from the environment (env vars or IAM).
+#### --drive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_DRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
User metadata is stored in the properties field of the drive object.
+Metadata is supported on files and directories.
+
Here are the possible system metadata items for the drive backend.
| Name | Help | Type | Example | Read Only |
@@ -34746,6 +36626,17 @@ Properties:
- Type: Duration
- Default: 10m0s
+#### --gphotos-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_GPHOTOS_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -35061,6 +36952,17 @@ Properties:
- Type: SizeSuffix
- Default: 0
+#### --hasher-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_HASHER_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -35410,6 +37312,17 @@ Properties:
- Type: Encoding
- Default: Slash,Colon,Del,Ctl,InvalidUtf8,Dot
+#### --hdfs-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_HDFS_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -35831,6 +37744,17 @@ Properties:
- Type: Encoding
- Default: Slash,Dot
+#### --hidrive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_HIDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -36066,6 +37990,17 @@ Properties:
- Type: bool
- Default: false
+#### --http-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_HTTP_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the http backend.
@@ -36304,6 +38239,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Dollar,Question,Hash,Percent,BackSlash,Del,Ctl,InvalidUtf8,Dot,SquareBracket
+#### --imagekit-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_IMAGEKIT_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -36587,6 +38533,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot
+#### --internetarchive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_INTERNETARCHIVE_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Metadata fields provided by Internet Archive.
@@ -37061,6 +39018,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot
+#### --jottacloud-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_JOTTACLOUD_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Jottacloud has limited support for metadata, currently an extended set of timestamps.
@@ -37303,6 +39271,17 @@ Properties:
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+#### --koofr-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_KOOFR_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -37520,6 +39499,21 @@ Properties:
- Type: string
- Required: true
+### Advanced options
+
+Here are the Advanced options specific to linkbox (Linkbox).
+
+#### --linkbox-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_LINKBOX_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -37935,6 +39929,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot
+#### --mailru-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_MAILRU_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -38224,6 +40229,17 @@ Properties:
- Type: Encoding
- Default: Slash,InvalidUtf8,Dot
+#### --mega-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_MEGA_DESCRIPTION
+- Type: string
+- Required: false
+
### Process `killed`
@@ -38299,6 +40315,21 @@ The memory backend replaces the [default restricted characters
set](https://rclone.org/overview/#restricted-characters).
+### Advanced options
+
+Here are the Advanced options specific to memory (In memory object storage system.).
+
+#### --memory-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_MEMORY_DESCRIPTION
+- Type: string
+- Required: false
+
# Akamai NetStorage
@@ -38539,6 +40570,17 @@ Properties:
- "https"
- HTTPS protocol
+#### --netstorage-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_NETSTORAGE_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the netstorage backend.
@@ -39404,6 +41446,35 @@ Properties:
- Type: bool
- Default: false
+#### --azureblob-delete-snapshots
+
+Set to specify how to deal with snapshots on blob deletion.
+
+Properties:
+
+- Config: delete_snapshots
+- Env Var: RCLONE_AZUREBLOB_DELETE_SNAPSHOTS
+- Type: string
+- Required: false
+- Choices:
+ - ""
+ - By default, the delete operation fails if a blob has snapshots
+ - "include"
+ - Specify 'include' to remove the root blob and all its snapshots
+ - "only"
+ - Specify 'only' to remove only the snapshots but keep the root blob.
+
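+For example, a delete that also removes any snapshots of the deleted blobs
+might look like this sketch, assuming a remote named `remote` and a container
+named `container`:
+
+    rclone delete remote:container/dir --azureblob-delete-snapshots include
+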
+#### --azureblob-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_AZUREBLOB_DESCRIPTION
+- Type: string
+- Required: false
+
### Custom upload headers
@@ -40130,6 +42201,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8,Dot
+#### --azurefiles-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_AZUREFILES_DESCRIPTION
+- Type: string
+- Required: false
+
### Custom upload headers
@@ -40769,7 +42851,7 @@ Properties:
If set rclone will use delta listing to implement recursive listings.
-If this flag is set the the onedrive backend will advertise `ListR`
+If this flag is set the onedrive backend will advertise `ListR`
support for recursive listings.
Setting this flag speeds up these things greatly:
@@ -40802,6 +42884,30 @@ Properties:
- Type: bool
- Default: false
+#### --onedrive-metadata-permissions
+
+Control whether permissions should be read or written in metadata.
+
+Reading permissions metadata from files can be done quickly, but it
+isn't always desirable to set the permissions from the metadata.
+
+
+Properties:
+
+- Config: metadata_permissions
+- Env Var: RCLONE_ONEDRIVE_METADATA_PERMISSIONS
+- Type: Bits
+- Default: off
+- Examples:
+ - "off"
+ - Do not read or write the value
+ - "read"
+ - Read the value only
+ - "write"
+ - Write the value only
+ - "read,write"
+ - Read and Write the value.
+
#### --onedrive-encoding
The encoding for the backend.
@@ -40815,6 +42921,191 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot
+#### --onedrive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_ONEDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
+### Metadata
+
+OneDrive supports System Metadata (not User Metadata, as of this writing) for
+both files and directories. Much of the metadata is read-only, and there are some
+differences between OneDrive Personal and Business (see table below for
+details).
+
+Permissions are also supported, if `--onedrive-metadata-permissions` is set. The
+accepted values for `--onedrive-metadata-permissions` are `read`, `write`,
+`read,write`, and `off` (the default). `write` supports adding new permissions,
+updating the "role" of existing permissions, and removing permissions. Updating
+and removing require the Permission ID to be known, so it is recommended to use
+`read,write` instead of `write` if you wish to update/remove permissions.
+
+Permissions are read/written in JSON format using the same schema as the
+[OneDrive API](https://learn.microsoft.com/en-us/onedrive/developer/rest-api/resources/permission?view=odsp-graph-online),
+which differs slightly between OneDrive Personal and Business.
+
+Example for OneDrive Personal:
+```json
+[
+ {
+ "id": "1234567890ABC!123",
+ "grantedTo": {
+ "user": {
+ "id": "ryan@contoso.com"
+ },
+ "application": {},
+ "device": {}
+ },
+ "invitation": {
+ "email": "ryan@contoso.com"
+ },
+ "link": {
+ "webUrl": "https://1drv.ms/t/s!1234567890ABC"
+ },
+ "roles": [
+ "read"
+ ],
+ "shareId": "s!1234567890ABC"
+ }
+]
+```
+
+Example for OneDrive Business:
+```json
+[
+ {
+ "id": "48d31887-5fad-4d73-a9f5-3c356e68a038",
+ "grantedToIdentities": [
+ {
+ "user": {
+ "displayName": "ryan@contoso.com"
+ },
+ "application": {},
+ "device": {}
+ }
+ ],
+ "link": {
+ "type": "view",
+ "scope": "users",
+ "webUrl": "https://contoso.sharepoint.com/:w:/t/design/a577ghg9hgh737613bmbjf839026561fmzhsr85ng9f3hjck2t5s"
+ },
+ "roles": [
+ "read"
+ ],
+ "shareId": "u!LKj1lkdlals90j1nlkascl"
+ },
+ {
+ "id": "5D33DD65C6932946",
+ "grantedTo": {
+ "user": {
+ "displayName": "John Doe",
+ "id": "efee1b77-fb3b-4f65-99d6-274c11914d12"
+ },
+ "application": {},
+ "device": {}
+ },
+ "roles": [
+ "owner"
+ ],
+ "shareId": "FWxc1lasfdbEAGM5fI7B67aB5ZMPDMmQ11U"
+ }
+]
+```
+
+To write permissions, pass in a "permissions" metadata key using this same
+format. The [`--metadata-mapper`](https://rclone.org/docs/#metadata-mapper) tool can
+be very helpful for this.
+
+When adding permissions, an email address can be provided in the `User.ID` or
+`DisplayName` properties of `grantedTo` or `grantedToIdentities`. Alternatively,
+an ObjectID can be provided in `User.ID`. At least one valid recipient must be
+provided in order to add a permission for a user. Creating a Public Link is also
+supported, if `Link.Scope` is set to `"anonymous"`.
+
+Example request to add a "read" permission:
+
+```json
+[
+ {
+ "id": "",
+ "grantedTo": {
+ "user": {},
+ "application": {},
+ "device": {}
+ },
+ "grantedToIdentities": [
+ {
+ "user": {
+ "id": "ryan@contoso.com"
+ },
+ "application": {},
+ "device": {}
+ }
+ ],
+ "roles": [
+ "read"
+ ]
+ }
+]
+```
+
+Note that adding a permission can fail if a conflicting permission already
+exists for the file/folder.
+
+To update an existing permission, include both the Permission ID and the new
+`roles` to be assigned. `roles` is the only property that can be changed.
+
+To remove permissions, pass in a blob containing only the permissions you wish
+to keep (which can be empty, to remove all.)
+
+Note that both reading and writing permissions requires extra API calls, so if
+you don't need to read or write permissions it is recommended to omit
+`--onedrive-metadata-permissions`.
+
+Metadata and permissions are supported for Folders (directories) as well as
+Files. Note that setting the `mtime` or `btime` on a Folder requires one extra
+API call on OneDrive Business only.
+
+OneDrive does not currently support User Metadata. When writing metadata, only
+writeable system properties will be written -- any read-only or unrecognized keys
+passed in will be ignored.
+
+TIP: to see the metadata and permissions for any file or folder, run:
+
+```
+rclone lsjson remote:path --stat -M --onedrive-metadata-permissions read
+```
+
+Here are the possible system metadata items for the onedrive backend.
+
+| Name | Help | Type | Example | Read Only |
+|------|------|------|---------|-----------|
+| btime | Time of file birth (creation) with S accuracy (mS for OneDrive Personal). | RFC 3339 | 2006-01-02T15:04:05Z | N |
+| content-type | The MIME type of the file. | string | text/plain | **Y** |
+| created-by-display-name | Display name of the user that created the item. | string | John Doe | **Y** |
+| created-by-id | ID of the user that created the item. | string | 48d31887-5fad-4d73-a9f5-3c356e68a038 | **Y** |
+| description | A short description of the file. Max 1024 characters. Only supported for OneDrive Personal. | string | Contract for signing | N |
+| id | The unique identifier of the item within OneDrive. | string | 01BYE5RZ6QN3ZWBTUFOFD3GSPGOHDJD36K | **Y** |
+| last-modified-by-display-name | Display name of the user that last modified the item. | string | John Doe | **Y** |
+| last-modified-by-id | ID of the user that last modified the item. | string | 48d31887-5fad-4d73-a9f5-3c356e68a038 | **Y** |
+| malware-detected | Whether OneDrive has detected that the item contains malware. | boolean | true | **Y** |
+| mtime | Time of last modification with S accuracy (mS for OneDrive Personal). | RFC 3339 | 2006-01-02T15:04:05Z | N |
+| package-type | If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. | string | oneNote | **Y** |
+| permissions | Permissions in a JSON dump of OneDrive format. Enable with --onedrive-metadata-permissions. Properties: id, grantedTo, grantedToIdentities, invitation, inheritedFrom, link, roles, shareId | JSON | {} | N |
+| shared-by-id | ID of the user that shared the item (if shared). | string | 48d31887-5fad-4d73-a9f5-3c356e68a038 | **Y** |
+| shared-owner-id | ID of the owner of the shared item (if shared). | string | 48d31887-5fad-4d73-a9f5-3c356e68a038 | **Y** |
+| shared-scope | If shared, indicates the scope of how the item is shared: anonymous, organization, or users. | string | users | **Y** |
+| shared-time | Time when the item was shared, with S accuracy (mS for OneDrive Personal). | RFC 3339 | 2006-01-02T15:04:05Z | **Y** |
+| utime | Time of upload with S accuracy (mS for OneDrive Personal). | RFC 3339 | 2006-01-02T15:04:05Z | **Y** |
+
+See the [metadata](https://rclone.org/docs/#metadata) docs for more info.
+
## Limitations
@@ -41204,6 +43495,17 @@ Properties:
- Type: SizeSuffix
- Default: 10Mi
+#### --opendrive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_OPENDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -41278,13 +43580,17 @@ Press Enter for the default (env_auth).
2 | you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
| https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
\ (user_principal_auth)
- / use instance principals to authorize an instance to make API calls.
- 3 | each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
+ / use instance principals to authorize an instance to make API calls.
+ 3 | each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
| https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
\ (instance_principal_auth)
- 4 / use resource principals to make API calls
+ / use workload identity to grant Kubernetes pods policy-driven access to Oracle Cloud
+ 4 | Infrastructure (OCI) resources using OCI Identity and Access Management (IAM).
+ | https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm
+ \ (workload_identity_auth)
+ 5 / use resource principals to make API calls
\ (resource_principal_auth)
- 5 / no credentials needed, this is typically for reading public buckets
+ 6 / no credentials needed, this is typically for reading public buckets
\ (no_auth)
provider> 2
@@ -41370,6 +43676,7 @@ Rclone supports the following OCI authentication provider.
User Principal
Instance Principal
Resource Principal
+ Workload Identity
No authentication
### User Principal
@@ -41443,6 +43750,14 @@ Sample rclone configuration file for Authentication Provider Resource Principal:
region = us-ashburn-1
provider = resource_principal_auth
+### Workload Identity
+Workload Identity auth may be used when running Rclone from a Kubernetes pod on a Container Engine for Kubernetes (OKE) cluster.
+For more details on configuring Workload Identity, see [Granting Workloads Access to OCI Resources](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm).
+To use workload identity, ensure Rclone is started with these environment variables set in its process.
+
+ export OCI_RESOURCE_PRINCIPAL_VERSION=2.2
+ export OCI_RESOURCE_PRINCIPAL_REGION=us-ashburn-1
+
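+A matching rclone configuration might then look like this sketch (the
+namespace, compartment and region values below are placeholders):
+
+    [oos]
+    type = oracleobjectstorage
+    namespace = idbamagbg734
+    compartment = ocid1.compartment.oc1..aaaaaaaa
+    region = us-ashburn-1
+    provider = workload_identity_auth
+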
### No authentication
Public buckets do not require any authentication mechanism to read objects.
@@ -41525,6 +43840,9 @@ Properties:
- use instance principals to authorize an instance to make API calls.
- each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
- https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
+ - "workload_identity_auth"
+ - use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM).
+ - https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm
- "resource_principal_auth"
- use resource principals to make API calls
- "no_auth"
@@ -41910,6 +44228,17 @@ Properties:
- "AES256"
- AES256
+#### --oos-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_OOS_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the oracleobjectstorage backend.
@@ -41990,6 +44319,47 @@ Options:
- "max-age": Max age of upload to delete
+### restore
+
+Restore objects from Archive to Standard storage
+
+ rclone backend restore remote: [options] [+]
+
+This command can be used to restore one or more objects from Archive to Standard storage.
+
+ Usage Examples:
+
+ rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
+ rclone backend restore oos:bucket -o hours=HOURS
+
+This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
+
+ rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
+
+All the objects shown will be marked for restore, then
+
+ rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
+
+    It returns a list of status dictionaries with Object Name and Status
+    keys. The Status will be "RESTORED" if it was successful or an error message
+    if not.
+
+    [
+        {
+            "Object": "test.txt",
+            "Status": "RESTORED"
+        },
+        {
+            "Object": "test/file4.txt",
+            "Status": "RESTORED"
+        }
+    ]
+
+
+Options:
+
+- "hours": The number of hours for which this object will be restored. Default is 24 hrs.
+
## Tutorials
@@ -42301,6 +44671,17 @@ Properties:
- Type: Encoding
- Default: Slash,Ctl,InvalidUtf8
+#### --qingstor-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_QINGSTOR_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -42535,7 +44916,7 @@ Properties:
#### --quatrix-hard-delete
-Delete files permanently rather than putting them into the trash.
+Delete files permanently rather than putting them into the trash
Properties:
@@ -42544,6 +44925,28 @@ Properties:
- Type: bool
- Default: false
+#### --quatrix-skip-project-folders
+
+Skip project folders in operations
+
+Properties:
+
+- Config: skip_project_folders
+- Env Var: RCLONE_QUATRIX_SKIP_PROJECT_FOLDERS
+- Type: bool
+- Default: false
+
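+For example, a listing that ignores project folders might look like this
+sketch, assuming a remote named `quatrix`:
+
+    rclone lsd quatrix: --quatrix-skip-project-folders
+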
+#### --quatrix-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_QUATRIX_DESCRIPTION
+- Type: string
+- Required: false
+
## Storage usage
@@ -42745,6 +45148,17 @@ Properties:
- Type: Encoding
- Default: Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot
+#### --sia-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SIA_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -43341,6 +45755,17 @@ Properties:
- Type: Encoding
- Default: Slash,InvalidUtf8
+#### --swift-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SWIFT_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -43668,6 +46093,17 @@ Properties:
- Type: string
- Required: false
+#### --pcloud-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PCLOUD_DESCRIPTION
+- Type: string
+- Required: false
+
# PikPak
@@ -43906,6 +46342,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
+#### --pikpak-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PIKPAK_DESCRIPTION
+- Type: string
+- Required: false
+
## Backend commands
Here are the commands specific to the pikpak backend.
@@ -44176,6 +46623,17 @@ Properties:
- Type: Encoding
- Default: Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot
+#### --premiumizeme-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PREMIUMIZEME_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -44515,6 +46973,17 @@ Properties:
- Type: bool
- Default: true
+#### --protondrive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PROTONDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -44737,6 +47206,17 @@ Properties:
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+#### --putio-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PUTIO_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -45074,6 +47554,17 @@ Properties:
- Type: bool
- Default: true
+#### --protondrive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PROTONDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -45486,6 +47977,17 @@ Properties:
- Type: Encoding
- Default: Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8
+#### --seafile-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SEAFILE_DESCRIPTION
+- Type: string
+- Required: false
+
# SFTP
@@ -46526,6 +49028,17 @@ Properties:
- Type: bool
- Default: false
+#### --sftp-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SFTP_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -46807,6 +49320,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot
+#### --smb-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SMB_DESCRIPTION
+- Type: string
+- Required: false
+
# Storj
@@ -47098,6 +49622,21 @@ Properties:
- Type: string
- Required: false
+### Advanced options
+
+Here are the Advanced options specific to storj (Storj Decentralized Cloud Storage).
+
+#### --storj-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_STORJ_DESCRIPTION
+- Type: string
+- Required: false
+
## Usage
@@ -47497,6 +50036,17 @@ Properties:
- Type: Encoding
- Default: Slash,Ctl,InvalidUtf8,Dot
+#### --sugarsync-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SUGARSYNC_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -47655,6 +50205,17 @@ Properties:
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot
+#### --uptobox-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_UPTOBOX_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -47947,6 +50508,17 @@ Properties:
- Type: SizeSuffix
- Default: 1Gi
+#### --union-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_UNION_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -48223,13 +50795,35 @@ Properties:
- Type: SizeSuffix
- Default: 10Mi
+#### --webdav-owncloud-exclude-shares
+
+Exclude ownCloud shares
+
+Properties:
+
+- Config: owncloud_exclude_shares
+- Env Var: RCLONE_WEBDAV_OWNCLOUD_EXCLUDE_SHARES
+- Type: bool
+- Default: false
+
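+For example, a listing that leaves out shared files and folders might look
+like this sketch, assuming a remote named `owncloud`:
+
+    rclone lsd owncloud: --webdav-owncloud-exclude-shares
+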
+#### --webdav-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_WEBDAV_DESCRIPTION
+- Type: string
+- Required: false
+
## Provider notes
See below for notes on specific providers.
-## Fastmail Files
+### Fastmail Files
Use `https://webdav.fastmail.com/` or a subdirectory as the URL,
and your Fastmail email `username@domain.tld` as the username.
@@ -48624,6 +51218,17 @@ Properties:
- Type: Encoding
- Default: Slash,Del,Ctl,InvalidUtf8,Dot
+#### --yandex-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_YANDEX_DESCRIPTION
+- Type: string
+- Required: false
+
## Limitations
@@ -48878,6 +51483,17 @@ Properties:
- Type: Encoding
- Default: Del,Ctl,InvalidUtf8
+#### --zoho-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_ZOHO_DESCRIPTION
+- Type: string
+- Required: false
+
## Setting up your own client_id
@@ -49457,6 +52073,17 @@ Properties:
- Type: Encoding
- Default: Slash,Dot
+#### --local-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_LOCAL_DESCRIPTION
+- Type: string
+- Required: false
+
### Metadata
Depending on which OS is in use the local backend may return only some
@@ -49468,6 +52095,8 @@ netbsd, macOS and Solaris. It is **not** supported on Windows yet
User metadata is stored as extended attributes (which may not be
supported by all file systems) under the "user.*" prefix.
+Metadata is supported on files and directories.
+
Here are the possible system metadata items for the local backend.
| Name | Help | Type | Example | Read Only |
@@ -49516,6 +52145,238 @@ Options:
# Changelog
+## v1.66.0 - 2024-03-10
+
+[See commits](https://github.com/rclone/rclone/compare/v1.65.0...v1.66.0)
+
+* Major features
+ * Rclone will now sync directory modification times if the backend supports it.
+ * This can be disabled with [--no-update-dir-modtime](https://rclone.org/docs/#no-update-dir-modtime)
+ * See [the overview](https://rclone.org/overview/#features) and look for the `D` flags in the `ModTime` column to see which backends support it.
+ * Rclone will now sync directory metadata if the backend supports it when `-M`/`--metadata` is in use.
+ * See [the overview](https://rclone.org/overview/#features) and look for the `D` flags in the `Metadata` column to see which backends support it.
+ * Bisync has received many updates see below for more details or [bisync's changelog](https://rclone.org/bisync/#changelog)
+* Removed backends
+ * amazonclouddrive: Remove Amazon Drive backend code and docs (Nick Craig-Wood)
+* New Features
+ * backend
+ * Add description field for all backends (Paul Stern)
+ * build
+ * Update to go1.22 and make go1.20 the minimum required version (Nick Craig-Wood)
+ * Fix `CVE-2024-24786` by upgrading `google.golang.org/protobuf` (Nick Craig-Wood)
+ * check: Respect `--no-unicode-normalization` and `--ignore-case-sync` for `--checkfile` (nielash)
+ * cmd: Much improved shell auto completion which reduces the size of the completion file and works faster (Nick Craig-Wood)
+ * doc updates (albertony, ben-ba, Eli, emyarod, huajin tong, Jack Provance, kapitainsky, keongalvin, Nick Craig-Wood, nielash, rarspace01, rzitzer, Tera, Vincent Murphy)
+ * fs: Add more detailed logging for file includes/excludes (Kyle Reynolds)
+ * lsf
+ * Add `--time-format` flag (nielash)
+ * Make metadata appear for directories (Nick Craig-Wood)
+ * lsjson: Make metadata appear for directories (Nick Craig-Wood)
+ * rc
+ * Add `srcFs` and `dstFs` to `core/stats` and `core/transferred` stats (Nick Craig-Wood)
+ * Add `operations/hashsum` to the rc as `rclone hashsum` equivalent (Nick Craig-Wood)
+ * Add `config/paths` to the rc as `rclone config paths` equivalent (Nick Craig-Wood)
+ * sync
+ * Optionally report list of synced paths to file (nielash)
+ * Implement directory sync for mod times and metadata (Nick Craig-Wood)
+ * Don't set directory modtimes if already set (nielash)
+ * Don't sync directory modtimes from backends which don't have directories (Nick Craig-Wood)
+* Bug Fixes
+ * backend
+ * Make backends which use oauth implement the `Shutdown` and shutdown the oauth properly (rkonfj)
+ * bisync
+ * Handle unicode and case normalization consistently (nielash)
+ * Partial uploads known issue on `local`/`ftp`/`sftp` has been resolved (unless using `--inplace`) (nielash)
+ * Fixed handling of unicode normalization and case insensitivity, support for [`--fix-case`](https://rclone.org/docs/#fix-case), [`--ignore-case-sync`](/docs/#ignore-case-sync), [`--no-unicode-normalization`](/docs/#no-unicode-normalization) (nielash)
+ * Bisync no longer fails to find the correct listing file when configs are overridden with backend-specific flags. (nielash)
+ * nfsmount
+ * Fix exit after external unmount (nielash)
+ * Fix `--volname` being ignored (nielash)
+ * operations
+ * Fix renaming a file on macOS (nielash)
+ * Fix case-insensitive moves in operations.Move (nielash)
+ * Fix TestCaseInsensitiveMoveFileDryRun on chunker integration tests (nielash)
+ * Fix TestMkdirModTime test (Nick Craig-Wood)
+ * Fix TestSetDirModTime for backends with SetDirModTime but not Metadata (Nick Craig-Wood)
+ * Fix typo in log messages (nielash)
+ * serve nfs: Fix writing files via Finder on macOS (nielash)
+ * serve restic: Fix error handling (Michael Eischer)
+ * serve webdav: Fix `--baseurl` without leading / (Nick Craig-Wood)
+ * stats: Fix race between ResetCounters and stopAverageLoop called from time.AfterFunc (Nick Craig-Wood)
+ * sync
+ * `--fix-case` flag to rename case insensitive dest (nielash)
+ * Use operations.DirMove instead of sync.MoveDir for `--fix-case` (nielash)
+ * systemd: Fix detection and switch to the coreos package everywhere rather than having 2 separate libraries (Anagh Kumar Baranwal)
+* Mount
+ * Fix macOS not noticing errors with `--daemon` (Nick Craig-Wood)
+ * Notice daemon dying much quicker (Nick Craig-Wood)
+* VFS
+ * Fix unicode normalization on macOS (nielash)
+* Bisync
+ * Copies and deletes are now handled in one operation instead of two (nielash)
+ * `--track-renames` and `--backup-dir` are now supported (nielash)
+ * Final listings are now generated from sync results, to avoid needing to re-list (nielash)
+ * Bisync is now much more resilient to changes that happen during a bisync run, and far less prone to critical errors / undetected changes (nielash)
+ * Bisync is now capable of rolling a file listing back in cases of uncertainty, essentially marking the file as needing to be rechecked next time. (nielash)
+ * A few basic terminal colors are now supported, controllable with [`--color`](https://rclone.org/docs/#color-when) (`AUTO`|`NEVER`|`ALWAYS`) (nielash)
+ * Initial listing snapshots of Path1 and Path2 are now generated concurrently, using the same "march" infrastructure as `check` and `sync`, for performance improvements and less risk of error. (nielash)
+ * `--resync` is now much more efficient (especially for users of `--create-empty-src-dirs`) (nielash)
+ * Google Docs (and other files of unknown size) are now supported (with the same options as in `sync`) (nielash)
+ * Equality checks before a sync conflict rename now fall back to `cryptcheck` (when possible) or `--download`, (nielash)
+instead of `--size-only`, when `check` is not available.
+ * Bisync now fully supports comparing based on any combination of size, modtime, and checksum, lifting the prior restriction on backends without modtime support. (nielash)
+ * Bisync now supports a "Graceful Shutdown" mode to cleanly cancel a run early without requiring `--resync`. (nielash)
+ * New `--recover` flag allows robust recovery in the event of interruptions, without requiring `--resync`. (nielash)
+ * A new `--max-lock` setting allows lock files to automatically renew and expire, for better automatic recovery when a run is interrupted. (nielash)
+ * Bisync now supports auto-resolving sync conflicts and customizing rename behavior with new [`--conflict-resolve`](#conflict-resolve), [`--conflict-loser`](#conflict-loser), and [`--conflict-suffix`](#conflict-suffix) flags. (nielash)
+ * A new [`--resync-mode`](#resync-mode) flag allows more control over which version of a file gets kept during a `--resync`. (nielash)
+ * Bisync now supports [`--retries`](https://rclone.org/docs/#retries-int) and [`--retries-sleep`](/docs/#retries-sleep-time) (when [`--resilient`](#resilient) is set.) (nielash)
+ * Clarify file operation directions in dry-run logs (Kyle Reynolds)
+* Local
+ * Fix cleanRootPath on Windows after go1.21.4 stdlib update (nielash)
+ * Implement setting modification time on directories (nielash)
+ * Implement modtime and metadata for directories (Nick Craig-Wood)
+ * Fix setting of btime on directories on Windows (Nick Craig-Wood)
+ * Delete backend implementation of Purge to speed up and make stats (Nick Craig-Wood)
+ * Support metadata setting and mapping on server side Move (Nick Craig-Wood)
+* Cache
+ * Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+ * Implement setting metadata on directories (Nick Craig-Wood)
+* Crypt
+ * Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+ * Implement setting metadata on directories (Nick Craig-Wood)
+ * Improve handling of undecryptable file names (nielash)
+ * Add missing error check spotted by linter (Nick Craig-Wood)
+* Azure Blob
+ * Implement `--azureblob-delete-snapshots` (Nick Craig-Wood)
+* B2
+ * Clarify exactly what `--b2-download-auth-duration` does in the docs (Nick Craig-Wood)
+* Chunker
+ * Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+ * Implement setting metadata on directories (Nick Craig-Wood)
+* Combine
+ * Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+ * Implement setting metadata on directories (Nick Craig-Wood)
+ * Fix directory metadata error on upstream root (nielash)
+ * Fix directory move across upstreams (nielash)
+* Compress
+ * Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+ * Implement setting metadata on directories (Nick Craig-Wood)
+* Drive
+ * Implement setting modification time on directories (nielash)
+ * Implement modtime and metadata setting for directories (Nick Craig-Wood)
+ * Support metadata setting and mapping on server side Move,Copy (Nick Craig-Wood)
+* FTP
+ * Fix mkdir with rsftp which is returning the wrong code (Nick Craig-Wood)
+* Hasher
+ * Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+ * Implement setting metadata on directories (Nick Craig-Wood)
+ * Fix error from trying to stop an already-stopped db (nielash)
+ * Look for cached hash if passed hash unexpectedly blank (nielash)
+* Imagekit
+ * Updated docs and web content (Harshit Budhraja)
+ * Updated overview - supported operations (Harshit Budhraja)
+* Mega
+ * Fix panic with go1.22 (Nick Craig-Wood)
+* Netstorage
+ * Fix Root to return correct directory when pointing to a file (Nick Craig-Wood)
+* Onedrive
+ * Add metadata support (nielash)
+* Opendrive
+ * Fix moving file/folder within the same parent dir (nielash)
+* Oracle Object Storage
+ * Support `backend restore` command (Nikhil Ahuja)
+ * Support workload identity authentication for OKE (Anders Swanson)
+* Protondrive
+ * Fix encoding of Root method (Nick Craig-Wood)
+* Quatrix
+ * Fix `Content-Range` header (Volodymyr)
+ * Add option to skip project folders (Oksana Zhykina)
+ * Fix Root to return correct directory when pointing to a file (Nick Craig-Wood)
+* S3
+ * Add `--s3-version-deleted` to show delete markers in listings when using versions. (Nick Craig-Wood)
+ * Add IPv6 support with option `--s3-use-dual-stack` (Anthony Metzidis)
+ * Copy parts in parallel when doing chunked server side copy (Nick Craig-Wood)
+ * GCS provider: fix server side copy of files bigger than 5G (Nick Craig-Wood)
+ * Support metadata setting and mapping on server side Copy (Nick Craig-Wood)
+* Seafile
+ * Fix download/upload error when `FILE_SERVER_ROOT` is relative (DanielEgbers)
+ * Fix Root to return correct directory when pointing to a file (Nick Craig-Wood)
+* SFTP
+ * Implement setting modification time on directories (nielash)
+ * Set directory modtimes update on write flag (Nick Craig-Wood)
+ * Shorten wait delay for external ssh binaries now that we are using go1.20 (Nick Craig-Wood)
+* Swift
+ * Avoid unnecessary container versioning check (Joe Cai)
+* Union
+ * Implement setting modification time on directories (if supported by wrapped remote) (nielash)
+ * Implement setting metadata on directories (Nick Craig-Wood)
+* WebDAV
+ * Reduce priority of chunks upload log (Gabriel Ramos)
+    * owncloud: Add config `owncloud_exclude_shares` which allows excluding shared files and folders when listing remote resources (Thomas Müller)
+
+## v1.65.2 - 2024-01-24
+
+[See commits](https://github.com/rclone/rclone/compare/v1.65.1...v1.65.2)
+
+* Bug Fixes
+ * build: bump github.com/cloudflare/circl from 1.3.6 to 1.3.7 (dependabot)
+ * docs updates (Nick Craig-Wood, kapitainsky, nielash, Tera, Harshit Budhraja)
+* VFS
+ * Fix stale data when using `--vfs-cache-mode` full (Nick Craig-Wood)
+* Azure Blob
+ * **IMPORTANT** Fix data corruption bug - see [#7590](https://github.com/rclone/rclone/issues/7590) (Nick Craig-Wood)
+
+## v1.65.1 - 2024-01-08
+
+[See commits](https://github.com/rclone/rclone/compare/v1.65.0...v1.65.1)
+
+* Bug Fixes
+ * build
+ * Bump golang.org/x/crypto to fix ssh terrapin CVE-2023-48795 (dependabot)
+ * Update to go1.21.5 to fix Windows path problems (Nick Craig-Wood)
+ * Fix docker build on arm/v6 (Nick Craig-Wood)
+ * install.sh: fix harmless error message on install (Nick Craig-Wood)
+ * accounting: fix stats to show server side transfers (Nick Craig-Wood)
+ * doc fixes (albertony, ben-ba, Eli Orzitzer, emyarod, keongalvin, rarspace01)
+ * nfsmount: Compile for all unix oses, add `--sudo` and fix error/option handling (Nick Craig-Wood)
+ * operations: Fix files moved by rclone move not being counted as transfers (Nick Craig-Wood)
+ * oauthutil: Avoid panic when `*token` and `*ts.token` are the same (rkonfj)
+ * serve s3: Fix listing oddities (Nick Craig-Wood)
+* VFS
+ * Note that `--vfs-refresh` runs in the background (Nick Craig-Wood)
+* Azurefiles
+ * Fix storage base url (Oksana)
+* Crypt
+ * Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+* Chunker
+ * Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+* Compress
+ * Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+* Dropbox
+ * Fix used space on dropbox team accounts (Nick Craig-Wood)
+* FTP
+ * Fix multi-thread copy (WeidiDeng)
+* Googlephotos
+ * Fix nil pointer exception when batch failed (Nick Craig-Wood)
+* Hasher
+ * Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+ * Fix invalid memory address error when MaxAge == 0 (nielash)
+* Onedrive
+ * Fix error listing: unknown object type `` (Nick Craig-Wood)
+ * Fix "unauthenticated: Unauthenticated" errors when uploading (Nick Craig-Wood)
+* Oracleobjectstorage
+ * Fix object storage endpoint for custom endpoints (Manoj Ghosh)
+    * Multipart copy: create bucket if it doesn't exist (Manoj Ghosh)
+* Protondrive
+ * Fix CVE-2023-45286 / GHSA-xwh9-gc39-5298 (Nick Craig-Wood)
+* S3
+ * Fix crash if no UploadId in multipart upload (Nick Craig-Wood)
+* Smb
+ * Fix shares not listed by updating go-smb2 (halms)
+* Union
+ * Fix rclone move a file over itself deleting the file (Nick Craig-Wood)
+
## v1.65.0 - 2023-11-26
[See commits](https://github.com/rclone/rclone/compare/v1.64.0...v1.65.0)
@@ -54432,10 +57293,13 @@ Point release to fix hubic and azureblob backends.
## Limitations
-### Directory timestamps aren't preserved
+### Directory timestamps aren't preserved on some backends
-Rclone doesn't currently preserve the timestamps of directories. This
-is because rclone only really considers objects when syncing.
+As of `v1.66`, rclone supports syncing directory modtimes, if the backend
+supports it. Some backends do not support it -- see
+[overview](https://rclone.org/overview/) for a complete list. Additionally, note
+that empty directories are not synced by default (this can be enabled with
+`--create-empty-src-dirs`).
### Rclone struggles with millions of files in a directory/bucket
@@ -54799,7 +57663,7 @@ put them back in again.` >}}
* Scott McGillivray
* Bjørn Erik Pedersen
* Lukas Loesche
- * emyarod
+ * emyarod
* T.C. Ferguson
* Brandur
* Dario Giovannetti
@@ -55547,6 +58411,27 @@ put them back in again.` >}}
* Alen Šiljak
* 你知道未来吗
* Abhinav Dhiman <8640877+ahnv@users.noreply.github.com>
+ * halms <7513146+halms@users.noreply.github.com>
+ * ben-ba
+ * Eli Orzitzer
+ * Anthony Metzidis
+ * emyarod
+ * keongalvin
+ * rarspace01
+ * Paul Stern
+ * Nikhil Ahuja
+ * Harshit Budhraja <52413945+harshit-budhraja@users.noreply.github.com>
+ * Tera <24725862+teraa@users.noreply.github.com>
+ * Kyle Reynolds
+ * Michael Eischer
+ * Thomas Müller <1005065+DeepDiver1975@users.noreply.github.com>
+ * DanielEgbers <27849724+DanielEgbers@users.noreply.github.com>
+ * Jack Provance <49460795+njprov@users.noreply.github.com>
+ * Gabriel Ramos <109390599+gabrielramos02@users.noreply.github.com>
+ * Dan McArdle
+ * Joe Cai
+ * Anders Swanson
+ * huajin tong <137764712+thirdkeyword@users.noreply.github.com>
# Contact the rclone project
diff --git a/MANUAL.txt b/MANUAL.txt
index 11a38590b..65aafc533 100644
--- a/MANUAL.txt
+++ b/MANUAL.txt
@@ -1,6 +1,6 @@
rclone(1) User Manual
Nick Craig-Wood
-Nov 26, 2023
+Mar 10, 2024
Rclone syncs your files to cloud storage
@@ -79,6 +79,7 @@ Features
- Can use multi-threaded downloads to local disk
- Copy new or changed files to cloud storage
- Sync (one way) to make a directory identical
+- Bisync (two way) to keep two directories in sync bidirectionally
- Move files to cloud storage deleting the local after verification
- Check hashes and for missing/extra files
- Mount your cloud storage as a network disk
@@ -93,7 +94,6 @@ S3, that work out of the box.)
- 1Fichier
- Akamai Netstorage
- Alibaba Cloud (Aliyun) Object Storage System (OSS)
-- Amazon Drive
- Amazon S3
- Backblaze B2
- Box
@@ -116,6 +116,7 @@ S3, that work out of the box.)
- Hetzner Storage Box
- HiDrive
- HTTP
+- ImageKit
- Internet Archive
- Jottacloud
- IBM COS S3
@@ -820,7 +821,6 @@ See the following for detailed instructions for
- 1Fichier
- Akamai Netstorage
- Alias
-- Amazon Drive
- Amazon S3
- Backblaze B2
- Box
@@ -998,6 +998,14 @@ recently very efficiently like this:
rclone copy --max-age 24h --no-traverse /path/to/src remote:
+Rclone will sync the modification times of files and directories if the
+backend supports it. If metadata syncing is required then use the
+--metadata flag.
+
+Note that the modification time and metadata for the root directory will
+not be synced. See https://github.com/rclone/rclone/issues/7652 for more
+info.
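+For example, to copy new files and also preserve metadata where the
+backend supports it (the paths here are purely illustrative):
+
+    rclone copy --metadata /path/to/src remote:dest
+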
+
Note: Use the -P/--progress flag to view real-time transfer statistics.
Note: Use the --dry-run or the --interactive/-i flag to test without
@@ -1023,7 +1031,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -1037,6 +1045,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -1129,18 +1138,85 @@ the destination from the sync with a filter rule or by putting an
exclude-if-present file inside the destination directory and sync to a
destination that is inside the source directory.
+Rclone will sync the modification times of files and directories if the
+backend supports it. If metadata syncing is required then use the
+--metadata flag.
+
+Note that the modification time and metadata for the root directory will
+not be synced. See https://github.com/rclone/rclone/issues/7652 for more
+info.
+
Note: Use the -P/--progress flag to view real-time transfer statistics
Note: Use the rclone dedupe command to deal with "Duplicate
object/directory found in source/destination - ignoring" errors. See
this forum post for more info.
+Logger Flags
+
+The --differ, --missing-on-dst, --missing-on-src, --match and --error
+flags write paths, one per line, to the file name (or stdout if it is -)
+supplied. What they write is described in the help below. For example
+--differ will write all paths which are present on both the source and
+destination but different.
+
+The --combined flag will write a file (or stdout) which contains all
+file paths with a symbol and then a space and then the path to tell you
+what happened to it. These are reminiscent of diff files.
+
+- = path means path was found in source and destination and was
+ identical
+- `- path` means path was missing on the source, so only in the
+ destination
+- `+ path` means path was missing on the destination, so only in the
+ source
+- `* path` means path was present in source and destination but
+ different.
+- ! path means there was an error reading or hashing the source or
+ dest.
+
+The --dest-after flag writes a list file using the same format flags as
+lsf (including customizable options for hash, modtime, etc.)
+Conceptually it is similar to rsync's --itemize-changes, but not
+identical -- it should output an accurate list of what will be on the
+destination after the sync.
+
+Note that these logger flags have a few limitations, and certain
+scenarios are not currently supported:
+
+- --max-duration / CutoffModeHard
+- --compare-dest / --copy-dest
+- server-side moves of an entire dir at once
+- High-level retries, because there would be duplicates (use
+ --retries 1 to disable)
+- Possibly some unusual error scenarios
+
+Note also that each file is logged during the sync, as opposed to after,
+so it is most useful as a predictor of what SHOULD happen to each file
+(which may or may not match what actually DID.)
+
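+As an illustrative sketch (the file names are hypothetical), several of
+these flags can be combined on a dry run to preview the outcome:
+
+    rclone sync /path/to/src remote:dest --dry-run --combined changes.log --dest-after after.lst
+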
rclone sync source:path dest:path [flags]
Options
+ --absolute Put a leading / in front of path names
+ --combined string Make a combined report of changes to this file
--create-empty-src-dirs Create empty source dirs on destination after sync
+ --csv Output in CSV format
+ --dest-after string Report all files that exist on the dest post-sync
+ --differ string Report all non-matching files to this file
+ -d, --dir-slash Append a slash to directory names (default true)
+ --dirs-only Only list directories
+ --error string Report all files with errors (hashing or reading) to this file
+ --files-only Only list files (default true)
+ -F, --format string Output format - see lsf help for details (default "p")
+ --hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
-h, --help help for sync
+ --match string Report all matching files to this file
+ --missing-on-dst string Report all files missing from the destination to this file
+ --missing-on-src string Report all files missing from the source to this file
+ -s, --separator string Separator for the items in the format (default ";")
+ -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
Copy Options
@@ -1155,7 +1231,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -1169,6 +1245,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -1186,6 +1263,7 @@ Flags just used for rclone sync.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
+ --fix-case Force rename of case insensitive dest to match source
--ignore-errors Delete even if there are I/O errors
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
@@ -1269,6 +1347,14 @@ See the --no-traverse option for controlling whether rclone lists the
destination directory or not. Supplying this option when moving a small
number of files into a large destination can speed transfers up greatly.
+Rclone will sync the modification times of files and directories if the
+backend supports it. If metadata syncing is required then use the
+--metadata flag.
+
+Note that the modification time and metadata for the root directory will
+not be synced. See https://github.com/rclone/rclone/issues/7652 for more
+info.
+
Important: Since this can cause data loss, test first with the --dry-run
or the --interactive/-i flag.
@@ -1295,7 +1381,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -1309,6 +1395,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -2537,26 +2624,42 @@ each successive run it will: - list files on Path1 and Path2, and check
for changes on each side. Changes include New, Newer, Older, and Deleted
files. - Propagate changes on Path1 to Path2, and vice-versa.
+Bisync is in beta and is considered an advanced command, so use with
+care. Make sure you have read and understood the entire manual
+(especially the Limitations section) before using, or data loss can
+result. Questions can be asked in the Rclone Forum.
+
See full bisync description for details.
rclone bisync remote1:path1 remote2:path2 [flags]
Options
- --check-access Ensure expected RCLONE_TEST files are found on both Path1 and Path2 filesystems, else abort.
- --check-filename string Filename for --check-access (default: RCLONE_TEST)
- --check-sync string Controls comparison of final listings: true|false|only (default: true) (default "true")
- --create-empty-src-dirs Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
- --filters-file string Read filtering patterns from a file
- --force Bypass --max-delete safety check and run the sync. Consider using with --verbose
- -h, --help help for bisync
- --ignore-listing-checksum Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)
- --localtime Use local time in listings (default: UTC)
- --no-cleanup Retain working files (useful for troubleshooting and testing).
- --remove-empty-dirs Remove ALL empty directories at the final cleanup step.
- --resilient Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!
- -1, --resync Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.
- --workdir string Use custom working dir - useful for testing. (default: $HOME/.cache/rclone/bisync)
+ --backup-dir1 string --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+ --backup-dir2 string --backup-dir for Path2. Must be a non-overlapping path on the same remote.
+ --check-access Ensure expected RCLONE_TEST files are found on both Path1 and Path2 filesystems, else abort.
+ --check-filename string Filename for --check-access (default: RCLONE_TEST)
+ --check-sync string Controls comparison of final listings: true|false|only (default: true) (default "true")
+ --compare string Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')
+ --conflict-loser ConflictLoserAction Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): , num, pathname, delete (default: num)
+ --conflict-resolve string Automatically resolve conflicts by preferring the version that is: none, path1, path2, newer, older, larger, smaller (default: none) (default "none")
+ --conflict-suffix string Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')
+ --create-empty-src-dirs Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
+ --download-hash Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)
+ --filters-file string Read filtering patterns from a file
+ --force Bypass --max-delete safety check and run the sync. Consider using with --verbose
+ -h, --help help for bisync
+ --ignore-listing-checksum Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)
+ --max-lock Duration Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m) (default 0s)
+ --no-cleanup Retain working files (useful for troubleshooting and testing).
+ --no-slow-hash Ignore listing checksums only on backends where they are slow
+ --recover Automatically recover from interruptions without requiring --resync.
+ --remove-empty-dirs Remove ALL empty directories at the final cleanup step.
+ --resilient Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!
+ -1, --resync Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.
+ --resync-mode string During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.) (default "none")
+ --slow-hash-sync-only Ignore slow checksums for listings and deltas, but still consider them during sync calls.
+ --workdir string Use custom working dir - useful for testing. (default: {WORKDIR})
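+
+As an illustrative sketch (the remotes and paths are placeholders), a
+cautious two-way sync that compares size, modtime and checksum and
+prefers the newer version when resolving conflicts might look like:
+
+    rclone bisync remote1:path1 remote2:path2 --compare size,modtime,checksum --conflict-resolve newer --resilient --recover --dry-run
+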
Copy Options
@@ -2571,7 +2674,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -2585,6 +2688,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -3550,7 +3654,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -3564,6 +3668,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -3623,7 +3728,7 @@ SEE ALSO
rclone copyurl
-Copy url content to dest.
+Copy the contents of the URL supplied to dest:path.
Synopsis
@@ -3632,10 +3737,11 @@ it in temporary storage.
Setting --auto-filename will attempt to automatically determine the
filename from the URL (after any redirections) and used in the
-destination path. With --auto-filename-header in addition, if a specific
-filename is set in HTTP headers, it will be used instead of the name
-from the URL. With --print-filename in addition, the resulting file name
-will be printed.
+destination path.
+
+With --auto-filename-header in addition, if a specific filename is set
+in HTTP headers, it will be used instead of the name from the URL. With
+--print-filename in addition, the resulting file name will be printed.
Setting --no-clobber will prevent overwriting file on the destination if
there is one with the same name.
@@ -3643,6 +3749,19 @@ there is one with the same name.
Setting --stdout or making the output file name - will cause the output
to be written to standard output.
+Troubleshooting
+
+If you can't get rclone copyurl to work then here are some things you
+can try:
+
+- --disable-http2 rclone will use HTTP2 if available - try disabling
+ it
+- --bind 0.0.0.0 rclone will use IPv6 if available - try disabling it
+- --bind ::0 to disable IPv4
+- --user-agent curl - some sites have whitelists for curl's
+ user-agent - try that
+- Make sure the site works with curl directly
+
rclone copyurl https://example.com dest:path [flags]
Options
@@ -4139,14 +4258,15 @@ Synopsis
rclone listremotes lists all the available remotes from the config file.
-When used with the --long flag it lists the types too.
+When used with the --long flag it lists the types and the descriptions
+too.
rclone listremotes [flags]
Options
-h, --help help for listremotes
- --long Show the type as well as names
+ --long Show the type and the description as well as names
See the global flags page for global options not listed here.
@@ -4253,6 +4373,20 @@ those only (without traversing the whole directory structure):
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
rclone copy --files-from-raw new_files /path/to/local remote:path
+The default time format is '2006-01-02 15:04:05'. Other formats can be
+specified with the --time-format flag. Examples:
+
+ rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
+ rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
+ rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
+ rclone lsf remote:path --format pt --time-format RFC3339
+ rclone lsf remote:path --format pt --time-format DateOnly
+ rclone lsf remote:path --format pt --time-format max
+
+--time-format max will automatically truncate
+'2006-01-02 15:04:05.000000000' to the maximum precision supported by
+the remote.
+
Any of the filtering options can be applied to this command.
There are several related list commands
@@ -4280,16 +4414,17 @@ bucket-based remotes).
Options
- --absolute Put a leading / in front of path names
- --csv Output in CSV format
- -d, --dir-slash Append a slash to directory names (default true)
- --dirs-only Only list directories
- --files-only Only list files
- -F, --format string Output format - see help for details (default "p")
- --hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
- -h, --help help for lsf
- -R, --recursive Recurse into the listing
- -s, --separator string Separator for the items in the format (default ";")
+ --absolute Put a leading / in front of path names
+ --csv Output in CSV format
+ -d, --dir-slash Append a slash to directory names (default true)
+ --dirs-only Only list directories
+ --files-only Only list files
+ -F, --format string Output format - see help for details (default "p")
+ --hash h Use this hash when h is used in the format MD5|SHA-1|DropboxHash (default "md5")
+ -h, --help help for lsf
+ -R, --recursive Recurse into the listing
+ -s, --separator string Separator for the items in the format (default ";")
+ -t, --time-format string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
Filter Options
@@ -4759,6 +4894,12 @@ Mounting on macOS can be done either via built-in NFS server, macFUSE
utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE
system which "mounts" via an NFSv4 local server.
+Unicode Normalization
+
+It is highly recommended to keep the default of
+--no-unicode-normalization=false for all mount and serve commands on
+macOS. For details, see vfs-case-sensitivity.
+
NFS mount
This method spins up an NFS server using serve nfs command and mounts it
@@ -4766,6 +4907,12 @@ to the specified mountpoint. If you run this in background mode using
|--daemon|, you will need to send SIGTERM signal to the rclone process
using |kill| command to stop the mount.
+Note that --nfs-cache-handle-limit controls the maximum number of cached
+file handles stored by the nfsmount caching handler. This should not be
+set too low or you may experience errors when trying to access files.
+The default is 1000000, but consider lowering this limit if the server's
+system resource usage causes problems.
+
macFUSE Notes
If installing macFUSE using dmg packages from the website, rclone will
@@ -4794,14 +4941,6 @@ This means that viewing files with various tools, notably macOS Finder,
will cause rlcone to update the modification time of the file. This may
make rclone upload a full new copy of the file.
-Unicode Normalization
-
-Rclone includes flags for unicode normalization with macFUSE that should
-be updated for FUSE-T. See this forum post and FUSE-T issue #16. The
-following flag should be added to the rclone mount command.
-
- -o modules=iconv,from_code=UTF-8,to_code=UTF-8
-
Read Only mounts
When mounting with --read-only, attempts to write to files will fail
@@ -5264,6 +5403,28 @@ depends on the operating system where rclone runs: "true" on Windows and
macOS, "false" otherwise. If the flag is provided without a value, then
it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
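+A minimal sketch of enabling this (the mountpoint is hypothetical) is
+to add the flag to the mount command:
+
+    rclone mount remote:path /path/to/local/mount --vfs-block-norm-dupes
+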
VFS Disk Options
This flag allows you to manually set the statistics about the filing
@@ -5320,6 +5481,7 @@ Options
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -5332,7 +5494,7 @@ Options
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -5428,7 +5590,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -5442,6 +5604,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -5608,6 +5771,918 @@ SEE ALSO
- rclone - Show help for rclone commands, flags and backends.
+rclone nfsmount
+
+Mount the remote as file system on a mountpoint.
+
+Synopsis
+
+rclone nfsmount allows Linux, FreeBSD, macOS and Windows to mount any of
+Rclone's cloud storage systems as a file system with FUSE.
+
+First set up your remote using rclone config. Check it works with
+rclone ls etc.
+
+On Linux and macOS, you can run mount in either foreground or background
+(aka daemon) mode. Mount runs in foreground mode by default. Use the
+--daemon flag to force background mode. On Windows you can run mount in
+foreground only, the flag is ignored.
+
+In background mode rclone acts as a generic Unix mount program: the main
+program starts, spawns background rclone process to setup and maintain
+the mount, waits until success or timeout and exits with appropriate
+code (killing the child process if it fails).
+
+On Linux/macOS/FreeBSD start the mount like this, where
+/path/to/local/mount is an empty existing directory:
+
+ rclone nfsmount remote:path/to/files /path/to/local/mount
+
+On Windows you can start a mount in different ways. See below for
+details. If foreground mount is used interactively from a console
+window, rclone will serve the mount and occupy the console so another
+window should be used to work with the mount until rclone is interrupted
+e.g. by pressing Ctrl-C.
+
+The following examples will mount to an automatically assigned drive, to
+specific drive letter X:, to path C:\path\parent\mount (where parent
+directory or drive must exist, and mount must not exist, and is not
+supported when mounting as a network drive), and the last example will
+mount as network share \\cloud\remote and map it to an automatically
+assigned drive:
+
+ rclone nfsmount remote:path/to/files *
+ rclone nfsmount remote:path/to/files X:
+ rclone nfsmount remote:path/to/files C:\path\parent\mount
+ rclone nfsmount remote:path/to/files \\cloud\remote
+
+When the program ends while in foreground mode, either via Ctrl+C or
+receiving a SIGINT or SIGTERM signal, the mount should be automatically
+stopped.
+
+When running in background mode the user will have to stop the mount
+manually:
+
+ # Linux
+ fusermount -u /path/to/local/mount
+ # OS X
+ umount /path/to/local/mount
+
+The umount operation can fail, for example when the mountpoint is busy.
+When that happens, it is the user's responsibility to stop the mount
+manually.
+
+The size of the mounted file system will be set according to information
+retrieved from the remote, the same as returned by the rclone about
+command. Remotes with unlimited storage may report the used size only,
+then an additional 1 PiB of free space is assumed. If the remote does
+not support the about feature at all, then 1 PiB is set as both the
+total and the free size.
+
+Installing on Windows
+
+To run rclone nfsmount on Windows, you will need to download and install
+WinFsp.
+
+WinFsp is an open-source Windows File System Proxy which makes it easy
+to write user space file systems for Windows. It provides a FUSE
+emulation layer which rclone uses in combination with cgofuse. Both of
+these packages are by Bill Zissimopoulos who was very helpful during the
+implementation of rclone nfsmount for Windows.
+
+Mounting modes on windows
+
+Unlike other operating systems, Microsoft Windows provides a different
+filesystem type for network and fixed drives. It optimises access on the
+assumption fixed disk drives are fast and reliable, while network drives
+have relatively high latency and less reliability. Some settings can
+also be differentiated between the two types, for example that Windows
+Explorer should just display icons and not create preview thumbnails for
+image and video files on network drives.
+
+In most cases, rclone will mount the remote as a normal, fixed disk
+drive by default. However, you can also choose to mount it as a remote
+network drive, often described as a network share. If you mount an
+rclone remote using the default, fixed drive mode and experience
+unexpected program errors, freezes or other issues, consider mounting as
+a network drive instead.
+
+When mounting as a fixed disk drive you can either mount to an unused
+drive letter, or to a path representing a nonexistent subdirectory of an
+existing parent directory or drive. Using the special value * will tell
+rclone to automatically assign the next available drive letter, starting
+with Z: and moving backward. Examples:
+
+ rclone nfsmount remote:path/to/files *
+ rclone nfsmount remote:path/to/files X:
+ rclone nfsmount remote:path/to/files C:\path\parent\mount
+ rclone nfsmount remote:path/to/files X:
+
+Option --volname can be used to set a custom volume name for the mounted
+file system. The default is to use the remote name and path.
+
+To mount as network drive, you can add option --network-mode to your
+nfsmount command. Mounting to a directory path is not supported in this
+mode, it is a limitation Windows imposes on junctions, so the remote
+must always be mounted to a drive letter.
+
+ rclone nfsmount remote:path/to/files X: --network-mode
+
+A volume name specified with --volname will be used to create the
+network share path. A complete UNC path, such as \\cloud\remote,
+optionally with path \\cloud\remote\madeup\path, will be used as is. Any
+other string will be used as the share part, after a default prefix
+\\server\. If no volume name is specified then \\server\share will be
+used. You must make sure the volume name is unique when you are mounting
+more than one drive, or else the mount command will fail. The share name
+will be treated as the volume label for the mapped drive, shown in Windows
+Explorer etc, while the complete \\server\share will be reported as the
+remote UNC path by net use etc, just like a normal network drive
+mapping.
+
+If you specify a full network share UNC path with --volname, this will
+implicitly set the --network-mode option, so the following two examples
+have same result:
+
+ rclone nfsmount remote:path/to/files X: --network-mode
+ rclone nfsmount remote:path/to/files X: --volname \\server\share
+
+You may also specify the network share UNC path as the mountpoint
+itself. Then rclone will automatically assign a drive letter, same as
+with * and use that as mountpoint, and instead use the UNC path
+specified as the volume name, as if it were specified with the --volname
+option. This will also implicitly set the --network-mode option. This
+means the following two examples have same result:
+
+ rclone nfsmount remote:path/to/files \\cloud\remote
+ rclone nfsmount remote:path/to/files * --volname \\cloud\remote
+
+There is yet another way to enable network mode, and to set the share
+path, and that is to pass the "native" libfuse/WinFsp option directly:
+--fuse-flag --VolumePrefix=\server\share. Note that the path must be
+with just a single backslash prefix in this case.
+
+Note: In previous versions of rclone this was the only supported method.
+
+Read more about drive mapping
+
+See also Limitations section below.
+
+Windows filesystem permissions
+
+The FUSE emulation layer on Windows must convert between the POSIX-based
+permission model used in FUSE, and the permission model used in Windows,
+based on access-control lists (ACL).
+
+The mounted filesystem will normally get three entries in its
+access-control list (ACL), representing permissions for the POSIX
+permission scopes: Owner, group and others. By default, the owner and
+group will be taken from the current user, and the built-in group
+"Everyone" will be used to represent others. The user/group can be
+customized with FUSE options "UserName" and "GroupName", e.g.
+-o UserName=user123 -o GroupName="Authenticated Users". The permissions
+on each entry will be set according to options --dir-perms and
+--file-perms, which takes a value in traditional Unix numeric notation.
+
+The default permissions correspond to
+--file-perms 0666 --dir-perms 0777, i.e. read and write permissions to
+everyone. This means you will not be able to start any programs from the
+mount. To be able to do that you must add execute permissions, e.g.
+--file-perms 0777 --dir-perms 0777 to add it to everyone. If the program
+needs to write files, chances are you will have to enable VFS File
+Caching as well (see also limitations). Note that the default write
+permission has some restrictions for accounts other than the owner,
+specifically it lacks the "write extended attributes", as explained
+next.
+
+The mapping of permissions is not always trivial, and the result you see
+in Windows Explorer may not be exactly like you expected. For example,
+when setting a value that includes write access for the group or others
+scope, this will be mapped to individual permissions "write attributes",
+"write data" and "append data", but not "write extended attributes".
+Windows will then show this as basic permission "Special" instead of
+"Write", because "Write" also covers the "write extended attributes"
+permission. When setting digit 0 for group or others, to indicate no
+permissions, they will still get individual permissions "read
+attributes", "read extended attributes" and "read permissions". This is
+done for compatibility reasons, e.g. to allow users without additional
+permissions to be able to read basic metadata about files like in Unix.
+
+WinFsp 2021 (version 1.9) introduced a new FUSE option "FileSecurity",
+that allows the complete specification of file security descriptors
+using SDDL. With this you get detailed control of the resulting
+permissions, compared to use of the POSIX permissions described above,
+and no additional permissions will be added automatically for
+compatibility with Unix. Some example use cases follow.
+
+If you set POSIX permissions for only allowing access to the owner,
+using --file-perms 0600 --dir-perms 0700, the user group and the
+built-in "Everyone" group will still be given some special permissions,
+as described above. Some programs may then (incorrectly) interpret this
+as the file being accessible by everyone, for example an SSH client may
+warn about "unprotected private key file". You can work around this by
+specifying -o FileSecurity="D:P(A;;FA;;;OW)", which sets file all access
+(FA) to the owner (OW), and nothing else.
+
+When setting write permissions then, except for the owner, this does not
+include the "write extended attributes" permission, as mentioned above.
+This may prevent applications from writing to files, giving permission
+denied error instead. To set working write permissions for the built-in
+"Everyone" group, similar to what it gets by default but with the
+addition of the "write extended attributes", you can specify
+-o FileSecurity="D:P(A;;FRFW;;;WD)", which sets file read (FR) and file
+write (FW) to everyone (WD). If file execute (FX) is also needed, then
+change to -o FileSecurity="D:P(A;;FRFWFX;;;WD)", or set file all access
+(FA) to get full access permissions, including delete, with
+-o FileSecurity="D:P(A;;FA;;;WD)".
+
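+As a sketch of how this fits into a complete command (the remote and
+drive letter are hypothetical), the option is passed straight to the
+mount:
+
+    rclone nfsmount remote:path/to/files X: -o FileSecurity="D:P(A;;FRFW;;;WD)"
+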
+Windows caveats
+
+Drives created as Administrator are not visible to other accounts, not
+even an account that was elevated to Administrator with the User Account
+Control (UAC) feature. A result of this is that if you mount to a drive
+letter from a Command Prompt run as Administrator, and then try to
+access the same drive from Windows Explorer (which does not run as
+Administrator), you will not be able to see the mounted drive.
+
+If you don't need to access the drive from applications running with
+administrative privileges, the easiest way around this is to always
+create the mount from a non-elevated command prompt.
+
+To make mapped drives available to the user account that created them
+regardless if elevated or not, there is a special Windows setting called
+linked connections that can be enabled.
+
+It is also possible to make a drive mount available to everyone on the
+system, by running the process creating it as the built-in SYSTEM
+account. There are several ways to do this: One is to use the
+command-line utility PsExec, from Microsoft's Sysinternals suite, which
+has option -s to start processes as the SYSTEM account. Another
+alternative is to run the mount command from a Windows Scheduled Task,
+or a Windows Service, configured to run as the SYSTEM account. A third
+alternative is to use the WinFsp.Launcher infrastructure. Read more in
+the install documentation. Note that when running rclone as another
+user, it will not use the configuration file from your profile unless
+you tell it to with the --config option. Note also that it is now the
+SYSTEM account that will have the owner permissions, and other accounts
+will have permissions according to the group or others scopes. As
+mentioned above, these will then not get the "write extended attributes"
+permission, and this may prevent writing to files. You can work around
+this with the FileSecurity option, see example above.
+
+Note that mapping to a directory path, instead of a drive letter, does
+not suffer from the same limitations.
+
+Mounting on macOS
+
+Mounting on macOS can be done either via built-in NFS server, macFUSE
+(also known as osxfuse) or FUSE-T. macFUSE is a traditional FUSE driver
+utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE
+system which "mounts" via an NFSv4 local server.
+
+Unicode Normalization
+
+It is highly recommended to keep the default of
+--no-unicode-normalization=false for all mount and serve commands on
+macOS. For details, see vfs-case-sensitivity.
+
+NFS mount
+
+This method spins up an NFS server using serve nfs command and mounts it
+to the specified mountpoint. If you run this in background mode using
+|--daemon|, you will need to send SIGTERM signal to the rclone process
+using |kill| command to stop the mount.
+
+Note that --nfs-cache-handle-limit controls the maximum number of cached
+file handles stored by the nfsmount caching handler. This should not be
+set too low or you may experience errors when trying to access files.
+The default is 1000000, but consider lowering this limit if the server's
+system resource usage causes problems.
+
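+As an illustrative sketch (the mountpoint is hypothetical), the limit
+can be raised on the command line:
+
+    rclone nfsmount remote:path /path/to/local/mount --nfs-cache-handle-limit 2000000
+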
+macFUSE Notes
+
+If installing macFUSE using dmg packages from the website, rclone will
+locate the macFUSE libraries without any further intervention. If
+however, macFUSE is installed using the macports package manager, the
+following additional steps are required.
+
+ sudo mkdir /usr/local/lib
+ cd /usr/local/lib
+ sudo ln -s /opt/local/lib/libfuse.2.dylib
+
+FUSE-T Limitations, Caveats, and Notes
+
+There are some limitations, caveats, and notes about how it works. These
+are current as of FUSE-T version 1.0.14.
+
+ModTime update on read
+
+As per the FUSE-T wiki:
+
+ File access and modification times cannot be set separately as it
+ seems to be an issue with the NFS client which always modifies both.
+ Can be reproduced with 'touch -m' and 'touch -a' commands
+
+This means that viewing files with various tools, notably macOS Finder,
+will cause rclone to update the modification time of the file. This may
+make rclone upload a full new copy of the file.
+
+Read Only mounts
+
+When mounting with --read-only, attempts to write to files will fail
+silently as opposed to with a clear warning as in macFUSE.
+
+Limitations
+
+Without the use of --vfs-cache-mode this can only write files
+sequentially, it can only seek when reading. This means that many
+applications won't work with their files on an rclone mount without
+--vfs-cache-mode writes or --vfs-cache-mode full. See the VFS File
+Caching section for more info. When using NFS mount on macOS, if you
+don't specify |--vfs-cache-mode| the mount point will be read-only.
+
+The bucket-based remotes (e.g. Swift, S3, Google Compute Storage, B2) do
+not support the concept of empty directories, so empty directories will
+have a tendency to disappear once they fall out of the directory cache.
+
+When rclone mount is invoked on Unix with --daemon flag, the main rclone
+program will wait for the background mount to become ready or until the
+timeout specified by the --daemon-wait flag. On Linux it can check mount
+status using ProcFS so the flag in fact sets maximum time to wait, while
+the real wait can be less. On macOS / BSD the time to wait is constant
+and the check is performed only at the end. We advise you to set wait
+time on macOS reasonably.
+
+Only supported on Linux, FreeBSD, OS X and Windows at the moment.
+
+rclone nfsmount vs rclone sync/copy
+
+File systems expect things to be 100% reliable, whereas cloud storage
+systems are a long way from 100% reliable. The rclone sync/copy commands
+cope with this with lots of retries. However rclone nfsmount can't use
+retries in the same way without making local copies of the uploads. Look
+at the VFS File Caching for solutions to make nfsmount more reliable.
+
+Attribute caching
+
+You can use the flag --attr-timeout to set the time the kernel caches
+the attributes (size, modification time, etc.) for directory entries.
+
+The default is 1s which caches files just long enough to avoid too many
+callbacks to rclone from the kernel.
+
+In theory 0s should be the correct value for filesystems which can
+change outside the control of the kernel. However this causes quite a
+few problems such as rclone using too much memory, rclone not serving
+files to samba and excessive time listing directories.
+
+The kernel can cache the info about a file for the time given by
+--attr-timeout. You may see corruption if the remote file changes length
+during this window. It will show up as either a truncated file or a file
+with garbage on the end. With --attr-timeout 1s this is very unlikely
+but not impossible. The higher you set --attr-timeout the more likely it
+is. The default setting of "1s" is the lowest setting which mitigates
+the problems above.
+
+If you set it higher (10s or 1m say) then the kernel will call back to
+rclone less often making it more efficient, however there is more chance
+of the corruption issue above.
+
+If files don't change on the remote outside of the control of rclone
+then there is no chance of corruption.
+
+This is the same as setting the attr_timeout option in mount.fuse.
+
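+For example, to trade a little consistency for fewer kernel callbacks
+(the value and mountpoint are illustrative only):
+
+    rclone nfsmount remote:path /path/to/local/mount --attr-timeout 10s
+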
+Filters
+
+Note that all the rclone filters can be used to select a subset of the
+files to be visible in the mount.
+
+systemd
+
+When running rclone nfsmount as a systemd service, it is possible to use
+Type=notify. In this case the service will enter the started state after
+the mountpoint has been successfully set up. Units having the rclone
+nfsmount service specified as a requirement will see all files and
+folders immediately in this mode.
+
+Note that systemd runs mount units without any environment variables
+including PATH or HOME. This means that tilde (~) expansion will not
+work and you should provide --config and --cache-dir explicitly as
+absolute paths via rclone arguments. Since mounting requires the
+fusermount program, rclone will use the fallback PATH of /bin:/usr/bin
+in this scenario. Please ensure that fusermount is present on this PATH.
+
+Rclone as Unix mount helper
+
+The core Unix program /bin/mount normally takes the -t FSTYPE argument
+then runs the /sbin/mount.FSTYPE helper program passing it mount options
+as -o key=val,... or --opt=.... Automount (classic or systemd) behaves
+in a similar way.
+
+rclone by default expects GNU-style flags --key val. To run it as a
+mount helper you should symlink rclone binary to /sbin/mount.rclone and
+optionally /usr/bin/rclonefs, e.g.
+ln -s /usr/bin/rclone /sbin/mount.rclone. rclone will detect it and
+translate command-line arguments appropriately.
+
+Now you can run classic mounts like this:
+
+ mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
+
+or create systemd mount units:
+
+ # /etc/systemd/system/mnt-data.mount
+ [Unit]
+ Description=Mount for /mnt/data
+ [Mount]
+ Type=rclone
+ What=sftp1:subdir
+ Where=/mnt/data
+ Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone.conf,cache-dir=/var/rclone
+
+optionally accompanied by systemd automount unit
+
+ # /etc/systemd/system/mnt-data.automount
+ [Unit]
+ Description=AutoMount for /mnt/data
+ [Automount]
+ Where=/mnt/data
+ TimeoutIdleSec=600
+ [Install]
+ WantedBy=multi-user.target
+
+or add in /etc/fstab a line like
+
+ sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
+
+or use classic Automountd. Remember to provide explicit
+config=...,cache-dir=... as a workaround for mount units being run
+without HOME.
+
+Rclone in the mount helper mode will split -o argument(s) by comma,
+replace _ by - and prepend -- to get the command-line flags. Options
+containing commas or spaces can be wrapped in single or double quotes.
+Any inner quotes inside outer quotes of the same type should be doubled.
+
+Mount option syntax includes a few extra options treated specially:
+
+- env.NAME=VALUE will set an environment variable for the mount
+ process. This helps with Automountd and Systemd.mount which don't
+ allow setting custom environment for mount helpers. Typically you
+ will use env.HTTPS_PROXY=proxy.host:3128 or env.HOME=/root
+- command=cmount can be used to run cmount or any other rclone command
+ rather than the default mount.
+- args2env will pass mount options to the mount helper running in
+ background via environment variables instead of command line
+  arguments. This allows hiding secrets from such commands as ps or
+ pgrep.
+- vv... will be transformed into appropriate --verbose=N
+- standard mount options like x-systemd.automount, _netdev, nosuid and
+  alike are intended only for Automountd and ignored by rclone.
+
+VFS - Virtual File System
+
+This command uses the VFS layer. This adapts the cloud storage objects
+that rclone uses into something which looks much more like a disk filing
+system.
+
+Cloud storage objects have lots of properties which aren't like disk
+files - you can't extend them or write to the middle of them, so the VFS
+layer has to deal with that. Because there is no one right way of doing
+this there are various options explained below.
+
+The VFS layer also implements a directory cache - this caches info about
+files and directories (but not the data) in memory.
+
+VFS Directory Cache
+
+Using the --dir-cache-time flag, you can control how long a directory
+should be considered up to date and not refreshed from the backend.
+Changes made through the VFS will appear immediately or invalidate the
+cache.
+
+ --dir-cache-time duration Time to cache directory entries for (default 5m0s)
+ --poll-interval duration Time to wait between polling for changes. Must be smaller than dir-cache-time. Only on supported remotes. Set to 0 to disable (default 1m0s)
+
+However, changes made directly on the cloud storage by the web interface
+or a different copy of rclone will only be picked up once the directory
+cache expires if the backend configured does not support polling for
+changes. If the backend supports polling, changes will be picked up
+within the polling interval.
+
+You can send a SIGHUP signal to rclone for it to flush all directory
+caches, regardless of how old they are. Assuming only one rclone
+instance is running, you can reset the cache like this:
+
+ kill -SIGHUP $(pidof rclone)
+
+If you configure rclone with a remote control then you can use rclone rc
+to flush the whole directory cache:
+
+ rclone rc vfs/forget
+
+Or individual files or directories:
+
+ rclone rc vfs/forget file=path/to/file dir=path/to/dir
+
+VFS File Buffering
+
+The --buffer-size flag determines the amount of memory that will be
+used to buffer data in advance.
+
+Each open file will try to keep the specified amount of data in memory
+at all times. The buffered data is bound to one open file and won't be
+shared.
+
+This flag is an upper limit for the memory used per open file. The buffer
+will only use memory for data that is downloaded but not yet read.
+If the buffer is empty, only a small amount of memory will be used.
+
+The maximum memory used by rclone for buffering can be up to
+--buffer-size * open files.
+
+VFS File Caching
+
+These flags control the VFS file caching options. File caching is
+necessary to make the VFS layer appear compatible with a normal file
+system. It can be disabled at the cost of some compatibility.
+
+For example you'll need to enable VFS caching if you want to read and
+write simultaneously to a file. See below for more details.
+
+Note that the VFS cache is separate from the cache backend and you may
+find that you need one or the other or both.
+
+ --cache-dir string Directory rclone will use for caching.
+ --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+ --vfs-cache-max-age duration Max time since last access of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+ --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+ --vfs-cache-poll-interval duration Interval to poll the cache for stale objects (default 1m0s)
+ --vfs-write-back duration Time to writeback files after last use when using cache (default 5s)
+
+If run with -vv rclone will print the location of the file cache. The
+files are stored in the user cache file area which is OS dependent but
+can be controlled with --cache-dir or setting the appropriate
+environment variable.
+
+The cache has 4 different modes selected by --vfs-cache-mode. The higher
+the cache mode the more compatible rclone becomes at the cost of using
+disk space.
+
+Note that files are written back to the remote only when they are closed
+and if they haven't been accessed for --vfs-write-back seconds. If
+rclone is quit or dies with files that haven't been uploaded, these will
+be uploaded next time rclone is run with the same flags.
+
+If using --vfs-cache-max-size or --vfs-cache-min-free-space note that
+the cache may exceed these quotas for two reasons. Firstly because it
+is only checked every --vfs-cache-poll-interval. Secondly because open
+files cannot be evicted from the cache. When --vfs-cache-max-size or
+--vfs-cache-min-free-space is exceeded, rclone will attempt to evict
+the least accessed files from the cache first. rclone will start with
+files that haven't been accessed for the longest. This cache flushing
+strategy is efficient and more relevant files are likely to remain
+cached.
+
+The --vfs-cache-max-age will evict files from the cache after the set
+time since last access has passed. The default value of 1 hour will
+start evicting files from cache that haven't been accessed for 1 hour.
+When a cached file is accessed the 1 hour timer is reset to 0 and will
+wait for 1 more hour before evicting. Specify the time with standard
+notation, e.g. s, m, h, d, w.
+
+You should not run two copies of rclone using the same VFS cache with
+the same or overlapping remotes if using --vfs-cache-mode > off. This
+can potentially cause data corruption if you do. You can work around
+this by giving each rclone its own cache hierarchy with --cache-dir. You
+don't need to worry about this if the remotes in use don't overlap.
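+
+For example, two mounts of the same remote could each be given their
+own cache hierarchy like this (a minimal sketch; the remote, mount
+points and cache directories are placeholders):
+
+    rclone nfsmount remote:path /mnt/a --vfs-cache-mode writes \
+        --cache-dir ~/.cache/rclone-a
+    rclone nfsmount remote:path /mnt/b --vfs-cache-mode writes \
+        --cache-dir ~/.cache/rclone-b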
+
+--vfs-cache-mode off
+
+In this mode (the default) the cache will read directly from the remote
+and write directly to the remote without caching anything on disk.
+
+This will mean some operations are not possible
+
+- Files can't be opened for both read AND write
+- Files opened for write can't be seeked
+- Existing files opened for write must have O_TRUNC set
+- Files open for read with O_TRUNC will be opened write only
+- Files open for write only will behave as if O_TRUNC was supplied
+- Open modes O_APPEND, O_TRUNC are ignored
+- If an upload fails it can't be retried
+
+--vfs-cache-mode minimal
+
+This is very similar to "off" except that files opened for read AND
+write will be buffered to disk. This means that files opened for write
+will be a lot more compatible, but uses the minimal disk space.
+
+These operations are not possible
+
+- Files opened for write only can't be seeked
+- Existing files opened for write must have O_TRUNC set
+- Files opened for write only will ignore O_APPEND, O_TRUNC
+- If an upload fails it can't be retried
+
+--vfs-cache-mode writes
+
+In this mode files opened for read only are still read directly from
+the remote; write only and read/write files are buffered to disk first.
+
+This mode should support all normal file system operations.
+
+If an upload fails it will be retried at exponentially increasing
+intervals up to 1 minute.
+
+--vfs-cache-mode full
+
+In this mode all reads and writes are buffered to and from disk. When
+data is read from the remote this is buffered to disk as well.
+
+In this mode the files in the cache will be sparse files and rclone will
+keep track of which bits of the files it has downloaded.
+
+So if an application only reads the start of each file, then rclone
+will only buffer the start of the file. These files will appear to be
+their full size in the cache, but they will be sparse files with only
+the data that has been downloaded present in them.
+
+This mode should support all normal file system operations and is
+otherwise identical to --vfs-cache-mode writes.
+
+When reading a file rclone will read --buffer-size plus --vfs-read-ahead
+bytes ahead. The --buffer-size is buffered in memory whereas the
+--vfs-read-ahead is buffered on disk.
+
+When using this mode it is recommended that --buffer-size is not set too
+large and --vfs-read-ahead is set large if required.
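+
+As a rough sketch (the values are examples, not recommendations), a
+full-mode mount with a modest memory buffer and a larger on-disk
+read-ahead might look like:
+
+    rclone nfsmount remote:path /path/to/mountpoint --vfs-cache-mode full \
+        --buffer-size 16M --vfs-read-ahead 256M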
+
+IMPORTANT not all file systems support sparse files. In particular
+FAT/exFAT do not. Rclone will perform very badly if the cache directory
+is on a filesystem which doesn't support sparse files and it will log an
+ERROR message if one is detected.
+
+Fingerprinting
+
+Various parts of the VFS use fingerprinting to see if a local file copy
+has changed relative to a remote file. Fingerprints are made from:
+
+- size
+- modification time
+- hash
+
+where available on an object.
+
+On some backends some of these attributes are slow to read (they take an
+extra API call per object, or extra work per object).
+
+For example hash is slow with the local and sftp backends as they have
+to read the entire file and hash it, and modtime is slow with the s3,
+swift, ftp and qingstor backends because they need to do an extra API
+call to fetch it.
+
+If you use the --vfs-fast-fingerprint flag then rclone will not include
+the slow operations in the fingerprint. This makes the fingerprinting
+less accurate but much faster and will improve the opening time of
+cached files.
+
+If you are running a vfs cache over local, s3 or swift backends then
+using this flag is recommended.
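+
+For example (the remote name is a placeholder), a cached mount of an
+s3 remote could enable fast fingerprinting with:
+
+    rclone nfsmount s3remote:bucket /path/to/mountpoint \
+        --vfs-cache-mode full --vfs-fast-fingerprint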
+
+Note that if you change the value of this flag, the fingerprints of the
+files in the cache may be invalidated and the files will need to be
+downloaded again.
+
+VFS Chunked Reading
+
+When rclone reads files from a remote it reads them in chunks. This
+means that rather than requesting the whole file rclone reads the chunk
+specified. This can reduce the used download quota for some remotes by
+requesting only chunks from the remote that are actually read, at the
+cost of an increased number of requests.
+
+These flags control the chunking:
+
+ --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
+ --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+
+Rclone will start reading a chunk of size --vfs-read-chunk-size, and
+then double the size for each read. When --vfs-read-chunk-size-limit is
+specified, and greater than --vfs-read-chunk-size, the chunk size for
+each open file will get doubled only until the specified value is
+reached. If the value is "off", which is the default, the limit is
+disabled and the chunk size will grow indefinitely.
+
+With --vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 0 the
+following parts will be downloaded: 0-100M, 100M-200M, 200M-300M,
+300M-400M and so on. When --vfs-read-chunk-size-limit 500M is specified,
+the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,
+1200M-1700M and so on.
+
+Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.
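+
+For example, to reproduce the second sequence above (100M chunks
+doubling up to a 500M limit) on an illustrative mount:
+
+    rclone nfsmount remote:path /path/to/mountpoint \
+        --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M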
+
+VFS Performance
+
+These flags may be used to enable/disable features of the VFS for
+performance or other reasons. See also the chunked reading feature.
+
+In particular S3 and Swift benefit hugely from the --no-modtime flag (or
+use --use-server-modtime for a slightly different effect) as each read
+of the modification time takes a transaction.
+
+ --no-checksum Don't compare checksums on up/download.
+ --no-modtime Don't read/write the modification time (can speed things up).
+ --no-seek Don't allow seeking in files.
+ --read-only Only allow read-only access.
+
+Sometimes rclone is delivered reads or writes out of order. Rather than
+seeking rclone will wait a short time for the in sequence read or write
+to come in. These flags only come into effect when not using an on disk
+cache file.
+
+ --vfs-read-wait duration Time to wait for in-sequence read before seeking (default 20ms)
+ --vfs-write-wait duration Time to wait for in-sequence write before giving error (default 1s)
+
+When using VFS write caching (--vfs-cache-mode with value writes or
+full), the global flag --transfers can be set to adjust the number of
+parallel uploads of modified files from the cache (the related global
+flag --checkers has no effect on the VFS).
+
+ --transfers int Number of file transfers to run in parallel (default 4)
+
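+For example (illustrative values), a write-caching mount that uploads
+up to 8 modified files in parallel and skips modification time reads:
+
+    rclone nfsmount remote:path /path/to/mountpoint --vfs-cache-mode writes \
+        --transfers 8 --no-modtime
+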
+VFS Case Sensitivity
+
+Linux file systems are case-sensitive: two files can differ only by
+case, and the exact case must be used when opening a file.
+
+File systems in modern Windows are case-insensitive but case-preserving:
+although existing files can be opened using any case, the exact case
+used to create the file is preserved and available for programs to
+query. It is not allowed for two files in the same directory to differ
+only by case.
+
+Usually file systems on macOS are case-insensitive. It is possible to
+make macOS file systems case-sensitive but that is not the default.
+
+The --vfs-case-insensitive VFS flag controls how rclone handles these
+two cases. If its value is "false", rclone passes file names to the
+remote as-is. If the flag is "true" (or appears without a value on the
+command line), rclone may perform a "fixup" as explained below.
+
+The user may specify a file name to open/delete/rename/etc with a case
+different than what is stored on the remote. If an argument refers to an
+existing file with exactly the same name, then the case of the existing
+file on the disk will be used. However, if a file name with exactly the
+same name is not found but a name differing only by case exists, rclone
+will transparently fixup the name. This fixup happens only when an
+existing file is requested. Case sensitivity of file names created anew
+by rclone is controlled by the underlying remote.
+
+Note that case sensitivity of the operating system running rclone (the
+target) may differ from case sensitivity of a file system presented by
+rclone (the source). The flag controls whether "fixup" is performed to
+satisfy the target.
+
+If the flag is not provided on the command line, then its default value
+depends on the operating system where rclone runs: "true" on Windows and
+macOS, "false" otherwise. If the flag is provided without a value, then
+it is "true".
+
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
+VFS Disk Options
+
+This flag allows you to manually set the statistics about the filing
+system. It can be useful when those statistics cannot be read correctly
+automatically.
+
+ --vfs-disk-space-total-size Manually set the total disk space size (example: 256G, default: -1)
+
+Alternate report of used bytes
+
+Some backends, most notably S3, do not report the amount of bytes used.
+If you need this information to be available when running df on the
+filesystem, then pass the flag --vfs-used-is-size to rclone. With this
+flag set, instead of relying on the backend to report this information,
+rclone will scan the whole remote similar to rclone size and compute the
+total used space itself.
+
+WARNING. Contrary to rclone size, this flag ignores filters so that the
+result is accurate. However, this is very inefficient and may cost lots
+of API calls resulting in extra charges. Use it as a last resort and
+only with caching.
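+
+For example (bearing the warning above in mind; names are
+placeholders), an s3 mount could report used space computed by
+scanning the remote with:
+
+    rclone nfsmount s3remote:bucket /path/to/mountpoint --vfs-used-is-size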
+
+ rclone nfsmount remote:path /path/to/mountpoint [flags]
+
+Options
+
+ --addr string IPaddress:Port or :Port to bind server to
+ --allow-non-empty Allow mounting over a non-empty directory (not supported on Windows)
+ --allow-other Allow access to other users (not supported on Windows)
+ --allow-root Allow access to root user (not supported on Windows)
+ --async-read Use asynchronous reads (not supported on Windows) (default true)
+ --attr-timeout Duration Time for which file/directory attributes are cached (default 1s)
+ --daemon Run mount in background and exit parent process (as background output is suppressed, use --log-file with --log-format=pid,... to monitor) (not supported on Windows)
+ --daemon-timeout Duration Time limit for rclone to respond to kernel (not supported on Windows) (default 0s)
+ --daemon-wait Duration Time to wait for ready mount from daemon (maximum time on Linux, constant sleep time on OSX/BSD) (not supported on Windows) (default 1m0s)
+ --debug-fuse Debug the FUSE internals - needs -v
+ --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows)
+ --devname string Set the device name - default is remote:path
+ --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
+ --dir-perms FileMode Directory permissions (default 0777)
+ --file-perms FileMode File permissions (default 0666)
+ --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)
+ --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
+ -h, --help help for nfsmount
+ --max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki)
+ --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset)
+ --network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only)
+ --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000)
+ --no-checksum Don't compare checksums on up/download
+ --no-modtime Don't read/write the modification time (can speed things up)
+ --no-seek Don't allow seeking in files
+ --noappledouble Ignore Apple Double (._) and .DS_Store files (supported on OSX only) (default true)
+ --noapplexattr Ignore all "com.apple.*" extended attributes (supported on OSX only)
+ -o, --option stringArray Option for libfuse/WinFsp (repeat if required)
+ --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
+ --read-only Only allow read-only access
+ --sudo Use sudo to run the mount command as root.
+ --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
+ --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
+ --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
+ --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
+ --vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
+ --vfs-cache-mode CacheMode Cache mode off|minimal|writes|full (default off)
+ --vfs-cache-poll-interval Duration Interval to poll the cache for stale objects (default 1m0s)
+ --vfs-case-insensitive If a file name not found, find a case insensitive match
+ --vfs-disk-space-total-size SizeSuffix Specify the total space of disk (default off)
+ --vfs-fast-fingerprint Use fast (less accurate) fingerprints for change detection
+ --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
+ --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
+ --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+ --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
+ --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
+ --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
+ --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
+ --volname string Set the volume name (supported on Windows and OSX only)
+ --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)
+
+Filter Options
+
+Flags for filtering directory listings.
+
+ --delete-excluded Delete files on dest excluded from sync
+ --exclude stringArray Exclude files matching pattern
+ --exclude-from stringArray Read file exclude patterns from file (use - to read from stdin)
+ --exclude-if-present stringArray Exclude directories if filename is present
+ --files-from stringArray Read list of source-file names from file (use - to read from stdin)
+ --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin)
+ -f, --filter stringArray Add a file filtering rule
+ --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin)
+ --ignore-case Ignore case in filters (case insensitive)
+ --include stringArray Include files matching pattern
+ --include-from stringArray Read file include patterns from file (use - to read from stdin)
+ --max-age Duration Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --max-depth int If set limits the recursion depth to this (default -1)
+ --max-size SizeSuffix Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
+ --metadata-exclude stringArray Exclude metadatas matching pattern
+ --metadata-exclude-from stringArray Read metadata exclude patterns from file (use - to read from stdin)
+ --metadata-filter stringArray Add a metadata filtering rule
+ --metadata-filter-from stringArray Read metadata filtering patterns from a file (use - to read from stdin)
+ --metadata-include stringArray Include metadatas matching pattern
+ --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin)
+ --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
+ --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
+
+See the global flags page for global options not listed here.
+
+SEE ALSO
+
+- rclone - Show help for rclone commands, flags and backends.
+
rclone obscure
Obscure password for use in the rclone config file.
@@ -6476,6 +7551,28 @@ depends on the operating system where rclone runs: "true" on Windows and
macOS, "false" otherwise. If the flag is provided without a value, then
it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
VFS Disk Options
This flag allows you to manually set the statistics about the filing
@@ -6519,6 +7616,7 @@ Options
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -6531,7 +7629,7 @@ Options
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -6921,6 +8019,28 @@ depends on the operating system where rclone runs: "true" on Windows and
macOS, "false" otherwise. If the flag is provided without a value, then
it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
VFS Disk Options
This flag allows you to manually set the statistics about the filing
@@ -6982,6 +8102,7 @@ Options
--socket-gid int GID for unix socket (default: current process GID) (default 1000)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -6994,7 +8115,7 @@ Options
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -7368,6 +8489,28 @@ depends on the operating system where rclone runs: "true" on Windows and
macOS, "false" otherwise. If the flag is provided without a value, then
it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
VFS Disk Options
This flag allows you to manually set the statistics about the filing
@@ -7485,6 +8628,7 @@ Options
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication (default "anonymous")
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -7497,7 +8641,7 @@ Options
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -8008,6 +9152,28 @@ depends on the operating system where rclone runs: "true" on Windows and
macOS, "false" otherwise. If the flag is provided without a value, then
it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
VFS Disk Options
This flag allows you to manually set the statistics about the filing
@@ -8134,6 +9300,7 @@ Options
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -8146,7 +9313,7 @@ Options
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -8207,7 +9374,12 @@ and port using --addr flag.
Modifying files through NFS protocol requires VFS caching. Usually you
will need to specify --vfs-cache-mode in order to be able to write to
the mountpoint (full is recommended). If you don't specify VFS cache
-mode, the mount will be read-only.
+mode, the mount will be read-only. Note also that
+--nfs-cache-handle-limit controls the maximum number of cached file
+handles stored by the caching handler. This should not be set too low or
+you may experience errors when trying to access files. The default is
+1000000, but consider lowering this limit if the server's system
+resource usage causes problems.
To serve NFS over the network use following command:
@@ -8532,6 +9704,28 @@ depends on the operating system where rclone runs: "true" on Windows and
macOS, "false" otherwise. If the flag is provided without a value, then
it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
VFS Disk Options
This flag allows you to manually set the statistics about the filing
@@ -8564,6 +9758,7 @@ Options
--file-perms FileMode File permissions (default 0666)
--gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
-h, --help help for nfs
+ --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000)
--no-checksum Don't compare checksums on up/download
--no-modtime Don't read/write the modification time (can speed things up)
--no-seek Don't allow seeking in files
@@ -8571,6 +9766,7 @@ Options
--read-only Only allow read-only access
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -8583,7 +9779,7 @@ Options
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -9279,6 +10475,28 @@ depends on the operating system where rclone runs: "true" on Windows and
macOS, "false" otherwise. If the flag is provided without a value, then
it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
VFS Disk Options
This flag allows you to manually set the statistics about the filing
@@ -9331,6 +10549,7 @@ Options
--server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -9343,7 +10562,7 @@ Options
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -9748,6 +10967,28 @@ depends on the operating system where rclone runs: "true" on Windows and
macOS, "false" otherwise. If the flag is provided without a value, then
it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
VFS Disk Options
This flag allows you to manually set the statistics about the filing
@@ -9865,6 +11106,7 @@ Options
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -9877,7 +11119,7 @@ Options
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -10418,6 +11660,28 @@ depends on the operating system where rclone runs: "true" on Windows and
macOS, "false" otherwise. If the flag is provided without a value, then
it is "true".
+The --no-unicode-normalization flag controls whether a similar "fixup"
+is performed for filenames that differ but are canonically equivalent
+with respect to unicode. Unicode normalization can be particularly
+helpful for users of macOS, which prefers form NFD instead of the NFC
+used by most other platforms. It is therefore highly recommended to keep
+the default of false on macOS, to avoid encoding compatibility issues.
+
+In the (probably unlikely) event that a directory has multiple duplicate
+filenames after applying case and unicode normalization, the
+--vfs-block-norm-dupes flag allows hiding these duplicates. This comes
+with a performance tradeoff, as rclone will have to scan the entire
+directory for duplicates when listing a directory. For this reason, it
+is recommended to leave this disabled if not needed. However, macOS
+users may wish to consider using it, as otherwise, if a remote directory
+contains both NFC and NFD versions of the same filename, an odd
+situation will occur: both versions of the file will be visible in the
+mount, and both will appear to be editable, however, editing either
+version will actually result in only the NFD version getting edited
+under the hood. --vfs-block-norm-dupes prevents this confusion by
+detecting this scenario, hiding the duplicates, and logging an error,
+similar to how this is handled in rclone sync.
+
VFS Disk Options
This flag allows you to manually set the statistics about the filing
@@ -10546,6 +11810,7 @@ Options
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
--umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
--user string User name for authentication
+ --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
--vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
--vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
--vfs-cache-min-free-space SizeSuffix Target minimum free space on the disk containing the cache (default off)
@@ -10558,7 +11823,7 @@ Options
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
--vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
--vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
- --vfs-refresh Refreshes the directory cache recursively on start
+ --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
--vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
--vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
@@ -11290,17 +12555,20 @@ This can be used when scripting to make aged backups efficiently, e.g.
Metadata support
-Metadata is data about a file which isn't the contents of the file.
-Normally rclone only preserves the modification time and the content
-(MIME) type where possible.
+Metadata is data about a file (or directory) which isn't the contents of
+the file (or directory). Normally rclone only preserves the modification
+time and the content (MIME) type where possible.
-Rclone supports preserving all the available metadata on files (not
-directories) when using the --metadata or -M flag.
+Rclone supports preserving all the available metadata on files and
+directories when using the --metadata or -M flag.
Exactly what metadata is supported and what that support means depends
on the backend. Backends that support metadata have a metadata section
in their docs and are listed in the features table (Eg local, s3)
+Some backends don't support metadata, some only support metadata on
+files and some support metadata on both files and directories.
+
Rclone only supports a one-time sync of metadata. This means that
metadata will be synced from the source object to the destination object
only when the source object has changed and needs to be re-uploaded. If
@@ -11320,6 +12588,13 @@ The --metadata-mapper flag can be used to pass the name of a program in
which can transform metadata when it is being copied from source to
destination.
+Rclone supports --metadata-set and --metadata-mapper when doing server
+side Move and server side Copy, but not when doing server side DirMove
+(renaming a directory) as this would involve recursing into the
+directory. Note that you can disable DirMove with --disable DirMove and
+rclone will revert back to using Move for each individual object where
+--metadata-set and --metadata-mapper are supported.
+
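+For example, a minimal sketch (remote names and paths are placeholders)
+of a metadata-preserving sync that disables DirMove so that directory
+renames fall back to per-object moves:
+
+    rclone sync -M --disable DirMove source:path dest:path
+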
Types of metadata
Metadata is divided into two types: System metadata and User metadata.
@@ -11977,6 +13252,24 @@ NB: Enabling this option turns a usually non-fatal error into a
potentially fatal one - please check and adjust your scripts
accordingly!
+--fix-case
+
+Normally, a sync to a case insensitive dest (such as macOS / Windows)
+will not result in a matching filename if the source and dest filenames
+have casing differences but are otherwise identical. For example,
+syncing hello.txt to HELLO.txt will normally result in the dest filename
+remaining HELLO.txt. If --fix-case is set, then HELLO.txt will be
+renamed to hello.txt to match the source.
+
+NB:
+
+- directory names with incorrect casing will also be fixed
+- --fix-case will be ignored if --immutable is set
+- using --local-case-sensitive instead is not advisable; it will cause
+  HELLO.txt to get deleted!
+- the old dest filename must not be excluded by filters. Be especially
+  careful with --files-from, which does not respect --ignore-case!
+- on remotes that do not support server-side move, --fix-case will
+  require downloading the file and re-uploading it. To avoid this, do
+  not use --fix-case.
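+
+For example, a minimal sketch (paths are placeholders) that renames
+wrongly-cased destination files to match the source during a sync:
+
+    rclone sync --fix-case source:path dest:path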
+
--fs-cache-expire-duration=TIME
When using rclone via the API rclone caches created remotes for 5
@@ -12396,10 +13689,10 @@ some context for the Metadata which may be important.
- DstFs is the config string for the remote that the object is being
copied to
- DstFsType is the name of the destination backend.
-- Remote is the path of the file relative to the root.
-- Size, MimeType, ModTime are attributes of the file.
+- Remote is the path of the object relative to the root.
+- Size, MimeType, ModTime are attributes of the object.
- IsDir is true if this is a directory (not yet implemented).
-- ID is the source ID of the file if known.
+- ID is the source ID of the object if known.
- Metadata is the backend specific metadata as described in the
backend docs.
@@ -12521,7 +13814,7 @@ When transferring files above SIZE to capable backends, rclone will use
multiple threads to transfer the file (default 256M).
Capable backends are marked in the overview as MultithreadUpload. (They
-need to implement either the OpenWriterAt or OpenChunkedWriter internal
+need to implement either the OpenWriterAt or OpenChunkWriter internal
interfaces). These include local, s3, azureblob, b2,
oracleobjectstorage and smb at the time of writing.
@@ -12631,6 +13924,11 @@ files if they are incorrect as it would normally.
This can be used if the remote is being synced with another tool also
(e.g. the Google Drive client).
+--no-update-dir-modtime
+
+When using this flag, rclone won't update modification times of remote
+directories if they are incorrect as it would normally.
+
--order-by string
The --order-by flag controls the order in which files in the backlog are
@@ -13719,14 +15017,14 @@ Use web browser to automatically authenticate? question.
Execute the following on the machine with the web browser (same rclone
version recommended):
- rclone authorize "amazon cloud drive"
+ rclone authorize "dropbox"
Then paste the result below:
result>
Then on your main desktop machine
- rclone authorize "amazon cloud drive"
+ rclone authorize "dropbox"
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
@@ -14319,7 +15617,7 @@ E.g. for an alternative filter-file.txt:
- *
Files file1.jpg, file3.png and file2.avi are listed whilst secret17.jpg
-and files without the suffix .jpgor.png` are excluded.
+and files without the suffix .jpg or .png are excluded.
E.g. for an alternative filter-file.txt:
@@ -15273,6 +16571,26 @@ See the config password command for more information on the above.
Authentication is required for this call.
+config/paths: Reads the config file path and other important paths.
+
+Returns a JSON object with the following keys:
+
+- config: path to config file
+- cache: path to root of cache directory
+- temp: path to root of temporary directory
+
+Eg
+
+ {
+ "cache": "/home/USER/.cache/rclone",
+ "config": "/home/USER/.rclone.conf",
+ "temp": "/tmp"
+ }
+
+See the config paths command for more information on the above.
+
+Authentication is required for this call.
+
config/providers: Shows how providers are configured in the config file.
Returns a JSON object: - providers - array of objects
@@ -16082,6 +17400,52 @@ instead:
rclone rc --loopback operations/fsinfo fs=remote:
+operations/hashsum: Produces a hashsum file for all the objects in the path.
+
+Produces a hash file for all the objects in the path using the hash
+named. The output is in the same format as the standard md5sum/sha1sum
+tool.
+
+This takes the following parameters:
+
+- fs - a remote name string e.g. "drive:" for the source, "/" for
+ local filesystem
+ - this can point to a file and just that file will be returned in
+ the listing.
+- hashType - type of hash to be used
+- download - check by downloading rather than with hash (boolean)
+- base64 - output the hashes in base64 rather than hex (boolean)
+
+If you supply the download flag, it will download the data from the
+remote and create the hash on the fly. This can be useful for remotes
+that don't support the given hash or if you really want to check all the
+data.
+
+Note that if you wish to supply a checkfile to check hashes against the
+current files then you should use operations/check instead of
+operations/hashsum.
+
+Returns:
+
+- hashsum - array of strings of the hashes
+- hashType - type of hash used
+
+Example:
+
+ $ rclone rc --loopback operations/hashsum fs=bin hashType=MD5 download=true base64=true
+ {
+ "hashType": "md5",
+ "hashsum": [
+ "WTSVLpuiXyJO_kGzJerRLg== backend-versions.sh",
+ "v1b_OlWCJO9LtNq3EIKkNQ== bisect-go-rclone.sh",
+ "VHbmHzHh4taXzgag8BAIKQ== bisect-rclone.sh",
+ ]
+ }
+
+See the hashsum command for more information on the above.
+
+Authentication is required for this call.
+
operations/list: List the given remote and path in JSON format
This takes the following parameters:
@@ -16463,7 +17827,11 @@ This takes the following parameters
- resilient - Allow future runs to retry after certain less-serious
errors, instead of requiring resync. Use at your own risk!
- workdir - server directory for history files (default:
- /home/ncw/.cache/rclone/bisync)
+ ~/.cache/rclone/bisync)
+- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path
+ on the same remote.
+- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path
+ on the same remote.
- noCleanup - retain working files
See bisync command help and full bisync description for more
@@ -16829,7 +18197,6 @@ Here is an overview of the major features of each cloud storage system.
------------------------------- ------------------- --------- ------------------ ----------------- ----------- ----------
1Fichier Whirlpool - No Yes R -
Akamai Netstorage MD5, SHA256 R/W No No R -
- Amazon Drive MD5 - Yes No R -
Amazon S3 (or S3 compatible) MD5 R/W No No R/W RWU
Backblaze B2 SHA1 R/W No No R/W -
Box SHA1 R/W Yes No - -
@@ -16838,7 +18205,7 @@ Here is an overview of the major features of each cloud storage system.
Enterprise File Fabric - R/W Yes No R/W -
FTP - R/W ¹⁰ No No - -
Google Cloud Storage MD5 R/W No No R/W -
- Google Drive MD5, SHA1, SHA256 R/W No Yes R/W -
+ Google Drive MD5, SHA1, SHA256 DR/W No Yes R/W DRWU
Google Photos - - No Yes R -
HDFS - R/W No No - -
HiDrive HiDrive ¹² R/W No No - -
@@ -16852,7 +18219,7 @@ Here is an overview of the major features of each cloud storage system.
Memory MD5 R/W No No - -
Microsoft Azure Blob Storage MD5 R/W No No R/W -
Microsoft Azure Files Storage MD5 R/W Yes No R/W -
- Microsoft OneDrive QuickXorHash ⁵ R/W Yes No R -
+ Microsoft OneDrive QuickXorHash ⁵ DR/W Yes No R DRW
OpenDrive MD5 R/W Yes Partial ⁸ - -
OpenStack Swift MD5 R/W No No R/W -
Oracle Object Storage MD5 R/W No No R/W -
@@ -16864,7 +18231,7 @@ Here is an overview of the major features of each cloud storage system.
QingStor MD5 - ⁹ No No R/W -
Quatrix by Maytech - R/W No No - -
Seafile - - No No - -
- SFTP MD5, SHA1 ² R/W Depends No - -
+ SFTP MD5, SHA1 ² DR/W Depends No - -
Sia - - No No - -
SMB - R/W Yes No - -
SugarSync - - No No - -
@@ -16873,7 +18240,7 @@ Here is an overview of the major features of each cloud storage system.
WebDAV MD5, SHA1 ³ R ⁴ Depends No - -
Yandex Disk MD5 R/W No No R -
Zoho WorkDrive - - No No - -
- The local filesystem All R/W Depends No - RWU
+ The local filesystem All DR/W Depends No - DRWU
¹ Dropbox supports its own custom hash. This is an SHA256 sum of all the
4 MiB block SHA256s.
@@ -16925,13 +18292,31 @@ ModTime
Almost all cloud storage systems store some sort of timestamp on
objects, but for several of them it is not something appropriate to use
for syncing. E.g. some backends will only write a timestamp that
-represent the time of the upload. To be relevant for syncing it should
+represents the time of the upload. To be relevant for syncing it should
be able to store the modification time of the source object. If this is
not the case, rclone will only check the file size by default, though
can be configured to check the file hash (with the --checksum flag).
Ideally it should also be possible to change the timestamp of an
existing file without having to re-upload it.
+ -----------------------------------------------------------------------
+ Key Explanation
+ ------------------- ---------------------------------------------------
+ - ModTimes not supported - times likely the upload
+ time
+
+ R ModTimes supported on files but can't be changed
+ without re-upload
+
+ R/W Read and Write ModTimes fully supported on files
+
+ DR ModTimes supported on files and directories but
+ can't be changed without re-upload
+
+ DR/W Read and Write ModTimes fully supported on files
+ and directories
+ -----------------------------------------------------------------------
+
For storage systems with a - in the ModTime column, the modification
time read on objects is not the modification time of the file when
uploaded.
It is most likely the time the file was uploaded, or possibly something
@@ -16951,6 +18336,9 @@ time only on a files in a mount will be silently ignored.
Storage systems with R/W (for read/write) in the ModTime column means
that they also support modtime-only operations.
+For storage systems with D in the ModTime column, the following
+symbols apply to directories as well as files.
+
Case Insensitive
If a cloud storage system is case sensitive then it is possible to have
@@ -17298,11 +18686,24 @@ backend) and/or user metadata (general purpose metadata).
The levels of metadata support are
- Key Explanation
- ----- -----------------------------------------------------------------
- R Read only System Metadata
- RW Read and write System Metadata
- RWU Read and write System Metadata and read and write User Metadata
+ -----------------------------------------------------------------------
+ Key Explanation
+ ------------------- ---------------------------------------------------
+ R Read only System Metadata on files only
+
+ RW Read and write System Metadata on files only
+
+ RWU Read and write System Metadata and read and write
+ User Metadata on files only
+
+ DR Read only System Metadata on files and directories
+
+ DRW Read and write System Metadata on files and
+ directories
+
+ DRWU Read and write System Metadata and read and write
+ User Metadata on files and directories
+ -----------------------------------------------------------------------
See the metadata docs for more info.
@@ -17319,8 +18720,6 @@ upon backend-specific capabilities.
Akamai Yes No No No No Yes Yes No No No Yes
Netstorage
- Amazon Drive Yes No Yes Yes No No No No No No Yes
-
Amazon S3 (or No Yes No No Yes Yes Yes Yes Yes No No
S3 compatible)
@@ -17351,6 +18750,8 @@ upon backend-specific capabilities.
HTTP No No No No No No No No No No Yes
+ ImageKit Yes Yes Yes No No No No No No No Yes
+
Internet No Yes No No Yes Yes No No Yes Yes No
Archive
@@ -17415,7 +18816,7 @@ upon backend-specific capabilities.
Zoho WorkDrive Yes Yes Yes Yes No No No No No Yes Yes
- The local Yes No Yes Yes No No Yes Yes No Yes Yes
+ The local No No Yes Yes No No Yes Yes No Yes Yes
filesystem
-------------------------------------------------------------------------------------------------------------------------------------
@@ -17532,7 +18933,7 @@ Flags for anything which can Copy a file.
--ignore-checksum Skip post copy check of checksums
--ignore-existing Skip all files that exist on destination
--ignore-size Ignore size when skipping use modtime or checksum
- -I, --ignore-times Don't skip files that match size and time - transfer all files
+ -I, --ignore-times Don't skip items that match size and time - transfer all unconditionally
--immutable Do not modify files, fail if existing files have been modified
--inplace Download directly to destination file instead of atomic download to temp/rename
--max-backlog int Maximum number of objects in sync or check backlog (default 10000)
@@ -17546,6 +18947,7 @@ Flags for anything which can Copy a file.
--multi-thread-write-buffer-size SizeSuffix In memory buffer size for writing when in multi-thread mode (default 128Ki)
--no-check-dest Don't check the destination, copy regardless
--no-traverse Don't traverse destination file system on copy
+ --no-update-dir-modtime Don't update directory modification times
--no-update-modtime Don't update destination modtime if files identical
--order-by string Instructions on how to order the transfers, e.g. 'size,descending'
--partial-suffix string Add partial-suffix to temporary file name when --inplace is not used (default ".partial")
@@ -17563,6 +18965,7 @@ Flags just used for rclone sync.
--delete-after When synchronizing, delete files on destination after transferring (default)
--delete-before When synchronizing, delete files on destination before transferring
--delete-during When synchronizing, delete files during transfer
+ --fix-case Force rename of case insensitive dest to match source
--ignore-errors Delete even if there are I/O errors
--max-delete int When synchronizing, limit the number of deletes (default -1)
--max-delete-size SizeSuffix When synchronizing, limit the total size of deletes (default off)
@@ -17609,7 +19012,7 @@ General networking and HTTP stuff.
--tpslimit float Limit HTTP transactions per second to this
--tpslimit-burst int Max burst of transactions for --tpslimit (default 1)
--use-cookies Enable session cookiejar
- --user-agent string Set the user-agent to a specified string (default "rclone/v1.65.0")
+ --user-agent string Set the user-agent to a specified string (default "rclone/v1.66.0")
Performance
@@ -17766,14 +19169,7 @@ Backend
Backend only flags. These can be set in the config file also.
- --acd-auth-url string Auth server URL
- --acd-client-id string OAuth Client Id
- --acd-client-secret string OAuth Client Secret
- --acd-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
- --acd-templink-threshold SizeSuffix Files >= this size will be downloaded via their tempLink (default 9Gi)
- --acd-token string OAuth Access Token as a JSON blob
- --acd-token-url string Token server url
- --acd-upload-wait-per-gb Duration Additional time per GiB to wait after a failed complete upload to see if it appears (default 3m0s)
+ --alias-description string Description of the remote
--alias-remote string Remote or path to alias
--azureblob-access-tier string Access tier of blob: hot, cool, cold or archive
--azureblob-account string Azure Storage Account Name
@@ -17784,6 +19180,8 @@ Backend only flags. These can be set in the config file also.
--azureblob-client-id string The ID of the client in use
--azureblob-client-secret string One of the service principal's client secrets
--azureblob-client-send-certificate-chain Send the certificate chain when using certificate auth
+ --azureblob-delete-snapshots string Set to specify how to deal with snapshots on blob deletion
+ --azureblob-description string Description of the remote
--azureblob-directory-markers Upload an empty object with a trailing slash when a new directory is created
--azureblob-disable-checksum Don't store MD5 checksum with object metadata
--azureblob-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8)
@@ -17814,6 +19212,7 @@ Backend only flags. These can be set in the config file also.
--azurefiles-client-secret string One of the service principal's client secrets
--azurefiles-client-send-certificate-chain Send the certificate chain when using certificate auth
--azurefiles-connection-string string Azure Files Connection String
+ --azurefiles-description string Description of the remote
--azurefiles-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8,Dot)
--azurefiles-endpoint string Endpoint for the service
--azurefiles-env-auth Read credentials from runtime (environment variables, CLI or MSI)
@@ -17833,8 +19232,9 @@ Backend only flags. These can be set in the config file also.
--b2-account string Account ID or Application Key ID
--b2-chunk-size SizeSuffix Upload chunk size (default 96Mi)
--b2-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4Gi)
+ --b2-description string Description of the remote
--b2-disable-checksum Disable checksums for large (> upload cutoff) files
- --b2-download-auth-duration Duration Time before the authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
+ --b2-download-auth-duration Duration Time before the public link authorization token will expire in s or suffix ms|s|m|h|d (default 1w)
--b2-download-url string Custom endpoint for downloads
--b2-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--b2-endpoint string Endpoint for the service
@@ -17853,6 +19253,7 @@ Backend only flags. These can be set in the config file also.
--box-client-id string OAuth Client Id
--box-client-secret string OAuth Client Secret
--box-commit-retries int Max number of times to try committing a multipart file (default 100)
+ --box-description string Description of the remote
--box-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot)
--box-impersonate string Impersonate this user ID when using a service account
--box-list-chunk int Size of listing chunk 1-1000 (default 1000)
@@ -17869,6 +19270,7 @@ Backend only flags. These can be set in the config file also.
--cache-db-path string Directory to store file structure metadata DB (default "$HOME/.cache/rclone/cache-backend")
--cache-db-purge Clear all the cached data for this remote on start
--cache-db-wait-time Duration How long to wait for the DB to be available - 0 is unlimited (default 1s)
+ --cache-description string Description of the remote
--cache-info-age Duration How long to cache file structure information (directory listings, file size, times, etc.) (default 6h0m0s)
--cache-plex-insecure string Skip all certificate verification when connecting to the Plex server
--cache-plex-password string The password of the Plex user (obscured)
@@ -17882,15 +19284,19 @@ Backend only flags. These can be set in the config file also.
--cache-workers int How many workers should run in parallel to download chunks (default 4)
--cache-writes Cache file data on writes through the FS
--chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi)
+ --chunker-description string Description of the remote
--chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks
--chunker-hash-type string Choose how chunker handles hash sums (default "md5")
--chunker-remote string Remote to chunk/unchunk
+ --combine-description string Description of the remote
--combine-upstreams SpaceSepList Upstreams for combining
+ --compress-description string Description of the remote
--compress-level int GZIP compression level (-2 to 9) (default -1)
--compress-mode string Compression mode (default "gzip")
--compress-ram-cache-limit SizeSuffix Some remotes don't allow the upload of files with unknown size (default 20Mi)
--compress-remote string Remote to compress
-L, --copy-links Follow symlinks and copy the pointed to item
+ --crypt-description string Description of the remote
--crypt-directory-name-encryption Option to either encrypt directory names or leave them intact (default true)
--crypt-filename-encoding string How to encode the encrypted filename to text string (default "base32")
--crypt-filename-encryption string How to encrypt the filenames (default "standard")
@@ -17901,6 +19307,7 @@ Backend only flags. These can be set in the config file also.
--crypt-remote string Remote to encrypt/decrypt
--crypt-server-side-across-configs Deprecated: use --server-side-across-configs instead
--crypt-show-mapping For all files listed show how the names encrypt
+ --crypt-strict-names If set, this will raise an error when crypt comes across a filename that can't be decrypted
--crypt-suffix string If this is set it will override the default suffix of ".bin" (default ".bin")
--drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded
--drive-allow-import-name-change Allow the filetype to change when uploading Google docs
@@ -17910,6 +19317,7 @@ Backend only flags. These can be set in the config file also.
--drive-client-id string Google Application Client Id
--drive-client-secret string OAuth Client Secret
--drive-copy-shortcut-content Server side copy contents of shortcuts instead of the shortcut
+ --drive-description string Description of the remote
--drive-disable-http2 Disable drive using http2 (default true)
--drive-encoding Encoding The encoding for the backend (default InvalidUtf8)
--drive-env-auth Get IAM credentials from runtime (environment variables or instance meta data if no env vars)
@@ -17958,6 +19366,7 @@ Backend only flags. These can be set in the config file also.
--dropbox-chunk-size SizeSuffix Upload chunk size (< 150Mi) (default 48Mi)
--dropbox-client-id string OAuth Client Id
--dropbox-client-secret string OAuth Client Secret
+ --dropbox-description string Description of the remote
--dropbox-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,RightSpace,InvalidUtf8,Dot)
--dropbox-impersonate string Impersonate this user when using a business account
--dropbox-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
@@ -17967,10 +19376,12 @@ Backend only flags. These can be set in the config file also.
--dropbox-token-url string Token server url
--fichier-api-key string Your API Key, get it from https://1fichier.com/console/params.pl
--fichier-cdn Set if you wish to use CDN download links
+ --fichier-description string Description of the remote
--fichier-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot)
--fichier-file-password string If you want to download a shared file that is password protected, add this parameter (obscured)
--fichier-folder-password string If you want to list the files in a shared folder that is password protected, add this parameter (obscured)
--fichier-shared-folder string If you want to download a shared folder, add this parameter
+ --filefabric-description string Description of the remote
--filefabric-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--filefabric-permanent-token string Permanent Authentication Token
--filefabric-root-folder-id string ID of the root folder
@@ -17981,6 +19392,7 @@ Backend only flags. These can be set in the config file also.
--ftp-ask-password Allow asking for FTP password when needed
--ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s)
--ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited
+ --ftp-description string Description of the remote
--ftp-disable-epsv Disable using EPSV even if server advertises support
--ftp-disable-mlsd Disable using MLSD even if server advertises support
--ftp-disable-tls13 Disable TLS 1.3 (workaround for FTP servers with buggy TLS)
@@ -18006,6 +19418,7 @@ Backend only flags. These can be set in the config file also.
--gcs-client-id string OAuth Client Id
--gcs-client-secret string OAuth Client Secret
--gcs-decompress If set this will decompress gzip encoded objects
+ --gcs-description string Description of the remote
--gcs-directory-markers Upload an empty object with a trailing slash when a new directory is created
--gcs-encoding Encoding The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gcs-endpoint string Endpoint for the service
@@ -18026,6 +19439,7 @@ Backend only flags. These can be set in the config file also.
--gphotos-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
--gphotos-client-id string OAuth Client Id
--gphotos-client-secret string OAuth Client Secret
+ --gphotos-description string Description of the remote
--gphotos-encoding Encoding The encoding for the backend (default Slash,CrLf,InvalidUtf8,Dot)
--gphotos-include-archived Also view and download archived media
--gphotos-read-only Set to make the Google Photos backend read only
@@ -18034,10 +19448,12 @@ Backend only flags. These can be set in the config file also.
--gphotos-token string OAuth Access Token as a JSON blob
--gphotos-token-url string Token server url
--hasher-auto-size SizeSuffix Auto-update checksum for files smaller than this size (disabled by default)
+ --hasher-description string Description of the remote
--hasher-hashes CommaSepList Comma separated list of supported checksum types (default md5,sha1)
--hasher-max-age Duration Maximum time to keep checksums in cache (0 = no cache, off = cache forever) (default off)
--hasher-remote string Remote to cache checksums for (e.g. myRemote:path)
--hdfs-data-transfer-protection string Kerberos data transfer protection: authentication|integrity|privacy
+ --hdfs-description string Description of the remote
--hdfs-encoding Encoding The encoding for the backend (default Slash,Colon,Del,Ctl,InvalidUtf8,Dot)
--hdfs-namenode CommaSepList Hadoop name nodes and ports
--hdfs-service-principal-name string Kerberos service principal name for the namenode
@@ -18046,6 +19462,7 @@ Backend only flags. These can be set in the config file also.
--hidrive-chunk-size SizeSuffix Chunksize for chunked uploads (default 48Mi)
--hidrive-client-id string OAuth Client Id
--hidrive-client-secret string OAuth Client Secret
+ --hidrive-description string Description of the remote
--hidrive-disable-fetching-member-count Do not fetch number of objects in directories unless it is absolutely necessary
--hidrive-encoding Encoding The encoding for the backend (default Slash,Dot)
--hidrive-endpoint string Endpoint for the service (default "https://api.hidrive.strato.com/2.1")
@@ -18056,10 +19473,12 @@ Backend only flags. These can be set in the config file also.
--hidrive-token-url string Token server url
--hidrive-upload-concurrency int Concurrency for chunked uploads (default 4)
--hidrive-upload-cutoff SizeSuffix Cutoff/Threshold for chunked uploads (default 96Mi)
+ --http-description string Description of the remote
--http-headers CommaSepList Set HTTP headers for all transactions
--http-no-head Don't use HEAD requests
--http-no-slash Set this if the site doesn't end directories with /
--http-url string URL of HTTP host to connect to
+ --imagekit-description string Description of the remote
--imagekit-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Dollar,Question,Hash,Percent,BackSlash,Del,Ctl,InvalidUtf8,Dot,SquareBracket)
--imagekit-endpoint string You can find your ImageKit.io URL endpoint in your [dashboard](https://imagekit.io/dashboard/developer/api-keys)
--imagekit-only-signed Restrict unsigned image URLs If you have configured Restrict unsigned image URLs in your dashboard settings, set this to true
@@ -18068,6 +19487,7 @@ Backend only flags. These can be set in the config file also.
--imagekit-upload-tags string Tags to add to the uploaded files, e.g. "tag1,tag2"
--imagekit-versions Include old versions in directory listings
--internetarchive-access-key-id string IAS3 Access Key
+ --internetarchive-description string Description of the remote
--internetarchive-disable-checksum Don't ask the server to test against MD5 checksum calculated by rclone (default true)
--internetarchive-encoding Encoding The encoding for the backend (default Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot)
--internetarchive-endpoint string IAS3 Endpoint (default "https://s3.us.archive.org")
@@ -18077,6 +19497,7 @@ Backend only flags. These can be set in the config file also.
--jottacloud-auth-url string Auth server URL
--jottacloud-client-id string OAuth Client Id
--jottacloud-client-secret string OAuth Client Secret
+ --jottacloud-description string Description of the remote
--jottacloud-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot)
--jottacloud-hard-delete Delete files permanently rather than putting them into the trash
--jottacloud-md5-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate the MD5 if required (default 10Mi)
@@ -18085,6 +19506,7 @@ Backend only flags. These can be set in the config file also.
--jottacloud-token-url string Token server url
--jottacloud-trashed-only Only show files that are in the trash
--jottacloud-upload-resume-limit SizeSuffix Files bigger than this can be resumed if the upload fail's (default 10Mi)
+ --koofr-description string Description of the remote
--koofr-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--koofr-endpoint string The Koofr API endpoint to use
--koofr-mountid string Mount ID of the mount to use
@@ -18092,10 +19514,12 @@ Backend only flags. These can be set in the config file also.
--koofr-provider string Choose your storage provider
--koofr-setmtime Does the backend support setting modification time (default true)
--koofr-user string Your user name
+ --linkbox-description string Description of the remote
--linkbox-token string Token from https://www.linkbox.to/admin/account
-l, --links Translate symlinks to/from regular files with a '.rclonelink' extension
--local-case-insensitive Force the filesystem to report itself as case insensitive
--local-case-sensitive Force the filesystem to report itself as case sensitive
+ --local-description string Description of the remote
--local-encoding Encoding The encoding for the backend (default Slash,Dot)
--local-no-check-updated Don't check to see if the files change during upload
--local-no-preallocate Disable preallocation of disk space for transferred files
@@ -18108,6 +19532,7 @@ Backend only flags. These can be set in the config file also.
--mailru-check-hash What should copy do if file checksum is mismatched or invalid (default true)
--mailru-client-id string OAuth Client Id
--mailru-client-secret string OAuth Client Secret
+ --mailru-description string Description of the remote
--mailru-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--mailru-pass string Password (obscured)
--mailru-speedup-enable Skip full upload if there is another file with same data hash (default true)
@@ -18118,12 +19543,15 @@ Backend only flags. These can be set in the config file also.
--mailru-token-url string Token server url
--mailru-user string User name (usually email)
--mega-debug Output more debug from Mega
+ --mega-description string Description of the remote
--mega-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
--mega-hard-delete Delete files permanently rather than putting them into the trash
--mega-pass string Password (obscured)
--mega-use-https Use HTTPS for transfers
--mega-user string User name
+ --memory-description string Description of the remote
--netstorage-account string Set the NetStorage account name
+ --netstorage-description string Description of the remote
--netstorage-host string Domain+path of NetStorage host to connect to
--netstorage-protocol string Select between HTTP or HTTPS protocol (default "https")
--netstorage-secret string Set the NetStorage account secret/G2O key for authentication (obscured)
@@ -18135,6 +19563,7 @@ Backend only flags. These can be set in the config file also.
--onedrive-client-id string OAuth Client Id
--onedrive-client-secret string OAuth Client Secret
--onedrive-delta If set rclone will use delta listing to implement recursive listings
+ --onedrive-description string Description of the remote
--onedrive-drive-id string The ID of the drive to use
--onedrive-drive-type string The type of the drive (personal | business | documentLibrary)
--onedrive-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot)
@@ -18144,6 +19573,7 @@ Backend only flags. These can be set in the config file also.
--onedrive-link-scope string Set the scope of the links created by the link command (default "anonymous")
--onedrive-link-type string Set the type of the links created by the link command (default "view")
--onedrive-list-chunk int Size of listing chunk (default 1000)
+ --onedrive-metadata-permissions Bits Control whether permissions should be read or written in metadata (default off)
--onedrive-no-versions Remove all versions on modifying operations
--onedrive-region string Choose national cloud region for OneDrive (default "global")
--onedrive-root-folder-id string ID of the root folder
@@ -18157,6 +19587,7 @@ Backend only flags. These can be set in the config file also.
--oos-config-profile string Profile name inside the oci config file (default "Default")
--oos-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
--oos-copy-timeout Duration Timeout for copy (default 1m0s)
+ --oos-description string Description of the remote
--oos-disable-checksum Don't store MD5 checksum with object metadata
--oos-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8,Dot)
--oos-endpoint string Endpoint for Object storage API
@@ -18175,12 +19606,14 @@ Backend only flags. These can be set in the config file also.
--oos-upload-concurrency int Concurrency for multipart uploads (default 10)
--oos-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--opendrive-chunk-size SizeSuffix Files will be uploaded in chunks this size (default 10Mi)
+ --opendrive-description string Description of the remote
--opendrive-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
--opendrive-password string Password (obscured)
--opendrive-username string Username
--pcloud-auth-url string Auth server URL
--pcloud-client-id string OAuth Client Id
--pcloud-client-secret string OAuth Client Secret
+ --pcloud-description string Description of the remote
--pcloud-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--pcloud-hostname string Hostname to connect to (default "api.pcloud.com")
--pcloud-password string Your pcloud password (obscured)
@@ -18191,6 +19624,7 @@ Backend only flags. These can be set in the config file also.
--pikpak-auth-url string Auth server URL
--pikpak-client-id string OAuth Client Id
--pikpak-client-secret string OAuth Client Secret
+ --pikpak-description string Description of the remote
--pikpak-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot)
--pikpak-hash-memory-limit SizeSuffix Files bigger than this will be cached on disk to calculate hash if required (default 10Mi)
--pikpak-pass string Pikpak password (obscured)
@@ -18203,11 +19637,13 @@ Backend only flags. These can be set in the config file also.
--premiumizeme-auth-url string Auth server URL
--premiumizeme-client-id string OAuth Client Id
--premiumizeme-client-secret string OAuth Client Secret
+ --premiumizeme-description string Description of the remote
--premiumizeme-encoding Encoding The encoding for the backend (default Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--premiumizeme-token string OAuth Access Token as a JSON blob
--premiumizeme-token-url string Token server url
--protondrive-2fa string The 2FA code
--protondrive-app-version string The app version string (default "macos-drive@1.0.0-alpha.1+rclone")
+ --protondrive-description string Description of the remote
--protondrive-enable-caching Caches the files and folders metadata to reduce API calls (default true)
--protondrive-encoding Encoding The encoding for the backend (default Slash,LeftSpace,RightSpace,InvalidUtf8,Dot)
--protondrive-mailbox-password string The mailbox password of your two-password proton account (obscured)
@@ -18218,12 +19654,14 @@ Backend only flags. These can be set in the config file also.
--putio-auth-url string Auth server URL
--putio-client-id string OAuth Client Id
--putio-client-secret string OAuth Client Secret
+ --putio-description string Description of the remote
--putio-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--putio-token string OAuth Access Token as a JSON blob
--putio-token-url string Token server url
--qingstor-access-key-id string QingStor Access Key ID
--qingstor-chunk-size SizeSuffix Chunk size to use for uploading (default 4Mi)
--qingstor-connection-retries int Number of connection retries (default 3)
+ --qingstor-description string Description of the remote
--qingstor-encoding Encoding The encoding for the backend (default Slash,Ctl,InvalidUtf8)
--qingstor-endpoint string Enter an endpoint URL to connection QingStor API
--qingstor-env-auth Get QingStor credentials from runtime
@@ -18232,18 +19670,21 @@ Backend only flags. These can be set in the config file also.
--qingstor-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--qingstor-zone string Zone to connect to
--quatrix-api-key string API key for accessing Quatrix account
+ --quatrix-description string Description of the remote
--quatrix-effective-upload-time string Wanted upload time for one chunk (default "4s")
--quatrix-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot)
--quatrix-hard-delete Delete files permanently rather than putting them into the trash
--quatrix-host string Host name of Quatrix account
--quatrix-maximal-summary-chunk-size SizeSuffix The maximal summary for all chunks. It should not be less than 'transfers'*'minimal_chunk_size' (default 95.367Mi)
--quatrix-minimal-chunk-size SizeSuffix The minimal size for one chunk (default 9.537Mi)
+ --quatrix-skip-project-folders Skip project folders in operations
--s3-access-key-id string AWS Access Key ID
--s3-acl string Canned ACL used when creating buckets and storing or copying objects
--s3-bucket-acl string Canned ACL used when creating buckets
--s3-chunk-size SizeSuffix Chunk size to use for uploading (default 5Mi)
--s3-copy-cutoff SizeSuffix Cutoff for switching to multipart copy (default 4.656Gi)
--s3-decompress If set this will decompress gzip encoded objects
+ --s3-description string Description of the remote
--s3-directory-markers Upload an empty object with a trailing slash when a new directory is created
--s3-disable-checksum Don't store MD5 checksum with object metadata
--s3-disable-http2 Disable usage of http2 for S3 backends
@@ -18278,19 +19719,22 @@ Backend only flags. These can be set in the config file also.
--s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key
--s3-storage-class string The storage class to use when storing new objects in S3
--s3-sts-endpoint string Endpoint for STS
- --s3-upload-concurrency int Concurrency for multipart uploads (default 4)
+ --s3-upload-concurrency int Concurrency for multipart uploads and copies (default 4)
--s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi)
--s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint
--s3-use-accept-encoding-gzip Accept-Encoding: gzip Whether to send Accept-Encoding: gzip header (default unset)
--s3-use-already-exists Tristate Set if rclone should report BucketAlreadyExists errors on bucket creation (default unset)
+ --s3-use-dual-stack If true use AWS S3 dual-stack endpoint (IPv6 support)
--s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset)
--s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset)
--s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads
--s3-v2-auth If true use v2 authentication
--s3-version-at Time Show file versions as they were at the specified time (default off)
+ --s3-version-deleted Show deleted file markers when using versions
--s3-versions Include old versions in directory listings
--seafile-2fa Two-factor authentication ('true' if the account has 2FA enabled)
--seafile-create-library Should rclone create a library if it doesn't exist
+ --seafile-description string Description of the remote
--seafile-encoding Encoding The encoding for the backend (default Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8)
--seafile-library string Name of the library
--seafile-library-key string Library password (for encrypted libraries only) (obscured)
@@ -18302,6 +19746,7 @@ Backend only flags. These can be set in the config file also.
--sftp-ciphers SpaceSepList Space separated list of ciphers to be used for session encryption, ordered by preference
--sftp-concurrency int The maximum number of outstanding requests for one file (default 64)
--sftp-copy-is-hardlink Set to enable server side copies using hardlinks
+ --sftp-description string Description of the remote
--sftp-disable-concurrent-reads If set don't use concurrent reads
--sftp-disable-concurrent-writes If set don't use concurrent writes
--sftp-disable-hashcheck Disable the execution of SSH commands to determine if remote file hashing is available
@@ -18336,6 +19781,7 @@ Backend only flags. These can be set in the config file also.
--sharefile-chunk-size SizeSuffix Upload chunk size (default 64Mi)
--sharefile-client-id string OAuth Client Id
--sharefile-client-secret string OAuth Client Secret
+ --sharefile-description string Description of the remote
--sharefile-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot)
--sharefile-endpoint string Endpoint for API calls
--sharefile-root-folder-id string ID of the root folder
@@ -18344,10 +19790,12 @@ Backend only flags. These can be set in the config file also.
--sharefile-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (default 128Mi)
--sia-api-password string Sia Daemon API Password (obscured)
--sia-api-url string Sia daemon API URL, like http://sia.daemon.host:9980 (default "http://127.0.0.1:9980")
+ --sia-description string Description of the remote
--sia-encoding Encoding The encoding for the backend (default Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot)
--sia-user-agent string Siad User Agent (default "Sia-Agent")
--skip-links Don't warn about skipped symlinks
--smb-case-insensitive Whether the server is configured to be case-insensitive (default true)
+ --smb-description string Description of the remote
--smb-domain string Domain name for NTLM authentication (default "WORKGROUP")
--smb-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot)
--smb-hide-special-share Hide special shares (e.g. print$) which users aren't supposed to access (default true)
@@ -18359,6 +19807,7 @@ Backend only flags. These can be set in the config file also.
--smb-user string SMB username (default "$USER")
--storj-access-grant string Access grant
--storj-api-key string API key
+ --storj-description string Description of the remote
--storj-passphrase string Encryption passphrase
--storj-provider string Choose an authentication method (default "existing")
--storj-satellite-address string Satellite address (default "us1.storj.io")
@@ -18367,6 +19816,7 @@ Backend only flags. These can be set in the config file also.
--sugarsync-authorization string Sugarsync authorization
--sugarsync-authorization-expiry string Sugarsync authorization expiry
--sugarsync-deleted-id string Sugarsync deleted folder id
+ --sugarsync-description string Description of the remote
--sugarsync-encoding Encoding The encoding for the backend (default Slash,Ctl,InvalidUtf8,Dot)
--sugarsync-hard-delete Permanently delete files if true
--sugarsync-private-access-key string Sugarsync Private Access Key
@@ -18380,6 +19830,7 @@ Backend only flags. These can be set in the config file also.
--swift-auth-token string Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
--swift-auth-version int AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
--swift-chunk-size SizeSuffix Above this size files will be chunked into a _segments container (default 5Gi)
+ --swift-description string Description of the remote
--swift-domain string User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
--swift-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8)
--swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public")
@@ -18399,17 +19850,21 @@ Backend only flags. These can be set in the config file also.
--union-action-policy string Policy to choose upstream on ACTION category (default "epall")
--union-cache-time int Cache time of usage and free space (in seconds) (default 120)
--union-create-policy string Policy to choose upstream on CREATE category (default "epmfs")
+ --union-description string Description of the remote
--union-min-free-space SizeSuffix Minimum viable free space for lfs/eplfs policies (default 1Gi)
--union-search-policy string Policy to choose upstream on SEARCH category (default "ff")
--union-upstreams string List of space separated upstreams
--uptobox-access-token string Your access token
+ --uptobox-description string Description of the remote
--uptobox-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot)
--uptobox-private Set to make uploaded files private
--webdav-bearer-token string Bearer token instead of user/pass (e.g. a Macaroon)
--webdav-bearer-token-command string Command to run to get a bearer token
+ --webdav-description string Description of the remote
--webdav-encoding string The encoding for the backend
--webdav-headers CommaSepList Set HTTP headers for all transactions
--webdav-nextcloud-chunk-size SizeSuffix Nextcloud upload chunk size (default 10Mi)
+ --webdav-owncloud-exclude-shares Exclude ownCloud shares
--webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms)
--webdav-pass string Password (obscured)
--webdav-url string URL of http host to connect to
@@ -18418,6 +19873,7 @@ Backend only flags. These can be set in the config file also.
--yandex-auth-url string Auth server URL
--yandex-client-id string OAuth Client Id
--yandex-client-secret string OAuth Client Secret
+ --yandex-description string Description of the remote
--yandex-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-hard-delete Delete files permanently rather than putting them into the trash
--yandex-token string OAuth Access Token as a JSON blob
@@ -18425,6 +19881,7 @@ Backend only flags. These can be set in the config file also.
--zoho-auth-url string Auth server URL
--zoho-client-id string OAuth Client Id
--zoho-client-secret string OAuth Client Secret
+ --zoho-description string Description of the remote
--zoho-encoding Encoding The encoding for the backend (default Del,Ctl,InvalidUtf8)
--zoho-region string Zoho region to connect to
--zoho-token string OAuth Access Token as a JSON blob
@@ -18934,20 +20391,36 @@ and verify that settings did update:
If docker refuses to remove the volume, you should find containers or
swarm services that use it and stop them first.
+Bisync
+
+bisync is in beta and is considered an advanced command, so use with
+care. Make sure you have read and understood the entire manual
+(especially the Limitations section) before using, or data loss can
+result. Questions can be asked in the Rclone Forum.
+
Getting started
- Install rclone and setup your remotes.
- Bisync will create its working directory at ~/.cache/rclone/bisync
- on Linux or C:\Users\MyLogin\AppData\Local\rclone\bisync on Windows.
- Make sure that this location is writable.
+ on Linux, /Users/yourusername/Library/Caches/rclone/bisync on Mac,
+ or C:\Users\MyLogin\AppData\Local\rclone\bisync on Windows. Make
+ sure that this location is writable.
- Run bisync with the --resync flag, specifying the paths to the local
and remote sync directory roots.
-- For successive sync runs, leave off the --resync flag.
+- For successive sync runs, leave off the --resync flag. (Important!)
- Consider using a filters file for excluding unnecessary files and
directories from the sync.
- Consider setting up the --check-access feature for safety.
-- On Linux, consider setting up a crontab entry. bisync can safely run
- in concurrent cron jobs thanks to lock files it maintains.
+- On Linux or Mac, consider setting up a crontab entry. bisync can
+ safely run in concurrent cron jobs thanks to lock files it
+ maintains.
+
+For example, your first command might look like this:
+
+ rclone bisync remote1:path1 remote2:path2 --create-empty-src-dirs --compare size,modtime,checksum --slow-hash-sync-only --resilient -MvP --drive-skip-gdocs --fix-case --resync --dry-run
+
+If all looks good, run it again without --dry-run. After that, remove
+--resync as well.
Here is a typical run log (with timestamps removed for clarity):
@@ -19004,36 +20477,36 @@ Command line syntax
Type 'rclone listremotes' for list of configured remotes.
Optional Flags:
- --check-access Ensure expected `RCLONE_TEST` files are found on
- both Path1 and Path2 filesystems, else abort.
- --check-filename FILENAME Filename for `--check-access` (default: `RCLONE_TEST`)
- --check-sync CHOICE Controls comparison of final listings:
- `true | false | only` (default: true)
- If set to `only`, bisync will only compare listings
- from the last run but skip actual sync.
- --filters-file PATH Read filtering patterns from a file
- --max-delete PERCENT Safety check on maximum percentage of deleted files allowed.
- If exceeded, the bisync run will abort. (default: 50%)
- --force Bypass `--max-delete` safety check and run the sync.
- Consider using with `--verbose`
- --create-empty-src-dirs Sync creation and deletion of empty directories.
- (Not compatible with --remove-empty-dirs)
- --remove-empty-dirs Remove empty directories at the final cleanup step.
- -1, --resync Performs the resync run.
- Warning: Path1 files may overwrite Path2 versions.
- Consider using `--verbose` or `--dry-run` first.
- --ignore-listing-checksum Do not use checksums for listings
- (add --ignore-checksum to additionally skip post-copy checksum checks)
- --resilient Allow future runs to retry after certain less-serious errors,
- instead of requiring --resync. Use at your own risk!
- --localtime Use local time in listings (default: UTC)
- --no-cleanup Retain working files (useful for troubleshooting and testing).
- --workdir PATH Use custom working directory (useful for testing).
- (default: `~/.cache/rclone/bisync`)
- -n, --dry-run Go through the motions - No files are copied/deleted.
- -v, --verbose Increases logging verbosity.
- May be specified more than once for more details.
- -h, --help help for bisync
+ --backup-dir1 string --backup-dir for Path1. Must be a non-overlapping path on the same remote.
+ --backup-dir2 string --backup-dir for Path2. Must be a non-overlapping path on the same remote.
+ --check-access Ensure expected RCLONE_TEST files are found on both Path1 and Path2 filesystems, else abort.
+ --check-filename string Filename for --check-access (default: RCLONE_TEST)
+ --check-sync string Controls comparison of final listings: true|false|only (default: true) (default "true")
+ --compare string Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')
+ --conflict-loser ConflictLoserAction Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): , num, pathname, delete (default: num)
+ --conflict-resolve string Automatically resolve conflicts by preferring the version that is: none, path1, path2, newer, older, larger, smaller (default: none) (default "none")
+ --conflict-suffix string Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')
+ --create-empty-src-dirs Sync creation and deletion of empty directories. (Not compatible with --remove-empty-dirs)
+ --download-hash Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)
+ --filters-file string Read filtering patterns from a file
+ --force Bypass --max-delete safety check and run the sync. Consider using with --verbose
+ -h, --help help for bisync
+ --ignore-listing-checksum Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)
+ --max-lock Duration Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m) (default 0s)
+ --no-cleanup Retain working files (useful for troubleshooting and testing).
+ --no-slow-hash Ignore listing checksums only on backends where they are slow
+ --recover Automatically recover from interruptions without requiring --resync.
+ --remove-empty-dirs Remove ALL empty directories at the final cleanup step.
+ --resilient Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!
+ -1, --resync Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.
+ --resync-mode string During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.) (default "none")
+ --retries int Retry operations this many times if they fail (requires --resilient). (default 3)
+ --retries-sleep Duration Interval between retrying operations if they fail, e.g. 500ms, 60s, 5m (0 to disable) (default 0s)
+ --slow-hash-sync-only Ignore slow checksums for listings and deltas, but still consider them during sync calls.
+ --workdir string Use custom working dir - useful for testing. (default: {WORKDIR})
+ --max-delete PERCENT Safety check on maximum percentage of deleted files allowed. If exceeded, the bisync run will abort. (default: 50%)
+ -n, --dry-run Go through the motions - No files are copied/deleted.
+ -v, --verbose Increases logging verbosity. May be specified more than once for more details.
Arbitrary rclone flags may be specified on the bisync command line, for
example
@@ -19069,28 +20542,25 @@ Command-line flags
--resync
This will effectively make both Path1 and Path2 filesystems contain a
-matching superset of all files. Path2 files that do not exist in Path1
-will be copied to Path1, and the process will then copy the Path1 tree
-to Path2.
+matching superset of all files. By default, Path2 files that do not
+exist in Path1 will be copied to Path1, and the process will then copy
+the Path1 tree to Path2.
-The --resync sequence is roughly equivalent to:
+The --resync sequence is roughly equivalent to the following (but see
+--resync-mode for other options):
- rclone copy Path2 Path1 --ignore-existing
- rclone copy Path1 Path2
-
-Or, if using --create-empty-src-dirs:
-
- rclone copy Path2 Path1 --ignore-existing
- rclone copy Path1 Path2 --create-empty-src-dirs
- rclone copy Path2 Path1 --create-empty-src-dirs
+ rclone copy Path2 Path1 --ignore-existing [--create-empty-src-dirs]
+ rclone copy Path1 Path2 [--create-empty-src-dirs]
The base directories on both Path1 and Path2 filesystems must exist or
bisync will fail. This is required for safety, so that bisync can verify
that both paths are valid.
When using --resync, a newer version of a file on the Path2 filesystem
-will be overwritten by the Path1 filesystem version. (Note that this is
-NOT entirely symmetrical.) Carefully evaluate deltas using --dry-run.
+will (by default) be overwritten by the Path1 filesystem version. (Note
+that this is NOT entirely symmetrical, and more symmetrical options can
+be specified with the --resync-mode flag.) Carefully evaluate deltas
+using --dry-run.
For a resync run, one of the paths may be empty (no files in the path
tree). The resync run should result in files on both paths, else a
@@ -19102,6 +20572,100 @@ Empty current PathN listing. Cannot sync to an empty directory: X.pathN.lst
This is a safety check that an unexpected empty path does not result in
deleting everything in the other path.
+Note that --resync implies --resync-mode path1 unless a different
+--resync-mode is explicitly specified. It is not necessary to use both
+the --resync and --resync-mode flags -- either one is sufficient without
+the other.
+
+Note: --resync (including --resync-mode) should only be used under
+three specific (rare) circumstances:
+
+1. It is your first bisync run (between these two paths)
+2. You've just made changes to your bisync settings (such as editing
+   the contents of your --filters-file)
+3. There was an error on the prior run, and as a result, bisync now
+   requires --resync to recover
+
+The rest of the time, you should omit --resync. This is because
+--resync will only copy (not sync) each side to the other. Therefore, if
+you included --resync for every bisync run, it would never be possible
+to delete a file -- the deleted file would always keep reappearing at
+the end of every run (because it's being copied from the other side
+where it still exists). Similarly, renaming a file would always result
+in a duplicate copy (both old and new name) on both sides.
+
+If you find that frequent interruptions from #3 are an issue, rather
+than automatically running --resync, the recommended alternative is to
+use the --resilient, --recover, and --conflict-resolve flags (along
+with Graceful Shutdown mode, when needed) for a very robust
+"set-it-and-forget-it" bisync setup that can automatically bounce back
+from almost any interruption it might encounter. Consider adding
+something like the following:
+
+ --resilient --recover --max-lock 2m --conflict-resolve newer
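+
+For example, a complete command built around this approach might look
+like the following sketch (the remote names and paths are
+placeholders):
+
+    rclone bisync remote1:path1 remote2:path2 --resilient --recover --max-lock 2m --conflict-resolve newer -v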
+
+--resync-mode CHOICE
+
+In the event that a file differs on both sides during a --resync,
+--resync-mode controls which version will overwrite the other. The
+supported options are similar to --conflict-resolve. For all of the
+following options, the version that is kept is referred to as the
+"winner", and the version that is overwritten (deleted) is referred to
+as the "loser". The options are named after the "winner":
+
+- path1 - (the default) - the version from Path1 is unconditionally
+ considered the winner (regardless of modtime and size, if any). This
+ can be useful if one side is more trusted or up-to-date than the
+ other, at the time of the --resync.
+- path2 - same as path1, except the path2 version is considered the
+ winner.
+- newer - the newer file (by modtime) is considered the winner,
+ regardless of which side it came from. This may result in having a
+ mix of some winners from Path1, and some winners from Path2. (The
+ implementation is analogous to running rclone copy --update in both
+ directions.)
+- older - same as newer, except the older file is considered the
+ winner, and the newer file is considered the loser.
+- larger - the larger file (by size) is considered the winner
+ (regardless of modtime, if any). This can be a useful option for
+ remotes without modtime support, or with the kinds of files (such as
+ logs) that tend to grow but not shrink, over time.
+- smaller - the smaller file (by size) is considered the winner
+ (regardless of modtime, if any).
+
+For all of the above options, note the following:
+
+- If either of the underlying remotes lacks support for the chosen
+  method, it will be ignored and will fall back to the default of
+  path1. (For example, if --resync-mode newer is set, but one of the
+  paths uses a remote that doesn't support modtime.)
+- If a winner can't be determined because the chosen method's
+  attribute is missing or equal, it will be ignored, and bisync will
+  instead try to determine whether the files differ by looking at the
+  other --compare methods in effect. (For example, if --resync-mode
+  newer is set, but the Path1 and Path2 modtimes are identical, bisync
+  will compare the sizes.) If bisync concludes that they differ,
+  preference is given to whichever is the "source" at that moment. (In
+  practice, this gives a slight advantage to Path2, as the 2to1 copy
+  comes before the 1to2 copy.) If the files do not differ, nothing is
+  copied (as both sides are already correct).
+- These options apply only to files that exist on both sides (with the
+  same name and relative path). Files that exist only on one side and
+  not the other are always copied to the other during --resync (this
+  is one of the main differences between resync and non-resync runs).
+- --conflict-resolve, --conflict-loser, and --conflict-suffix do not
+  apply during --resync, and unlike these flags, nothing is renamed
+  during --resync. When a file differs on both sides during --resync,
+  one version always overwrites the other (much like in rclone copy).
+  (Consider using --backup-dir to retain a backup of the losing
+  version.)
+- Unlike for --conflict-resolve, --resync-mode none is not a valid
+  option (or rather, it will be interpreted as "no resync", unless
+  --resync has also been specified, in which case it will be ignored.)
+- Winners and losers are decided at the individual file level only
+  (there is not currently an option to pick an entire winning
+  directory atomically, although the path1 and path2 options typically
+  produce a similar result.)
+- To maintain backward compatibility, the --resync flag implies
+  --resync-mode path1 unless a different --resync-mode is explicitly
+  specified. Similarly, all --resync-mode options (except none) imply
+  --resync, so it is not necessary to use both the --resync and
+  --resync-mode flags simultaneously -- either one is sufficient
+  without the other.
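+
+For example, to prefer whichever side has the newer version of any
+file that differs during the initial run, one might use something like
+the following sketch (remote names and paths are placeholders; as
+noted above, --resync-mode newer implies --resync):
+
+    rclone bisync remote1:path1 remote2:path2 --resync-mode newer --dry-run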
+
--check-access
Access check files are an additional safety measure against data loss.
@@ -19141,6 +20705,145 @@ must exist, synchronized between your source and destination filesets,
in order for --check-access to succeed. See --check-access for
additional details.
+--compare
+
+As of v1.66, bisync fully supports comparing based on any combination of
+size, modtime, and checksum (lifting the prior restriction on backends
+without modtime support.)
+
+By default (without the --compare flag), bisync inherits the same
+comparison options as sync (that is: size and modtime by default, unless
+modified with flags such as --checksum or --size-only.)
+
+If the --compare flag is set, it will override these defaults. This can
+be useful if you wish to compare based on combinations not currently
+supported in sync, such as comparing all three of size AND modtime AND
+checksum simultaneously (or just modtime AND checksum).
+
+--compare takes a comma-separated list, with the currently supported
+values being size, modtime, and checksum. For example, if you want to
+compare size and checksum, but not modtime, you would do:
+
+ --compare size,checksum
+
+Or if you want to compare all three:
+
+ --compare size,modtime,checksum
+
+--compare overrides any conflicting flags. For example, if you set the
+conflicting flags --compare checksum --size-only, --size-only will be
+ignored, and bisync will compare checksum and not size. To avoid
+confusion, it is recommended to use either --compare or the normal sync
+flags, but not both.
+
+If --compare includes checksum and both remotes support checksums but
+have no hash types in common with each other, checksums will be
+considered only for comparisons within the same side (to determine what
+has changed since the prior sync), but not for comparisons against the
+opposite side. If one side supports checksums and the other does not,
+checksums will only be considered on the side that supports them.
+
+When comparing with checksum and/or size without modtime, bisync cannot
+determine whether a file is newer or older -- only whether it is changed
+or unchanged. (If it is changed on both sides, bisync still does the
+standard equality-check to avoid declaring a sync conflict unless it
+absolutely has to.)
+
+It is recommended to do a --resync when changing --compare settings, as
+otherwise your prior listing files may not contain the attributes you
+wish to compare (for example, they will not have stored checksums if you
+were not previously comparing checksums.)
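+
+For example, when switching to checksum-based comparison for the first
+time, a run like the following (paths are placeholders) regenerates the
+listings with the new attributes:
+
+    rclone bisync Path1 Path2 --compare size,modtime,checksum --resync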
+
+--ignore-listing-checksum
+
+When --checksum or --compare checksum is set, bisync will retrieve (or
+generate) checksums (for backends that support them) when creating the
+listings for both paths, and store the checksums in the listing files.
+--ignore-listing-checksum will disable this behavior, which may speed
+things up considerably, especially on backends (such as local) where
+hashes must be computed on the fly instead of retrieved. Please note the
+following:
+
+- As of v1.66, --ignore-listing-checksum is now automatically set when
+ neither --checksum nor --compare checksum are in use (as the
+ checksums would not be used for anything.)
+- --ignore-listing-checksum is NOT the same as --ignore-checksum, and
+ you may wish to use one or the other, or both. In a nutshell:
+ --ignore-listing-checksum controls whether checksums are considered
+ when scanning for diffs, while --ignore-checksum controls whether
+ checksums are considered during the copy/sync operations that
+ follow, if there ARE diffs.
+- Unless --ignore-listing-checksum is passed, bisync currently
+ computes hashes for one path even when there's no common hash with
+ the other path (for example, a crypt remote.) This can still be
+ beneficial, as the hashes will still be used to detect changes
+ within the same side (if --checksum or --compare checksum is set),
+ even if they can't be used to compare against the opposite side.
+- If you wish to ignore listing checksums only on remotes where they
+ are slow to compute, consider using --no-slow-hash (or
+ --slow-hash-sync-only) instead of --ignore-listing-checksum.
+- If --ignore-listing-checksum is used simultaneously with
+ --compare checksum (or --checksum), checksums will be ignored for
+ bisync deltas, but still considered during the sync operations that
+ follow (if deltas are detected based on modtime and/or size.)
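+
+For example (paths are placeholders), a hypothetical run like the
+following detects deltas by size and modtime without storing checksums
+in the listings, but still considers checksums during the sync
+operations that follow:
+
+    rclone bisync Path1 Path2 --compare size,modtime,checksum --ignore-listing-checksum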
+
+--no-slow-hash
+
+On some remotes (notably local), checksums can dramatically slow down a
+bisync run, because hashes cannot be stored and need to be computed in
+real-time when they are requested. On other remotes (such as drive),
+they add practically no time at all. The --no-slow-hash flag will
+automatically skip checksums on remotes where they are slow, while still
+comparing them on others (assuming --compare includes checksum.) This
+can be useful when one of your bisync paths is slow but you still want
+to check checksums on the other, for a more robust sync.
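+
+For example, if your local path computes hashes slowly but the other
+remote's hashes are cheap to retrieve, a hypothetical run like this
+skips checksums on the slow side while still comparing them on the fast
+one:
+
+    rclone bisync Path1 Path2 --compare size,modtime,checksum --no-slow-hash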
+
+--slow-hash-sync-only
+
+Same as --no-slow-hash, except slow hashes are still considered during
+sync calls. They are still NOT considered for determining deltas, nor
+are they included in listings. They are also skipped during --resync. The
+main use case for this flag is when you have a large number of files,
+but relatively few of them change from run to run -- so you don't want
+to check your entire tree every time (it would take too long), but you
+still want to consider checksums for the smaller group of files for
+which a modtime or size change was detected. Keep in mind that this
+speed savings comes with a safety trade-off: if a file's content were to
+change without a change to its modtime or size, bisync would not detect
+it, and it would not be synced.
+
+--slow-hash-sync-only is only useful if both remotes share a common hash
+type (if they don't, bisync will automatically fall back to
+--no-slow-hash.) Both --no-slow-hash and --slow-hash-sync-only have no
+effect without --compare checksum (or --checksum).
+
+--download-hash
+
+If --download-hash is set, bisync will use best efforts to obtain an MD5
+checksum by downloading and computing on-the-fly, when checksums are not
+otherwise available (for example, a remote that doesn't support them.)
+Note that since rclone has to download the entire file, this may
+dramatically slow down your bisync runs, and is also likely to use a lot
+of data, so it is probably not practical for bisync paths with a large
+total file size. However, it can be a good option for syncing
+small-but-important files with maximum accuracy (for example, a source
+code repo on a crypt remote.) An additional advantage over methods like
+cryptcheck is that the original file is not required for comparison (for
+example, --download-hash can be used to bisync two different crypt
+remotes with different passwords.)
+
+When --download-hash is set, bisync still looks for more efficient
+checksums first, and falls back to downloading only when none are found.
+It takes priority over conflicting flags such as --no-slow-hash.
+--download-hash is not suitable for Google Docs and other files of
+unknown size, as their checksums would change from run to run (due to
+small variances in the internals of the generated export file.)
+Therefore, bisync automatically skips --download-hash for files with a
+size less than 0.
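+
+For example, to bisync two different crypt remotes (hypothetical names)
+with checksum comparison despite neither exposing native hashes:
+
+    rclone bisync secretA: secretB: --compare size,modtime,checksum --download-hash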
+
+See also: Hasher backend, cryptcheck command, rclone check --download
+option, md5sum command
+
--max-delete
As a safety check, if greater than the --max-delete percent of files
@@ -19180,6 +20883,146 @@ to the hash stored in the .md5 file. If they don't match, the run aborts
with a critical error and thus forces you to do a --resync, likely
avoiding a disaster.
+--conflict-resolve CHOICE
+
+In bisync, a "conflict" is a file that is new or changed on both sides
+(relative to the prior run) AND is not currently identical on both
+sides. --conflict-resolve controls how bisync handles such a scenario.
+The currently supported options are:
+
+- none - (the default) - do not attempt to pick a winner, keep and
+ rename both files according to --conflict-loser and
+ --conflict-suffix settings. For example, with the default settings,
+ file.txt on Path1 is renamed file.txt.conflict1 and file.txt on
+ Path2 is renamed file.txt.conflict2. Both are copied to the opposite
+ path during the run, so both sides end up with a copy of both files.
+ (As none is the default, it is not necessary to specify
+ --conflict-resolve none -- you can just omit the flag.)
+- newer - the newer file (by modtime) is considered the winner and is
+ copied without renaming. The older file (the "loser") is handled
+ according to --conflict-loser and --conflict-suffix settings (either
+ renamed or deleted.) For example, if file.txt on Path1 is newer than
+ file.txt on Path2, the result on both sides (with other default
+ settings) will be file.txt (winner from Path1) and
+ file.txt.conflict1 (loser from Path2).
+- older - same as newer, except the older file is considered the
+ winner, and the newer file is considered the loser.
+- larger - the larger file (by size) is considered the winner
+ (regardless of modtime, if any).
+- smaller - the smaller file (by size) is considered the winner
+ (regardless of modtime, if any).
+- path1 - the version from Path1 is unconditionally considered the
+ winner (regardless of modtime and size, if any). This can be useful
+ if one side is usually more trusted or up-to-date than the other.
+- path2 - same as path1, except the path2 version is considered the
+ winner.
+
+For all of the above options, note the following:
+
+- If either of the underlying remotes lacks support for the chosen
+  method, it will be ignored and fall back to none. (For example, if
+  --conflict-resolve newer is set, but one of the paths uses a remote
+  that doesn't support modtime.)
+- If a winner can't be determined because the chosen method's attribute
+  is missing or equal, it will be ignored and fall back to none. (For
+  example, if --conflict-resolve newer is set, but the Path1 and Path2
+  modtimes are identical, even if the sizes may differ.)
+- If the file's content is currently identical on both sides, it is not
+  considered a "conflict", even if new or changed on both sides since
+  the prior sync. (For example, if you made a change on one side and
+  then synced it to the other side by other means.) Therefore, none of
+  the conflict resolution flags apply in this scenario.
+- The conflict resolution flags do not apply during a --resync, as
+  there is no "prior run" to speak of (but see --resync-mode for
+  similar options.)
+
+--conflict-loser CHOICE
+
+--conflict-loser determines what happens to the "loser" of a sync
+conflict (when --conflict-resolve determines a winner) or to both files
+(when there is no winner.) The currently supported options are:
+
+- num - (the default) - auto-number the conflicts by automatically
+ appending the next available number to the --conflict-suffix, in
+ chronological order. For example, with the default settings, the
+ first conflict for file.txt will be renamed file.txt.conflict1. If
+ file.txt.conflict1 already exists, file.txt.conflict2 will be used
+ instead (etc., up to a maximum of 9223372036854775807 conflicts.)
+- pathname - rename the conflicts according to which side they came
+ from, which was the default behavior prior to v1.66. For example,
+ with --conflict-suffix path, file.txt from Path1 will be renamed
+ file.txt.path1, and file.txt from Path2 will be renamed
+ file.txt.path2. If two non-identical suffixes are provided (ex.
+ --conflict-suffix cloud,local), the trailing digit is omitted.
+ Importantly, note that with pathname, there is no auto-numbering
+ beyond 2, so if file.txt.path2 somehow already exists, it will be
+ overwritten. Using a dynamic date variable in your --conflict-suffix
+ (see below) is one possible way to avoid this. Note also that
+ conflicts-of-conflicts are possible, if the original conflict is not
+ manually resolved -- for example, if for some reason you edited
+ file.txt.path1 on both sides, and those edits were different, the
+ result would be file.txt.path1.path1 and file.txt.path1.path2 (in
+ addition to file.txt.path2.)
+- delete - keep the winner only and delete the loser, instead of
+ renaming it. If a winner cannot be determined (see
+ --conflict-resolve for details on how this could happen), delete is
+ ignored and the default num is used instead (i.e. both versions are
+ kept and renamed, and neither is deleted.) delete is inherently the
+ most destructive option, so use it only with care.
+
+For all of the above options, note that if a winner cannot be determined
+(see --conflict-resolve for details on how this could happen), or if
+--conflict-resolve is not in use, both files will be renamed.
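+
+For example, to keep only the newer version of each conflict and delete
+the loser outright (a hypothetical and deliberately aggressive setup --
+consider adding --backup-dir1 and --backup-dir2 as a safety net):
+
+    rclone bisync Path1 Path2 --conflict-resolve newer --conflict-loser delete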
+
+--conflict-suffix STRING[,STRING]
+
+--conflict-suffix controls the suffix that is appended when bisync
+renames a --conflict-loser (default: conflict). --conflict-suffix will
+accept either one string or two comma-separated strings to assign
+different suffixes to Path1 vs. Path2. This may be helpful later in
+identifying the source of the conflict. (For example,
+--conflict-suffix dropboxconflict,laptopconflict)
+
+With --conflict-loser num, a number is always appended to the suffix.
+With --conflict-loser pathname, a number is appended only when one
+suffix is specified (or when two identical suffixes are specified). That
+is, with --conflict-loser pathname, all of the following would produce
+exactly the same result:
+
+ --conflict-suffix path
+ --conflict-suffix path,path
+ --conflict-suffix path1,path2
+
+Suffixes may be as short as 1 character. By default, the suffix is
+appended after any other extensions (ex. file.jpg.conflict1), however,
+this can be changed with the --suffix-keep-extension flag (i.e. to
+instead result in file.conflict1.jpg).
+
+--conflict-suffix supports several dynamic date variables when enclosed
+in curly braces as globs. This can be helpful to track the date and/or
+time that each conflict was handled by bisync. For example:
+
+ --conflict-suffix {DateOnly}-conflict
+ // result: myfile.txt.2006-01-02-conflict1
+
+All of the formats described here and here are supported, but take care
+to ensure that your chosen format does not use any characters that are
+illegal on your remotes (for example, macOS does not allow colons in
+filenames, and slashes are also best avoided as they are often
+interpreted as directory separators.) To address this particular issue,
+an additional {MacFriendlyTime} (or just {mac}) option is supported,
+which results in 2006-01-02 0304PM.
+
+Note that --conflict-suffix is entirely separate from rclone's main
+--suffix flag. This is intentional, as users may wish to use both flags
+simultaneously, if also using --backup-dir.
+
+Finally, note that the default in bisync prior to v1.66 was to rename
+conflicts with ..path1 and ..path2 (with two periods, and path instead
+of conflict.) Bisync now defaults to a single dot instead of a double
+dot, but additional dots can be added by including them in the specified
+suffix string. For example, for behavior equivalent to the previous
+default, use:
+
+ [--conflict-resolve none] --conflict-loser pathname --conflict-suffix .path
+
--check-sync
Enabled by default, the check-sync function checks that all of the same
@@ -19198,39 +21041,51 @@ significantly reduce the sync run times for very large numbers of files.
The check may be run manually with --check-sync=only. It runs only the
integrity check and terminates without actually synching.
-See also: Concurrent modifications
+Note that currently, --check-sync only checks listing snapshots and NOT
+the actual files on the remotes. Note also that the listing snapshots
+will not know about any changes that happened during or after the latest
+bisync run, as those will be discovered on the next run. Therefore,
+while listings should always match each other at the end of a bisync
+run, it is expected that they will not match the underlying remotes, nor
+will the remotes match each other, if there were changes during or after
+the run. This is normal, and any differences will be detected and synced
+on the next run.
---ignore-listing-checksum
+For a robust integrity check of the current state of the remotes (as
+opposed to just their listing snapshots), consider using check (or
+cryptcheck, if at least one path is a crypt remote) instead of
+--check-sync, keeping in mind that differences are expected if files
+changed during or after your last bisync run.
-By default, bisync will retrieve (or generate) checksums (for backends
-that support them) when creating the listings for both paths, and store
-the checksums in the listing files. --ignore-listing-checksum will
-disable this behavior, which may speed things up considerably,
-especially on backends (such as local) where hashes must be computed on
-the fly instead of retrieved. Please note the following:
+For example, a possible sequence could look like this:
-- While checksums are (by default) generated and stored in the listing
- files, they are NOT currently used for determining diffs (deltas).
- It is anticipated that full checksum support will be added in a
- future version.
-- --ignore-listing-checksum is NOT the same as --ignore-checksum, and
- you may wish to use one or the other, or both. In a nutshell:
- --ignore-listing-checksum controls whether checksums are considered
- when scanning for diffs, while --ignore-checksum controls whether
- checksums are considered during the copy/sync operations that
- follow, if there ARE diffs.
-- Unless --ignore-listing-checksum is passed, bisync currently
- computes hashes for one path even when there's no common hash with
- the other path (for example, a crypt remote.)
-- If both paths support checksums and have a common hash, AND
- --ignore-listing-checksum was not specified when creating the
- listings, --check-sync=only can be used to compare Path1 vs. Path2
- checksums (as of the time the previous listings were created.)
- However, --check-sync=only will NOT include checksums if the
- previous listings were generated on a run using
- --ignore-listing-checksum. For a more robust integrity check of the
- current state, consider using check (or cryptcheck, if at least one
- path is a crypt remote.)
+1. Normally scheduled bisync run:
+
+ rclone bisync Path1 Path2 -MPc --check-access --max-delete 10 --filters-file /path/to/filters.txt -v --no-cleanup --ignore-listing-checksum --disable ListR --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient
+
+2. Periodic independent integrity check (perhaps scheduled nightly or
+ weekly):
+
+ rclone check -MvPc Path1 Path2 --filter-from /path/to/filters.txt
+
+3. If diffs are found, you have some choices to correct them. If one
+ side is more up-to-date and you want to make the other side match
+ it, you could run:
+
+ rclone sync Path1 Path2 --filter-from /path/to/filters.txt --create-empty-src-dirs -MPc -v
+
+(or switch Path1 and Path2 to make Path2 the source-of-truth)
+
+Or, if neither side is totally up-to-date, you could run a --resync to
+bring them back into agreement (but remember that this could cause
+deleted files to re-appear.)
+
+Note also that rclone check does not currently include empty
+directories, so if you want to know if any empty directories are out of
+sync, consider alternatively running the above rclone sync command with
+--dry-run added.
+
+See also: Concurrent modifications, --resilient
--resilient
@@ -19258,7 +21113,115 @@ the time of the next run, that next run will be allowed to proceed.
Certain more serious errors will still enforce a --resync lockout, even
in --resilient mode, to prevent data loss.
-Behavior of --resilient may change in a future version.
+Behavior of --resilient may change in a future version. (See also:
+--recover, --max-lock, Graceful Shutdown)
+
+--recover
+
+If --recover is set, in the event of a sudden interruption or other
+un-graceful shutdown, bisync will attempt to automatically recover on
+the next run, instead of requiring --resync. Bisync is able to recover
+robustly by keeping one "backup" listing at all times, representing the
+state of both paths after the last known successful sync. Bisync can
+then compare the current state with this snapshot to determine which
+changes it needs to retry. Changes that were synced after this snapshot
+(during the run that was later interrupted) will appear to bisync as if
+they are "new or changed on both sides", but in most cases this is not a
+problem, as bisync will simply do its usual "equality check" and learn
+that no action needs to be taken on these files, since they are already
+identical on both sides.
+
+In the rare event that a file is synced successfully during a run that
+later aborts, and then that same file changes AGAIN before the next run,
+bisync will think it is a sync conflict, and handle it accordingly.
+(From bisync's perspective, the file has changed on both sides since the
+last trusted sync, and the files on either side are not currently
+identical.) Therefore, --recover carries with it a slightly increased
+chance of having conflicts -- though in practice this is pretty rare, as
+the conditions required to cause it are quite specific. This risk can be
+reduced by using bisync's "Graceful Shutdown" mode (triggered by sending
+SIGINT or Ctrl+C), when you have the choice, instead of forcing a sudden
+termination.
+
+--recover and --resilient are similar, but distinct -- the main
+difference is that --resilient is about retrying, while --recover is
+about recovering. Most users will probably want both. --resilient allows
+retrying when bisync has chosen to abort itself due to safety features
+such as failing --check-access or detecting a filter change. --resilient
+does not cover external interruptions such as a user shutting down their
+computer in the middle of a sync -- that is what --recover is for.
+
+--max-lock
+
+Bisync uses lock files as a safety feature to prevent interference from
+other bisync runs while it is running. Bisync normally removes these
+lock files at the end of a run, but if bisync is abruptly interrupted,
+these files will be left behind. By default, they will lock out all
+future runs, until the user has a chance to manually check things out
+and remove the lock. As an alternative, --max-lock can be used to make
+them automatically expire after a certain period of time, so that future
+runs are not locked out forever, and auto-recovery is possible.
+--max-lock can be any duration 2m or greater (or 0 to disable). If set,
+lock files older than this will be considered "expired", and future runs
+will be allowed to disregard them and proceed. (Note that the --max-lock
+duration must be set by the process that left the lock file -- not the
+later one interpreting it.)
+
+If set, bisync will also "renew" these lock files every
+--max-lock minus one minute throughout a run, for extra safety. (For
+example, with --max-lock 5m, bisync would renew the lock file (for
+another 5 minutes) every 4 minutes until the run has completed.) In
+other words, it should not be possible for a lock file to pass its
+expiration time while the process that created it is still running --
+and you can therefore be reasonably sure that any expired lock file you
+may find was left there by an interrupted run, not one that is still
+running and just taking a while.
+
+If --max-lock is 0 or not set, the default is that lock files will never
+expire, and will block future runs (of these same two bisync paths)
+indefinitely.
+
+For maximum resilience from disruptions, consider setting a relatively
+short duration like --max-lock 2m along with --resilient and --recover,
+and a relatively frequent cron schedule. The result will be a very
+robust "set-it-and-forget-it" bisync run that can automatically bounce
+back from almost any interruption it might encounter, without requiring
+the user to get involved and run a --resync. (See also: Graceful
+Shutdown mode)
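+
+A minimal sketch of such a "set-it-and-forget-it" invocation (paths and
+any additional flags are placeholders for your own setup):
+
+    rclone bisync Path1 Path2 --resilient --recover --max-lock 2m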
+
+--backup-dir1 and --backup-dir2
+
+As of v1.66, --backup-dir is supported in bisync. Because --backup-dir
+must be a non-overlapping path on the same remote, Bisync has introduced
+new --backup-dir1 and --backup-dir2 flags to support separate
+backup-dirs for Path1 and Path2 (bisyncing between different remotes
+with --backup-dir would not otherwise be possible.) --backup-dir1 and
+--backup-dir2 can use different remotes from each other, but
+--backup-dir1 must use the same remote as Path1, and --backup-dir2 must
+use the same remote as Path2. Each backup directory must not overlap its
+respective bisync Path without being excluded by a filter rule.
+
+The standard --backup-dir will also work, if both paths use the same
+remote (but note that deleted files from both paths would be mixed
+together in the same dir). If either --backup-dir1 or --backup-dir2 is
+set, it will override --backup-dir.
+
+Example:
+
+ rclone bisync /Users/someuser/some/local/path/Bisync gdrive:Bisync --backup-dir1 /Users/someuser/some/local/path/BackupDir --backup-dir2 gdrive:BackupDir --suffix -2023-08-26 --suffix-keep-extension --check-access --max-delete 10 --filters-file /Users/someuser/some/local/path/bisync_filters.txt --no-cleanup --ignore-listing-checksum --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient -MvP --drive-skip-gdocs --fix-case
+
+In this example, if the user deletes a file in
+/Users/someuser/some/local/path/Bisync, bisync will propagate the delete
+to the other side by moving the corresponding file from gdrive:Bisync to
+gdrive:BackupDir. If the user deletes a file from gdrive:Bisync, bisync
+moves it from /Users/someuser/some/local/path/Bisync to
+/Users/someuser/some/local/path/BackupDir.
+
+In the event of a rename due to a sync conflict, the rename is not
+considered a delete, unless a previous conflict with the same name
+already exists and would get overwritten.
+
+See also: --suffix, --suffix-keep-extension
Operation
@@ -19275,8 +21238,9 @@ Safety measures
- Lock file prevents multiple simultaneous runs when taking a while.
This can be particularly useful if bisync is run by cron scheduler.
-- Handle change conflicts non-destructively by creating ..path1 and
- ..path2 file versions.
+- Handle change conflicts non-destructively by creating .conflict1,
+ .conflict2, etc. file versions, according to --conflict-resolve,
+ --conflict-loser, and --conflict-suffix settings.
- File system access health check using RCLONE_TEST files (see the
--check-access flag).
- Abort on excessive deletes - protects against a failed listing being
@@ -19317,45 +21281,50 @@ Normal sync checks
Unusual sync checks
- ----------------------------------------------------------------------------
- Type Description Result Implementation
- ----------------- --------------------- ------------------- ----------------
- Path1 new/changed File is new/changed No change None
- AND Path2 on Path1 AND
- new/changed AND new/changed on Path2
- Path1 == Path2 AND Path1 version is
- currently identical
- to Path2
+ ------------------------------------------------------------------------------
+ Type Description Result Implementation
+ ----------------- --------------------- -------------------- -----------------
+ Path1 new/changed File is new/changed No change None
+ AND Path2 on Path1 AND
+ new/changed AND new/changed on Path2
+ Path1 == Path2 AND Path1 version is
+ currently identical
+ to Path2
- Path1 new AND File is new on Path1 Files renamed to rclone copy
- Path2 new AND new on Path2 (and _Path1 and _Path2 _Path2 file to
- Path1 version is NOT Path1,
- identical to Path2) rclone copy
- _Path1 file to
- Path2
+ Path1 new AND File is new on Path1 Conflicts handled default:
+ Path2 new AND new on Path2 (and according to rclone copy
+ Path1 version is NOT --conflict-resolve & renamed
+ identical to Path2) --conflict-loser Path2.conflict2
+ settings file to Path1,
+ rclone copy
+ renamed
+ Path1.conflict1
+ file to Path2
- Path2 newer AND File is newer on Files renamed to rclone copy
- Path1 changed Path2 AND also _Path1 and _Path2 _Path2 file to
- changed Path1,
- (newer/older/size) on rclone copy
- Path1 (and Path1 _Path1 file to
- version is NOT Path2
- identical to Path2)
+ Path2 newer AND File is newer on Conflicts handled default:
+ Path1 changed Path2 AND also according to rclone copy
+ changed --conflict-resolve & renamed
+ (newer/older/size) on --conflict-loser Path2.conflict2
+ Path1 (and Path1 settings file to Path1,
+ version is NOT rclone copy
+ identical to Path2) renamed
+ Path1.conflict1
+ file to Path2
- Path2 newer AND File is newer on Path2 version rclone copy
- Path1 deleted Path2 AND also survives Path2 to Path1
- deleted on Path1
+ Path2 newer AND File is newer on Path2 version rclone copy Path2
+ Path1 deleted Path2 AND also survives to Path1
+ deleted on Path1
- Path2 deleted AND File is deleted on Path1 version rclone copy
- Path1 changed Path2 AND changed survives Path1 to Path2
- (newer/older/size) on
- Path1
+ Path2 deleted AND File is deleted on Path1 version rclone copy Path1
+ Path1 changed Path2 AND changed survives to Path2
+ (newer/older/size) on
+ Path1
- Path1 deleted AND File is deleted on Path2 version rclone copy
- Path2 changed Path1 AND changed survives Path2 to Path1
- (newer/older/size) on
- Path2
- ----------------------------------------------------------------------------
+ Path1 deleted AND File is deleted on Path2 version rclone copy Path2
+ Path2 changed Path1 AND changed survives to Path1
+ (newer/older/size) on
+ Path2
+ ------------------------------------------------------------------------------
As of rclone v1.64, bisync is now better at detecting false positive
sync conflicts, which would previously have resulted in unnecessary
@@ -19364,9 +21333,9 @@ to rename (because it is new/changed on both sides), it first checks
whether the Path1 and Path2 versions are currently identical (using the
same underlying function as check.) If bisync concludes that the files
are identical, it will skip them and move on. Otherwise, it will create
-renamed ..Path1 and ..Path2 duplicates, as before. This behavior also
-improves the experience of renaming directories, as a --resync is no
-longer required, so long as the same change has been made on both sides.
+renamed duplicates, as before. This behavior also improves the
+experience of renaming directories, as a --resync is no longer required,
+so long as the same change has been made on both sides.
All files changed check
@@ -19381,19 +21350,12 @@ the changes.
Modification times
-Bisync relies on file timestamps to identify changed files and will
-refuse to operate if backend lacks the modification time support.
-
-If you or your application should change the content of a file without
-changing the modification time then bisync will not notice the change,
-and thus will not copy it to the other side.
-
-Note that on some cloud storage systems it is not possible to have file
-timestamps that match precisely between the local and other filesystems.
-
-Bisync's approach to this problem is by tracking the changes on each
-side separately over time with a local database of files in that side
-then applying the resulting changes on the other side.
+By default, bisync compares files by modification time and size. If you
+or your application should change the content of a file without changing
+the modification time and size, then bisync will not notice the change,
+and thus will not copy it to the other side. As an alternative, consider
+comparing by checksum (if your remotes support it). See --compare for
+details.
Error handling
@@ -19417,7 +21379,7 @@ at ${HOME}/.cache/rclone/bisync/ on Linux.
Some errors are considered temporary and re-running the bisync is not
blocked. The critical return blocks further bisync runs.
-See also: --resilient
+See also: --resilient, --recover, --max-lock, Graceful Shutdown
Lock file
@@ -19428,7 +21390,9 @@ place and block any further runs of bisync for the same paths. Delete
the lock file as part of debugging the situation. The lock file
effectively blocks follow-on (e.g., scheduled by cron) runs when the
prior invocation is taking a long time. The lock file contains PID of
-the blocking process, which may help in debug.
+the blocking process, which may help in debug. Lock files can be set to
+automatically expire after a certain amount of time, using the
+--max-lock flag.
Note that while concurrent bisync runs are allowed, be very cautious
that there is no overlap in the trees being synched between concurrent
@@ -19441,69 +21405,74 @@ successful run, - 1 for a non-critical failing run (a rerun may be
successful), - 2 for a critically aborted run (requires a --resync to
recover).
+Graceful Shutdown
+
+Bisync has a "Graceful Shutdown" mode which is activated by sending
+SIGINT or pressing Ctrl+C during a run. Once triggered, bisync will use
+best efforts to exit cleanly before the timer runs out. If bisync is in
+the middle of transferring files, it will attempt to cleanly empty its
+queue by finishing what it has started but not taking more. If it cannot
+do so within 30 seconds, it will cancel the in-progress transfers at
+that point and then give itself a maximum of 60 seconds to wrap up, save
+its state for next time, and exit. With the -vP flags you will see
+constant status updates and a final confirmation of whether or not the
+graceful shutdown was successful.
+
+At any point during the "Graceful Shutdown" sequence, a second SIGINT or
+Ctrl+C will trigger an immediate, un-graceful exit, which will leave
+things in a messier state. Usually a robust recovery will still be
+possible if using --recover mode, otherwise you will need to do a
+--resync.
+
+If you plan to use Graceful Shutdown mode, it is recommended to use
+--resilient and --recover, and it is important to NOT use --inplace,
+otherwise you risk leaving partially-written files on one side, which
+may be confused for real files on the next run. Note also that in the
+event of an abrupt interruption, a lock file will be left behind to
+block concurrent runs. You will need to delete it before you can proceed
+with the next run (or wait for it to expire on its own, if using
+--max-lock.)
+
Limitations
Supported backends
Bisync is considered BETA and has been tested with the following
backends: - Local filesystem - Google Drive - Dropbox - OneDrive - S3 -
-SFTP - Yandex Disk
+SFTP - Yandex Disk - Crypt
It has not been fully tested with other services yet. If it works, or
sorta works, please let us know and we'll update the list. Run the test
suite to check for proper operation as described below.
-First release of rclone bisync requires that underlying backend supports
-the modification time feature and will refuse to run otherwise. This
-limitation will be lifted in a future rclone bisync release.
+The first release of rclone bisync required both underlying backends to
+support modification times, and refused to run otherwise. This
+limitation has been lifted as of v1.66, as bisync now supports comparing
+checksum and/or size instead of (or in addition to) modtime. See
+--compare for details.
Concurrent modifications
-When using Local, FTP or SFTP remotes rclone does not create temporary
-files at the destination when copying, and thus if the connection is
-lost the created file may be corrupt, which will likely propagate back
-to the original path on the next sync, resulting in data loss. This will
-be solved in a future release, there is no workaround at the moment.
+When using Local, FTP or SFTP remotes with --inplace, rclone does not
+create temporary files at the destination when copying, and thus if the
+connection is lost the created file may be corrupt, which will likely
+propagate back to the original path on the next sync, resulting in data
+loss. It is therefore recommended to omit --inplace.
-Files that change during a bisync run may result in data loss. This has
-been seen in a highly dynamic environment, where the filesystem is
-getting hammered by running processes during the sync. The currently
-recommended solution is to sync at quiet times or filter out unnecessary
-directories and files.
-
-As an alternative approach, consider using --check-sync=false (and
-possibly --resilient) to make bisync more forgiving of filesystems that
-change during the sync. Be advised that this may cause bisync to miss
-events that occur during a bisync run, so it is a good idea to
-supplement this with a periodic independent integrity check, and
-corrective sync if diffs are found. For example, a possible sequence
-could look like this:
-
-1. Normally scheduled bisync run:
-
- rclone bisync Path1 Path2 -MPc --check-access --max-delete 10 --filters-file /path/to/filters.txt -v --check-sync=false --no-cleanup --ignore-listing-checksum --disable ListR --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient
-
-2. Periodic independent integrity check (perhaps scheduled nightly or
- weekly):
-
- rclone check -MvPc Path1 Path2 --filter-from /path/to/filters.txt
-
-3. If diffs are found, you have some choices to correct them. If one
- side is more up-to-date and you want to make the other side match
- it, you could run:
-
- rclone sync Path1 Path2 --filter-from /path/to/filters.txt --create-empty-src-dirs -MPc -v
-
-(or switch Path1 and Path2 to make Path2 the source-of-truth)
-
-Or, if neither side is totally up-to-date, you could run a --resync to
-bring them back into agreement (but remember that this could cause
-deleted files to re-appear.)
-
-*Note also that rclone check does not currently include empty
-directories, so if you want to know if any empty directories are out of
-sync, consider alternatively running the above rclone sync command with
---dry-run added.
+Files that change during a bisync run may result in data loss. Prior to
+rclone v1.66, this was commonly seen in highly dynamic environments,
+where the filesystem was getting hammered by running processes during
+the sync. As of rclone v1.66, bisync was redesigned to use a "snapshot"
+model, greatly reducing the risks from changes during a sync. Changes
+that are not detected during the current sync will now be detected
+during the following sync, and will no longer cause the entire run to
+throw a critical error. There is additionally a mechanism to mark files
+as needing to be internally rechecked next time, for added safety. It
+should therefore no longer be necessary to sync only at quiet times --
+however, note that an error can still occur if a file happens to change
+at the exact moment it's being read/written by bisync (same as would
+happen in rclone sync.) (See also: --ignore-checksum,
+--local-no-check-updated)
Empty directories
@@ -19525,14 +21494,20 @@ but it's still probably best to stick to one or the other, and use
Renamed directories
-Renaming a folder on the Path1 side results in deleting all files on the
-Path2 side and then copying all files again from Path1 to Path2. Bisync
-sees this as all files in the old directory name as deleted and all
-files in the new directory name as new. Currently, the most effective
-and efficient method of renaming a directory is to rename it to the same
-name on both sides. (As of rclone v1.64, a --resync is no longer
-required after doing so, as bisync will automatically detect that Path1
-and Path2 are in agreement.)
+By default, renaming a folder on the Path1 side results in deleting all
+files on the Path2 side and then copying all files again from Path1 to
+Path2. Bisync sees this as all files in the old directory name as
+deleted and all files in the new directory name as new.
+
+A recommended solution is to use --track-renames, which is now supported
+in bisync as of rclone v1.66. Note that --track-renames is not available
+during --resync, as --resync does not delete anything (--track-renames
+only supports sync, not copy.)
+
+Otherwise, the most effective and efficient method of renaming a
+directory is to rename it to the same name on both sides. (As of
+rclone v1.64, a --resync is no longer required after doing so, as bisync
+will automatically detect that Path1 and Path2 are in agreement.)
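+
+For example, a hypothetical run that lets bisync detect and propagate
+renames instead of deleting and re-copying the whole directory:
+
+    rclone bisync Path1 Path2 --track-renames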
--fast-list used by default
@@ -19544,28 +21519,21 @@ users with many empty directories. For now, the recommended way to avoid
using --fast-list is to add --disable ListR to all bisync commands. The
default behavior may change in a future version.
-Overridden Configs
+Case (and unicode) sensitivity
-When rclone detects an overridden config, it adds a suffix like {ABCDE}
-on the fly to the internal name of the remote. Bisync follows suit by
-including this suffix in its listing filenames. However, this suffix
-does not necessarily persist from run to run, especially if different
-flags are provided. So if next time the suffix assigned is {FGHIJ},
-bisync will get confused, because it's looking for a listing file with
-{FGHIJ}, when the file it wants has {ABCDE}. As a result, it throws
-Bisync critical error: cannot find prior Path1 or Path2 listings, likely due to critical error on prior run
-and refuses to run again until the user runs a --resync (unless using
---resilient). The best workaround at the moment is to set any
-backend-specific flags in the config file instead of specifying them
-with command flags. (You can still override them as needed for other
-rclone commands.)
+As of v1.66, case and unicode form differences no longer cause critical
+errors, and normalization (when comparing between filesystems) is
+handled according to the same flags and defaults as rclone sync. See the
+following options (all of which are supported by bisync) to control this
+behavior more granularly:
+
+- --fix-case
+- --ignore-case-sync
+- --no-unicode-normalization
+- --local-unicode-normalization and --local-case-sensitive (caution:
+  these are normally not what you want.)
-Case sensitivity
-
-Synching with case-insensitive filesystems, such as Windows or Box, can
-result in file name conflicts. This will be fixed in a future release.
-The near-term workaround is to make sure that files on both sides don't
-have spelling case differences (Smile.jpg vs. smile.jpg).
+Note that in the (probably rare) event that --fix-case is used AND a
+file is new/changed on both sides AND the checksums match AND the
+filename case does not match, the Path1 filename is considered the
+winner, for the purposes of --fix-case (Path2 will be renamed to match
+it).
Windows support
@@ -19844,22 +21812,57 @@ specifically which files are generating complaints. If the error is
This file has been identified as malware or spam and cannot be downloaded,
consider using the flag --drive-acknowledge-abuse.
-Google Doc files
+Google Docs (and other files of unknown size)
-Google docs exist as virtual files on Google Drive and cannot be
-transferred to other filesystems natively. While it is possible to
-export a Google doc to a normal file (with .xlsx extension, for
-example), it is not possible to import a normal file back into a Google
-document.
+As of v1.66, Google Docs (including Google Sheets, Slides, etc.) are now
+supported in bisync, subject to the same options, defaults, and
+limitations as in rclone sync. When bisyncing drive with non-drive
+backends, the drive -> non-drive direction is controlled by
+--drive-export-formats (default "docx,xlsx,pptx,svg") and the non-drive
+-> drive direction is controlled by --drive-import-formats (default
+none.)
-Bisync's handling of Google Doc files is to flag them in the run log
-output for user's attention and ignore them for any file transfers,
-deletes, or syncs. They will show up with a length of -1 in the
-listings. This bisync run is otherwise successful:
+For example, with the default export/import formats, a Google Sheet on
+the drive side will be synced to an .xlsx file on the non-drive side. In
+the reverse direction, .xlsx files with filenames that match an existing
+Google Sheet will be synced to that Google Sheet, while .xlsx files that
+do NOT match an existing Google Sheet will be copied to drive as normal
+.xlsx files (without conversion to Sheets, although the Google Drive web
+browser UI may still give you the option to open it as one.)
- 2021/05/11 08:23:15 INFO : Synching Path1 "/path/to/local/tree/base/" with Path2 "GDrive:"
- 2021/05/11 08:23:15 INFO : ...path2.lst-new: Ignoring incorrect line: "- -1 - - 2018-07-29T08:49:30.136000000+0000 GoogleDoc.docx"
- 2021/05/11 08:23:15 INFO : Bisync successful
+If --drive-import-formats is set (it's not, by default), then all of the
+specified formats will be converted to Google Docs, if there is no
+existing Google Doc with a matching name. Caution: such conversion can
+be quite lossy, and in most cases it's probably not what you want!
+
+To bisync Google Docs as URL shortcut links (in a manner similar to
+"Drive for Desktop"), use: --drive-export-formats url (or alternatives.)
+
+Note that these link files cannot be edited on the non-drive side -- you
+will get errors if you try to sync an edited link file back to drive.
+They CAN be deleted (it will result in deleting the corresponding Google
+Doc.) If you create a .url file on the non-drive side that does not
+match an existing Google Doc, bisyncing it will just result in copying
+the literal .url file over to drive (no Google Doc will be created.) So,
+as a general rule of thumb, think of them as read-only placeholders on
+the non-drive side, and make all your changes on the drive side.
+
+Likewise, even with other export-formats, it is best to only move/rename
+Google Docs on the drive side. This is because otherwise, bisync will
+interpret this as a file deleted and another created, and accordingly,
+it will delete the Google Doc and create a new file at the new path.
+(Whether or not that new file is a Google Doc depends on
+--drive-import-formats.)
+
+Lastly, take note that all Google Docs on the drive side have a size of
+-1 and no checksum. Therefore, they cannot be reliably synced with the
+--checksum or --size-only flags. (To be exact: they will still get
+created/deleted, and bisync's delta engine will notice changes and queue
+them for syncing, but the underlying sync function will consider them
+identical and skip them.) To work around this, use the default (modtime
+and size) instead of --checksum or --size-only.
+
+To ignore Google Docs entirely, use --drive-skip-gdocs.
Usage examples
@@ -20231,6 +22234,54 @@ Unison and synchronization in general.
Changelog
+v1.66
+
+- Copies and deletes are now handled in one operation instead of two
+- --track-renames and --backup-dir are now supported
+- Partial uploads known issue on local/ftp/sftp has been resolved
+ (unless using --inplace)
+- Final listings are now generated from sync results, to avoid needing
+ to re-list
+- Bisync is now much more resilient to changes that happen during a
+ bisync run, and far less prone to critical errors / undetected
+ changes
+- Bisync is now capable of rolling a file listing back in cases of
+ uncertainty, essentially marking the file as needing to be rechecked
+ next time.
+- A few basic terminal colors are now supported, controllable with
+ --color (AUTO|NEVER|ALWAYS)
+- Initial listing snapshots of Path1 and Path2 are now generated
+ concurrently, using the same "march" infrastructure as check and
+ sync, for performance improvements and less risk of error.
+- Fixed handling of unicode normalization and case insensitivity,
+ support for --fix-case, --ignore-case-sync,
+ --no-unicode-normalization
+- --resync is now much more efficient (especially for users of
+ --create-empty-src-dirs)
+- Google Docs (and other files of unknown size) are now supported
+ (with the same options as in sync)
+- Equality checks before a sync conflict rename now fall back to
+  cryptcheck (when possible) or --download, instead of --size-only,
+ when check is not available.
+- Bisync no longer fails to find the correct listing file when configs
+ are overridden with backend-specific flags.
+- Bisync now fully supports comparing based on any combination of
+ size, modtime, and checksum, lifting the prior restriction on
+ backends without modtime support.
+- Bisync now supports a "Graceful Shutdown" mode to cleanly cancel a
+ run early without requiring --resync.
+- New --recover flag allows robust recovery in the event of
+ interruptions, without requiring --resync.
+- A new --max-lock setting allows lock files to automatically renew
+ and expire, for better automatic recovery when a run is interrupted.
+- Bisync now supports auto-resolving sync conflicts and customizing
+ rename behavior with new --conflict-resolve, --conflict-loser, and
+ --conflict-suffix flags.
+- A new --resync-mode flag allows more control over which version of a
+ file gets kept during a --resync.
+- Bisync now supports --retries and --retries-sleep (when --resilient
+ is set.)
+
v1.64
- Fixed an issue causing dry runs to inadvertently commit filter
@@ -20582,6 +22633,17 @@ Properties:
- Default:
Slash,LtGt,DoubleQuote,SingleQuote,BackQuote,Dollar,BackSlash,Del,Ctl,LeftSpace,RightSpace,InvalidUtf8,Dot
+--fichier-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_FICHIER_DESCRIPTION
+- Type: string
+- Required: false
+
Limitations
rclone about is not supported by the 1Fichier backend. Backends without
@@ -20697,334 +22759,22 @@ Properties:
- Type: string
- Required: true
-Amazon Drive
-
-Amazon Drive, formerly known as Amazon Cloud Drive, is a cloud storage
-service run by Amazon for consumers.
-
-Status
-
-Important: rclone supports Amazon Drive only if you have your own set of
-API keys. Unfortunately the Amazon Drive developer program is now closed
-to new entries so if you don't already have your own set of keys you
-will not be able to use rclone with Amazon Drive.
-
-For the history on why rclone no longer has a set of Amazon Drive API
-keys see the forum.
-
-If you happen to know anyone who works at Amazon then please ask them to
-re-instate rclone into the Amazon Drive developer program - thanks!
-
-Configuration
-
-The initial setup for Amazon Drive involves getting a token from Amazon
-which you need to do in your browser. rclone config walks you through
-it.
-
-The configuration process for Amazon Drive may involve using an oauth
-proxy. This is used to keep the Amazon credentials out of the source
-code. The proxy runs in Google's very secure App Engine environment and
-doesn't store any credentials which pass through it.
-
-Since rclone doesn't currently have its own Amazon Drive credentials so
-you will either need to have your own client_id and client_secret with
-Amazon Drive, or use a third-party oauth proxy in which case you will
-need to enter client_id, client_secret, auth_url and token_url.
-
-Note also if you are not using Amazon's auth_url and token_url, (ie you
-filled in something for those) then if setting up on a remote machine
-you can only use the copying the config method of configuration -
-rclone authorize will not work.
-
-Here is an example of how to make a remote called remote. First run:
-
- rclone config
-
-This will guide you through an interactive setup process:
-
- No remotes found, make a new one?
- n) New remote
- r) Rename remote
- c) Copy remote
- s) Set configuration password
- q) Quit config
- n/r/c/s/q> n
- name> remote
- Type of storage to configure.
- Choose a number from below, or type in your own value
- [snip]
- XX / Amazon Drive
- \ "amazon cloud drive"
- [snip]
- Storage> amazon cloud drive
- Amazon Application Client Id - required.
- client_id> your client ID goes here
- Amazon Application Client Secret - required.
- client_secret> your client secret goes here
- Auth server URL - leave blank to use Amazon's.
- auth_url> Optional auth URL
- Token server url - leave blank to use Amazon's.
- token_url> Optional token URL
- Remote config
- Make sure your Redirect URL is set to "http://127.0.0.1:53682/" in your custom config.
- Use web browser to automatically authenticate rclone with remote?
- * Say Y if the machine running rclone has a web browser you can use
- * Say N if running rclone on a (remote) machine without web browser access
- If not sure try Y. If Y failed, try N.
- y) Yes
- n) No
- y/n> y
- If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
- Log in and authorize rclone for access
- Waiting for code...
- Got code
- --------------------
- [remote]
- client_id = your client ID goes here
- client_secret = your client secret goes here
- auth_url = Optional auth URL
- token_url = Optional token URL
- token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxx","token_type":"bearer","refresh_token":"xxxxxxxxxxxxxxxxxx","expiry":"2015-09-06T16:07:39.658438471+01:00"}
- --------------------
- y) Yes this is OK
- e) Edit this remote
- d) Delete this remote
- y/e/d> y
-
-See the remote setup docs for how to set it up on a machine with no
-Internet browser available.
-
-Note that rclone runs a webserver on your local machine to collect the
-token as returned from Amazon. This only runs from the moment it opens
-your browser to the moment you get back the verification code. This is
-on http://127.0.0.1:53682/ and this it may require you to unblock it
-temporarily if you are running a host firewall.
-
-Once configured you can then use rclone like this,
-
-List directories in top level of your Amazon Drive
-
- rclone lsd remote:
-
-List all the files in your Amazon Drive
-
- rclone ls remote:
-
-To copy a local directory to an Amazon Drive directory called backup
-
- rclone copy /home/source remote:backup
-
-Modification times and hashes
-
-Amazon Drive doesn't allow modification times to be changed via the API
-so these won't be accurate or used for syncing.
-
-It does support the MD5 hash algorithm, so for a more accurate sync, you
-can use the --checksum flag.
-
-Restricted filename characters
-
- Character Value Replacement
- ----------- ------- -------------
- NUL 0x00 ␀
- / 0x2F /
-
-Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON
-strings.
-
-Deleting files
-
-Any files you delete with rclone will end up in the trash. Amazon don't
-provide an API to permanently delete files, nor to empty the trash, so
-you will have to do that with one of Amazon's apps or via the Amazon
-Drive website. As of November 17, 2016, files are automatically deleted
-by Amazon from the trash after 30 days.
-
-Using with non .com Amazon accounts
-
-Let's say you usually use amazon.co.uk. When you authenticate with
-rclone it will take you to an amazon.com page to log in. Your
-amazon.co.uk email and password should work here just fine.
-
-Standard options
-
-Here are the Standard options specific to amazon cloud drive (Amazon
-Drive).
-
---acd-client-id
-
-OAuth Client Id.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_id
-- Env Var: RCLONE_ACD_CLIENT_ID
-- Type: string
-- Required: false
-
---acd-client-secret
-
-OAuth Client Secret.
-
-Leave blank normally.
-
-Properties:
-
-- Config: client_secret
-- Env Var: RCLONE_ACD_CLIENT_SECRET
-- Type: string
-- Required: false
-
Advanced options
-Here are the Advanced options specific to amazon cloud drive (Amazon
-Drive).
+Here are the Advanced options specific to alias (Alias for an existing
+remote).
---acd-token
+--alias-description
-OAuth Access Token as a JSON blob.
+Description of the remote
Properties:
-- Config: token
-- Env Var: RCLONE_ACD_TOKEN
+- Config: description
+- Env Var: RCLONE_ALIAS_DESCRIPTION
- Type: string
- Required: false
---acd-auth-url
-
-Auth server URL.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: auth_url
-- Env Var: RCLONE_ACD_AUTH_URL
-- Type: string
-- Required: false
-
---acd-token-url
-
-Token server url.
-
-Leave blank to use the provider defaults.
-
-Properties:
-
-- Config: token_url
-- Env Var: RCLONE_ACD_TOKEN_URL
-- Type: string
-- Required: false
-
---acd-checkpoint
-
-Checkpoint for internal polling (debug).
-
-Properties:
-
-- Config: checkpoint
-- Env Var: RCLONE_ACD_CHECKPOINT
-- Type: string
-- Required: false
-
---acd-upload-wait-per-gb
-
-Additional time per GiB to wait after a failed complete upload to see if
-it appears.
-
-Sometimes Amazon Drive gives an error when a file has been fully
-uploaded but the file appears anyway after a little while. This happens
-sometimes for files over 1 GiB in size and nearly every time for files
-bigger than 10 GiB. This parameter controls the time rclone waits for
-the file to appear.
-
-The default value for this parameter is 3 minutes per GiB, so by default
-it will wait 3 minutes for every GiB uploaded to see if the file
-appears.
-
-You can disable this feature by setting it to 0. This may cause conflict
-errors as rclone retries the failed upload but the file will most likely
-appear correctly eventually.
-
-These values were determined empirically by observing lots of uploads of
-big files for a range of file sizes.
-
-Upload with the "-v" flag to see more info about what rclone is doing in
-this situation.
-
-Properties:
-
-- Config: upload_wait_per_gb
-- Env Var: RCLONE_ACD_UPLOAD_WAIT_PER_GB
-- Type: Duration
-- Default: 3m0s
-
---acd-templink-threshold
-
-Files >= this size will be downloaded via their tempLink.
-
-Files this size or more will be downloaded via their "tempLink". This is
-to work around a problem with Amazon Drive which blocks downloads of
-files bigger than about 10 GiB. The default for this is 9 GiB which
-shouldn't need to be changed.
-
-To download files above this threshold, rclone requests a "tempLink"
-which downloads the file through a temporary URL directly from the
-underlying S3 storage.
-
-Properties:
-
-- Config: templink_threshold
-- Env Var: RCLONE_ACD_TEMPLINK_THRESHOLD
-- Type: SizeSuffix
-- Default: 9Gi
-
---acd-encoding
-
-The encoding for the backend.
-
-See the encoding section in the overview for more info.
-
-Properties:
-
-- Config: encoding
-- Env Var: RCLONE_ACD_ENCODING
-- Type: Encoding
-- Default: Slash,InvalidUtf8,Dot
-
-Limitations
-
-Note that Amazon Drive is case insensitive so you can't have a file
-called "Hello.doc" and one called "hello.doc".
-
-Amazon Drive has rate limiting so you may notice errors in the sync (429
-errors). rclone will automatically retry the sync up to 3 times by
-default (see --retries flag) which should hopefully work around this
-problem.
-
-Amazon Drive has an internal limit of file sizes that can be uploaded to
-the service. This limit is not officially published, but all files
-larger than this will fail.
-
-At the time of writing (Jan 2016) is in the area of 50 GiB per file.
-This means that larger files are likely to fail.
-
-Unfortunately there is no way for rclone to see that this failure is
-because of file size, so it will retry the operation, as any other
-failure. To avoid this problem, use --max-size 50000M option to limit
-the maximum size of uploaded files. Note that --max-size does not split
-files into segments, it only ignores files over this size.
-
-rclone about is not supported by the Amazon Drive backend. Backends
-without this capability cannot determine free space for an rclone mount
-or use policy mfs (most free space) as a member of an rclone union
-remote.
-
-See List of backends that do not support rclone about and rclone about
-
Amazon S3 Storage Providers
The S3 backend can be used with a number of different providers:
@@ -21103,7 +22853,7 @@ This will guide you through an interactive setup process.
Type of storage to configure.
Choose a number from below, or type in your own value
[snip]
- XX / Amazon S3 Compliant Storage Providers including AWS, Ceph, ChinaMobile, ArvanCloud, Dreamhost, IBM COS, Liara, Minio, and Tencent COS
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
[snip]
Storage> s3
@@ -21602,6 +23352,7 @@ permissions are required to be available on the bucket being written to:
- GetObject
- PutObject
- PutObjectACL
+- CreateBucket (unless using s3-no-check-bucket)
When using the lsd subcommand, the ListAllMyBuckets permission is
required.
@@ -21642,6 +23393,8 @@ Notes on above:
that USER_NAME has been created.
2. The Resource entry must include both resource ARNs, as one implies
the bucket and the other implies the bucket's objects.
+3. When using s3-no-check-bucket and the bucket already exists, the
+   "arn:aws:s3:::BUCKET_NAME" doesn't have to be included (see the
+   sketch below).
For reference, here's an Ansible script that will generate one or more
buckets that will work with rclone sync.
@@ -22397,10 +24150,10 @@ Properties:
--s3-upload-concurrency
-Concurrency for multipart uploads.
+Concurrency for multipart uploads and copies.
This is the number of chunks of the same file that are uploaded
-concurrently.
+concurrently for multipart uploads and copies.
If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
@@ -22448,6 +24201,19 @@ Properties:
- Type: bool
- Default: false
+--s3-use-dual-stack
+
+If true use AWS S3 dual-stack endpoint (IPv6 support).
+
+See AWS Docs on Dualstack Endpoints
+
+Properties:
+
+- Config: use_dual_stack
+- Env Var: RCLONE_S3_USE_DUAL_STACK
+- Type: bool
+- Default: false
+
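+A minimal sketch, assuming an AWS S3 remote named s3, of enabling the
+dual-stack (IPv6) endpoint for a single transfer; it can also be made
+permanent with use_dual_stack = true in the remote's config section:
+
+    rclone copy /path/to/files s3:bucket --s3-use-dual-stack
+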
--s3-use-accelerate-endpoint
If true use the AWS S3 accelerated endpoint.
@@ -22748,6 +24514,25 @@ Properties:
- Type: Time
- Default: off
+--s3-version-deleted
+
+Show deleted file markers when using versions.
+
+This shows deleted file markers in the listing when using versions.
+These will appear as 0 size files. The only operation which can be
+performed on them is deletion.
+
+Deleting a delete marker will reveal the previous version.
+
+Deleted files will always show with a timestamp.
+
+Properties:
+
+- Config: version_deleted
+- Env Var: RCLONE_S3_VERSION_DELETED
+- Type: bool
+- Default: false
+
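+A minimal sketch, assuming an S3 remote named s3 with bucket versioning
+enabled, of listing old versions together with their delete markers:
+
+    rclone ls s3:bucket --s3-versions --s3-version-deleted
+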
--s3-decompress
If set this will decompress gzip encoded objects.
@@ -22894,6 +24679,17 @@ Properties:
- Type: Tristate
- Default: unset
+--s3-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_S3_DESCRIPTION
+- Type: string
+- Required: false
+
Metadata
User metadata is stored as x-amz-meta- keys. S3 metadata keys are case
@@ -23473,10 +25269,10 @@ Or you can also configure via the interactive command line:
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
- Storage> 5
+ Storage> s3
Option provider.
Choose your S3 provider.
Choose a number from below, or type in your own value.
@@ -23599,18 +25395,11 @@ To configure access to IBM COS S3, follow the steps below:
3. Select "s3" storage.
Choose a number from below, or type in your own value
- 1 / Alias for an existing remote
- \ "alias"
- 2 / Amazon Drive
- \ "amazon cloud drive"
- 3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, ChinaMobile, Liara, ArvanCloud, Minio, IBM COS)
- \ "s3"
- 4 / Backblaze B2
- \ "b2"
[snip]
- 23 / HTTP
- \ "http"
- Storage> 3
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
+ \ "s3"
+ [snip]
+ Storage> s3
4. Select IBM COS as the S3 Storage Provider.
@@ -23764,7 +25553,7 @@ This will guide you through an interactive setup process.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
- XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -23870,7 +25659,7 @@ Type s3 to choose the connection type:
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
- XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS and Wasabi
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -24096,15 +25885,8 @@ To configure access to Qiniu Kodo, follow the steps below:
3. Select s3 storage.
Choose a number from below, or type in your own value
- 1 / 1Fichier
- \ (fichier)
- 2 / Akamai NetStorage
- \ (netstorage)
- 3 / Alias for an existing remote
- \ (alias)
- 4 / Amazon Drive
- \ (amazon cloud drive)
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
+ [snip]
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -24364,7 +26146,7 @@ Choose s3 backend
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
- XX / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -24638,7 +26420,7 @@ This will guide you through an interactive setup process.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
[snip]
- 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
[snip]
Storage> s3
@@ -24746,7 +26528,7 @@ This will guide you through an interactive setup process.
Type of storage to configure.
Choose a number from below, or type in your own value.
...
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Lyve Cloud, Minio, RackCorp, SeaweedFS, and Tencent COS
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
...
Storage> s3
@@ -24998,15 +26780,8 @@ To configure access to Leviia, follow the steps below:
3. Select s3 storage.
Choose a number from below, or type in your own value
- 1 / 1Fichier
- \ (fichier)
- 2 / Akamai NetStorage
- \ (netstorage)
- 3 / Alias for an existing remote
- \ (alias)
- 4 / Amazon Drive
- \ (amazon cloud drive)
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, Liara, Lyve Cloud, Minio, Netease, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
+ [snip]
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ (s3)
[snip]
Storage> s3
@@ -25207,7 +26982,7 @@ This will guide you through an interactive setup process.
Type of storage to configure.
Choose a number from below, or type in your own value.
[snip]
- X / Amazon S3 Compliant Storage Providers including AWS, ...Linode, ...and others
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...Linode, ...and others
\ (s3)
[snip]
Storage> s3
@@ -25444,13 +27219,8 @@ To configure access to Tencent COS, follow the steps below:
3. Select s3 storage.
Choose a number from below, or type in your own value
- 1 / 1Fichier
- \ "fichier"
- 2 / Alias for an existing remote
- \ "alias"
- 3 / Amazon Drive
- \ "amazon cloud drive"
- 4 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, ChinaMobile, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, Liara, Minio, and Tencent COS
+ [snip]
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
[snip]
Storage> s3
@@ -25839,7 +27609,7 @@ This will guide you through an interactive setup process.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
- 5 / Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, China Mobile, Cloudflare, GCS, ArvanCloud, DigitalOcean, Dreamhost, Huawei OBS, IBM COS, IDrive e2, IONOS Cloud, Liara, Lyve Cloud, Minio, Netease, Petabox, RackCorp, Scaleway, SeaweedFS, StackPath, Storj, Synology, Tencent COS, Qiniu and Wasabi
+ XX / Amazon S3 Compliant Storage Providers including AWS, ...
\ "s3"
Storage> s3
@@ -26445,9 +28215,12 @@ https://f002.backblazeb2.com/file/bucket/path/folder/file3?Authorization=xxxxxxx
#### --b2-download-auth-duration
- Time before the authorization token will expire in s or suffix ms|s|m|h|d.
+ Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
+
+ This is used in combination with "rclone link" for making files
+ accessible to the public and sets the duration before the download
+ authorization token will expire.
- The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.
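+
+ A minimal sketch, assuming a b2 remote named remote, of creating a
+ public link whose download authorization token lasts one day:
+
+     rclone link remote:bucket/path/file.txt --b2-download-auth-duration 1d
+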
Properties:
@@ -26523,6 +28296,17 @@ https://f002.backblazeb2.com/file/bucket/path/folder/file3?Authorization=xxxxxxx
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+ #### --b2-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_B2_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Backend commands
Here are the commands specific to the b2 backend.
@@ -27015,6 +28799,17 @@ c) Delete this remote y/e/d> y
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,RightSpace,InvalidUtf8,Dot
+ #### --box-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_BOX_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -27674,6 +29469,17 @@ dummyusername plex_password = *** ENCRYPTED *** chunk_size = 5M info_age
- Type: Duration
- Default: 1s
+ #### --cache-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_CACHE_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Backend commands
Here are the commands specific to the cache backend.
@@ -28137,6 +29943,17 @@ this remote y/e/d> y
- If meta format is set to "none", rename transactions will always be used.
- This method is EXPERIMENTAL, don't use on production systems.
+ #### --chunker-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_CHUNKER_DESCRIPTION
+ - Type: string
+ - Required: false
+
# Citrix ShareFile
@@ -28409,6 +30226,17 @@ this remote y/e/d> y
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,LeftPeriod,RightSpace,RightPeriod,InvalidUtf8,Dot
+ #### --sharefile-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_SHAREFILE_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -28929,6 +30757,22 @@ subdir/file2.txt.bin 58 subdir/subsubdir/file4.txt.bin 55 file1.txt.bin
- Type: bool
- Default: false
+ #### --crypt-strict-names
+
+ If set, this will raise an error when crypt comes across a filename that can't be decrypted.
+
+ (By default, rclone will just log a NOTICE and continue as normal.)
+ This can happen if encrypted and unencrypted files are stored in the same
+ directory (which is not recommended.) It may also indicate a more serious
+ problem that should be investigated.
+
+ Properties:
+
+ - Config: strict_names
+ - Env Var: RCLONE_CRYPT_STRICT_NAMES
+ - Type: bool
+ - Default: false
+
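+ A minimal sketch, assuming a crypt remote named secret, of listing while
+ failing on any file name that cannot be decrypted, instead of just
+ logging a NOTICE:
+
+     rclone ls secret: --crypt-strict-names
+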
#### --crypt-filename-encoding
How to encode the encrypted filename to text string.
@@ -28966,6 +30810,17 @@ subdir/file2.txt.bin 58 subdir/subsubdir/file4.txt.bin 55 file1.txt.bin
- Type: string
- Default: ".bin"
+ #### --crypt-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_CRYPT_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -29134,7 +30989,7 @@ subdir/file2.txt.bin 58 subdir/subsubdir/file4.txt.bin 55 file1.txt.bin
* we strip the padding character `=`
`base32` is used rather than the more efficient `base64` so rclone can be
- used on case insensitive remotes (e.g. Windows, Amazon Drive).
+ used on case insensitive remotes (e.g. Windows, Box, Dropbox, Onedrive etc).
### Key derivation
@@ -29280,6 +31135,17 @@ y/e/d> y
- Type: SizeSuffix
- Default: 20Mi
+ #### --compress-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_COMPRESS_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -29402,6 +31268,21 @@ files=drive:important/files -------------------- y) Yes this is OK
- Type: SpaceSepList
- Default:
+ ### Advanced options
+
+ Here are the Advanced options specific to combine (Combine several remotes into one).
+
+ #### --combine-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_COMBINE_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -29843,6 +31724,17 @@ s) Delete this remote y/e/d> y
- Type: Duration
- Default: 10m0s
+ #### --dropbox-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_DROPBOX_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -30135,6 +32027,17 @@ $ rclone lsf --dirs-only -Fip --csv filefabric: 120673758,Burnt PDFs/
- Type: Encoding
- Default: Slash,Del,Ctl,InvalidUtf8,Dot
+ #### --filefabric-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_FILEFABRIC_DESCRIPTION
+ - Type: string
+ - Required: false
+
# FTP
@@ -30551,6 +32454,17 @@ this remote y/e/d> y
- "Ctl,LeftPeriod,Slash"
- VsFTPd can't handle file names starting with dot
+ #### --ftp-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_FTP_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -31243,6 +33157,17 @@ c) Delete this remote y/e/d> y
- Type: Encoding
- Default: Slash,CrLf,InvalidUtf8,Dot
+ #### --gcs-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_GCS_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -32577,10 +34502,23 @@ rclone lsjson -vv -R --checkers=6 gdrive:folder
- "true"
- Get GCP IAM credentials from the environment (env vars or IAM).
+ #### --drive-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_DRIVE_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Metadata
User metadata is stored in the properties field of the drive object.
+ Metadata is supported on files and directories.
+
Here are the possible system metadata items for the drive backend.
| Name | Help | Type | Example | Read Only |
@@ -33224,6 +35162,14 @@ will count towards storage in your Google Account.
- Config: batch_commit_timeout - Env Var: RCLONE_GPHOTOS_BATCH_COMMIT_TIMEOUT - Type: Duration - Default: 10m0s
+ #### --gphotos-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description - Env Var: RCLONE_GPHOTOS_DESCRIPTION - Type: string - Required: false
+
## Limitations
Only images and videos can be uploaded. If you attempt to upload non videos or images or formats that Google Photos doesn't understand, rclone will upload the file, then Google Photos will give an error when it is turned into a media item.
@@ -33452,6 +35398,17 @@ remote:/path/to/sum.sha1
- Type: SizeSuffix
- Default: 0
+ #### --hasher-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_HASHER_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -33766,6 +35723,17 @@ docker run --rm --name "rclone-hdfs" -p 127.0.0.1:9866:9866 -p
- Type: Encoding
- Default: Slash,Colon,Del,Ctl,InvalidUtf8,Dot
+ #### --hdfs-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_HDFS_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -34164,6 +36132,17 @@ Delete this remote y/e/d> y
- Type: Encoding
- Default: Slash,Dot
+ #### --hidrive-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_HIDRIVE_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -34376,6 +36355,17 @@ k) Quit config e/n/d/r/c/s/q> q
- Type: bool
- Default: false
+ #### --http-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_HTTP_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Backend commands
Here are the commands specific to the http backend.
@@ -34591,6 +36581,17 @@ rclone ls imagekit-media-library:directory
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Dollar,Question,Hash,Percent,BackSlash,Del,Ctl,InvalidUtf8,Dot,SquareBracket
+ #### --imagekit-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_IMAGEKIT_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Metadata
Any metadata supported by the underlying remote is read and written.
@@ -34838,6 +36839,17 @@ remote d) Delete this remote y/e/d> y
- Type: Encoding
- Default: Slash,LtGt,CrLf,Del,Ctl,InvalidUtf8,Dot
+ #### --internetarchive-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_INTERNETARCHIVE_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Metadata
Metadata fields provided by Internet Archive.
@@ -35265,6 +37277,17 @@ Edit this remote d) Delete this remote y/e/d> y
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,Del,Ctl,InvalidUtf8,Dot
+ #### --jottacloud-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_JOTTACLOUD_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Metadata
Jottacloud has limited support for metadata, currently an extended set of timestamps.
@@ -35469,6 +37492,17 @@ is OK (default) e) Edit this remote d) Delete this remote y/e/d> y
- Type: Encoding
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+ #### --koofr-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_KOOFR_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -35590,6 +37624,21 @@ remote d) Delete this remote y/e/d> y
- Type: string
- Required: true
+ ### Advanced options
+
+ Here are the Advanced options specific to linkbox (Linkbox).
+
+ #### --linkbox-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_LINKBOX_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -35969,6 +38018,17 @@ this remote d) Delete this remote y/e/d> y
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,InvalidUtf8,Dot
+ #### --mailru-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_MAILRU_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Limitations
@@ -36227,6 +38287,17 @@ me@example.com:/$
- Type: Encoding
- Default: Slash,InvalidUtf8,Dot
+ #### --mega-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_MEGA_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Process `killed`
@@ -36292,6 +38363,21 @@ a) Delete this remote y/e/d> y
set](https://rclone.org/overview/#restricted-characters).
+ ### Advanced options
+
+ Here are the Advanced options specific to memory (In memory object storage system.).
+
+ #### --memory-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_MEMORY_DESCRIPTION
+ - Type: string
+ - Required: false
+
# Akamai NetStorage
@@ -36504,6 +38590,17 @@ Edit this remote d) Delete this remote y/e/d> y
- "https"
- HTTPS protocol
+ #### --netstorage-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_NETSTORAGE_DESCRIPTION
+ - Type: string
+ - Required: false
+
## Backend commands
Here are the commands specific to the netstorage backend.
@@ -37347,6 +39444,35 @@ Yes this is OK e) Edit this remote d) Delete this remote y/e/d> y
- Type: bool
- Default: false
+ #### --azureblob-delete-snapshots
+
+ Set to specify how to deal with snapshots on blob deletion.
+
+ Properties:
+
+ - Config: delete_snapshots
+ - Env Var: RCLONE_AZUREBLOB_DELETE_SNAPSHOTS
+ - Type: string
+ - Required: false
+ - Choices:
+ - ""
+ - By default, the delete operation fails if a blob has snapshots
+ - "include"
+ - Specify 'include' to remove the root blob and all its snapshots
+ - "only"
+ - Specify 'only' to remove only the snapshots but keep the root blob.
+
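+ A minimal sketch, assuming an Azure Blob remote named azblob, of
+ removing blobs together with all of their snapshots:
+
+     rclone delete azblob:container/path --azureblob-delete-snapshots include
+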
+ #### --azureblob-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_AZUREBLOB_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Custom upload headers
@@ -38043,6 +40169,17 @@ Delete this remote y/e/d>
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,RightPeriod,InvalidUtf8,Dot
+ #### --azurefiles-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_AZUREFILES_DESCRIPTION
+ - Type: string
+ - Required: false
+
### Custom upload headers
@@ -38652,7 +40789,7 @@ e) Delete this remote y/e/d> y
If set rclone will use delta listing to implement recursive listings.
- If this flag is set the the onedrive backend will advertise `ListR`
+ If this flag is set the onedrive backend will advertise `ListR`
support for recursive listings.
Setting this flag speeds up these things greatly:
@@ -38685,6 +40822,30 @@ e) Delete this remote y/e/d> y
- Type: bool
- Default: false
+ #### --onedrive-metadata-permissions
+
+ Control whether permissions should be read or written in metadata.
+
+ Reading permissions metadata from files can be done quickly, but it
+ isn't always desirable to set the permissions from the metadata.
+
+
+ Properties:
+
+ - Config: metadata_permissions
+ - Env Var: RCLONE_ONEDRIVE_METADATA_PERMISSIONS
+ - Type: Bits
+ - Default: off
+ - Examples:
+ - "off"
+ - Do not read or write the value
+ - "read"
+ - Read the value only
+ - "write"
+ - Write the value only
+ - "read,write"
+ - Read and Write the value.
+
#### --onedrive-encoding
The encoding for the backend.
@@ -38698,4470 +40859,5304 @@ e) Delete this remote y/e/d> y
- Type: Encoding
- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftSpace,LeftTilde,RightSpace,RightPeriod,InvalidUtf8,Dot
+ #### --onedrive-description
+
+ Description of the remote
+
+ Properties:
+
+ - Config: description
+ - Env Var: RCLONE_ONEDRIVE_DESCRIPTION
+ - Type: string
+ - Required: false
+
+ ### Metadata
+
+ OneDrive supports System Metadata (not User Metadata, as of this writing) for
+ both files and directories. Much of the metadata is read-only, and there are some
+ differences between OneDrive Personal and Business (see table below for
+ details).
+
+ Permissions are also supported, if `--onedrive-metadata-permissions` is set. The
+ accepted values for `--onedrive-metadata-permissions` are `read`, `write`,
+ `read,write`, and `off` (the default). `write` supports adding new permissions,
+ updating the "role" of existing permissions, and removing permissions. Updating
+ and removing require the Permission ID to be known, so it is recommended to use
+ `read,write` instead of `write` if you wish to update/remove permissions.
+
+ Permissions are read/written in JSON format using the same schema as the
+ [OneDrive API](https://learn.microsoft.com/en-us/onedrive/developer/rest-api/resources/permission?view=odsp-graph-online),
+ which differs slightly between OneDrive Personal and Business.
+
+ Example for OneDrive Personal:
+    [
+      {
+        "id": "1234567890ABC!123",
+        "grantedTo": {
+          "user": {
+            "id": "ryan@contoso.com"
+          },
+          "application": {},
+          "device": {}
+        },
+        "invitation": {
+          "email": "ryan@contoso.com"
+        },
+        "link": {
+          "webUrl": "https://1drv.ms/t/s!1234567890ABC"
+        },
+        "roles": [
+          "read"
+        ],
+        "shareId": "s!1234567890ABC"
+      }
+    ]
+
+Example for OneDrive Business:
+
+ [
+ {
+ "id": "48d31887-5fad-4d73-a9f5-3c356e68a038",
+ "grantedToIdentities": [
+ {
+ "user": {
+ "displayName": "ryan@contoso.com"
+ },
+ "application": {},
+ "device": {}
+ }
+ ],
+ "link": {
+ "type": "view",
+ "scope": "users",
+ "webUrl": "https://contoso.sharepoint.com/:w:/t/design/a577ghg9hgh737613bmbjf839026561fmzhsr85ng9f3hjck2t5s"
+ },
+ "roles": [
+ "read"
+ ],
+ "shareId": "u!LKj1lkdlals90j1nlkascl"
+ },
+ {
+ "id": "5D33DD65C6932946",
+ "grantedTo": {
+ "user": {
+ "displayName": "John Doe",
+ "id": "efee1b77-fb3b-4f65-99d6-274c11914d12"
+ },
+ "application": {},
+ "device": {}
+ },
+ "roles": [
+ "owner"
+ ],
+ "shareId": "FWxc1lasfdbEAGM5fI7B67aB5ZMPDMmQ11U"
+ }
+ ]
+
+To write permissions, pass in a "permissions" metadata key using this
+same format. The --metadata-mapper tool can be very helpful for this.
+
+When adding permissions, an email address can be provided in the User.ID
+or DisplayName properties of grantedTo or grantedToIdentities.
+Alternatively, an ObjectID can be provided in User.ID. At least one
+valid recipient must be provided in order to add a permission for a
+user. Creating a Public Link is also supported, if Link.Scope is set to
+"anonymous".
+
+Example request to add a "read" permission:
+
+ [
+ {
+ "id": "",
+ "grantedTo": {
+ "user": {},
+ "application": {},
+ "device": {}
+ },
+ "grantedToIdentities": [
+ {
+ "user": {
+ "id": "ryan@contoso.com"
+ },
+ "application": {},
+ "device": {}
+ }
+ ],
+ "roles": [
+ "read"
+ ]
+ }
+ ]
+
+Note that adding a permission can fail if a conflicting permission
+already exists for the file/folder.
+
+To update an existing permission, include both the Permission ID and the
+new roles to be assigned. roles is the only property that can be
+changed.
+
+To remove permissions, pass in a blob containing only the permissions
+you wish to keep (which can be empty, to remove all.)
+
+Note that both reading and writing permissions require extra API calls,
+so if you don't need to read or write permissions it is recommended to
+omit --onedrive-metadata-permissions.
+
+Metadata and permissions are supported for Folders (directories) as well
+as Files. Note that setting the mtime or btime on a Folder requires one
+extra API call on OneDrive Business only.
+
+OneDrive does not currently support User Metadata. When writing
+metadata, only writeable system properties will be written -- any
+read-only or unrecognized keys passed in will be ignored.
+
+TIP: to see the metadata and permissions for any file or folder, run:
+
+ rclone lsjson remote:path --stat -M --onedrive-metadata-permissions read
+
+Here are the possible system metadata items for the onedrive backend.
+
+ ------------------------------------------------------------------------------------------------------------------------------------------
+ Name Help Type Example Read Only
+ ------------------------------- ---------------------------------- ----------- -------------------------------------- --------------------
+ btime Time of file birth (creation) with RFC 3339 2006-01-02T15:04:05Z N
+ S accuracy (mS for OneDrive
+ Personal).
+
+ content-type The MIME type of the file. string text/plain Y
+
+ created-by-display-name Display name of the user that string John Doe Y
+ created the item.
+
+ created-by-id ID of the user that created the string 48d31887-5fad-4d73-a9f5-3c356e68a038 Y
+ item.
+
+ description A short description of the file. string Contract for signing N
+ Max 1024 characters. Only
+ supported for OneDrive Personal.
+
+ id The unique identifier of the item string 01BYE5RZ6QN3ZWBTUFOFD3GSPGOHDJD36K Y
+ within OneDrive.
+
+ last-modified-by-display-name Display name of the user that last string John Doe Y
+ modified the item.
+
+ last-modified-by-id ID of the user that last modified string 48d31887-5fad-4d73-a9f5-3c356e68a038 Y
+ the item.
+
+ malware-detected Whether OneDrive has detected that boolean true Y
+ the item contains malware.
+
+ mtime Time of last modification with S RFC 3339 2006-01-02T15:04:05Z N
+ accuracy (mS for OneDrive
+ Personal).
+ package-type If present, indicates that this string oneNote Y
+ item is a package instead of a
+ folder or file. Packages are
+ treated like files in some
+ contexts and folders in others.
- ## Limitations
+ permissions Permissions in a JSON dump of JSON {} N
+ OneDrive format. Enable with
+ --onedrive-metadata-permissions.
+ Properties: id, grantedTo,
+ grantedToIdentities, invitation,
+ inheritedFrom, link, roles,
+ shareId
- If you don't use rclone for 90 days the refresh token will
- expire. This will result in authorization problems. This is easy to
- fix by running the `rclone config reconnect remote:` command to get a
- new token and refresh token.
+ shared-by-id ID of the user that shared the string 48d31887-5fad-4d73-a9f5-3c356e68a038 Y
+ item (if shared).
+
+ shared-owner-id ID of the owner of the shared item string 48d31887-5fad-4d73-a9f5-3c356e68a038 Y
+ (if shared).
- ### Naming
+ shared-scope If shared, indicates the scope of string users Y
+ how the item is shared: anonymous,
+ organization, or users.
- Note that OneDrive is case insensitive so you can't have a
- file called "Hello.doc" and one called "hello.doc".
+ shared-time Time when the item was shared, RFC 3339 2006-01-02T15:04:05Z Y
+ with S accuracy (mS for OneDrive
+ Personal).
- There are quite a few characters that can't be in OneDrive file
- names. These can't occur on Windows platforms, but on non-Windows
- platforms they are common. Rclone will map these names to and from an
- identical looking unicode equivalent. For example if a file has a `?`
- in it will be mapped to `?` instead.
+ utime Time of upload with S accuracy (mS RFC 3339 2006-01-02T15:04:05Z Y
+ for OneDrive Personal).
+ ------------------------------------------------------------------------------------------------------------------------------------------
- ### File sizes
+See the metadata docs for more info.
- The largest allowed file size is 250 GiB for both OneDrive Personal and OneDrive for Business [(Updated 13 Jan 2021)](https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#individualfilesize).
+Limitations
- ### Path length
+If you don't use rclone for 90 days the refresh token will expire. This
+will result in authorization problems. This is easy to fix by running
+the rclone config reconnect remote: command to get a new token and
+refresh token.
- The entire path, including the file name, must contain fewer than 400 characters for OneDrive, OneDrive for Business and SharePoint Online. If you are encrypting file and folder names with rclone, you may want to pay attention to this limitation because the encrypted names are typically longer than the original ones.
+Naming
- ### Number of files
+Note that OneDrive is case insensitive so you can't have a file called
+"Hello.doc" and one called "hello.doc".
- OneDrive seems to be OK with at least 50,000 files in a folder, but at
- 100,000 rclone will get errors listing the directory like `couldn’t
- list files: UnknownError:`. See
- [#2707](https://github.com/rclone/rclone/issues/2707) for more info.
+There are quite a few characters that can't be in OneDrive file names.
+These can't occur on Windows platforms, but on non-Windows platforms
+they are common. Rclone will map these names to and from an identical
+looking unicode equivalent. For example, if a file has a ? in it, it
+will be mapped to ? instead.
- An official document about the limitations for different types of OneDrive can be found [here](https://support.office.com/en-us/article/invalid-file-names-and-file-types-in-onedrive-onedrive-for-business-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa).
+File sizes
- ## Versions
+The largest allowed file size is 250 GiB for both OneDrive Personal and
+OneDrive for Business (Updated 13 Jan 2021).
- Every change in a file OneDrive causes the service to create a new
- version of the file. This counts against a users quota. For
- example changing the modification time of a file creates a second
- version, so the file apparently uses twice the space.
+Path length
- For example the `copy` command is affected by this as rclone copies
- the file and then afterwards sets the modification time to match the
- source file which uses another version.
+The entire path, including the file name, must contain fewer than 400
+characters for OneDrive, OneDrive for Business and SharePoint Online. If
+you are encrypting file and folder names with rclone, you may want to
+pay attention to this limitation because the encrypted names are
+typically longer than the original ones.
- You can use the `rclone cleanup` command (see below) to remove all old
- versions.
+Number of files
- Or you can set the `no_versions` parameter to `true` and rclone will
- remove versions after operations which create new versions. This takes
- extra transactions so only enable it if you need it.
+OneDrive seems to be OK with at least 50,000 files in a folder, but at
+100,000 rclone will get errors listing the directory like
+couldn’t list files: UnknownError:. See #2707 for more info.
- **Note** At the time of writing Onedrive Personal creates versions
- (but not for setting the modification time) but the API for removing
- them returns "API not found" so cleanup and `no_versions` should not
- be used on Onedrive Personal.
+An official document about the limitations for different types of
+OneDrive can be found here.
- ### Disabling versioning
+Versions
- Starting October 2018, users will no longer be able to
- disable versioning by default. This is because Microsoft has brought
- an
- [update](https://techcommunity.microsoft.com/t5/Microsoft-OneDrive-Blog/New-Updates-to-OneDrive-and-SharePoint-Team-Site-Versioning/ba-p/204390)
- to the mechanism. To change this new default setting, a PowerShell
- command is required to be run by a SharePoint admin. If you are an
- admin, you can run these commands in PowerShell to change that
- setting:
+Every change to a file in OneDrive causes the service to create a new
+version of the file. This counts against a user's quota. For example
+changing the modification time of a file creates a second version, so
+the file apparently uses twice the space.
- 1. `Install-Module -Name Microsoft.Online.SharePoint.PowerShell` (in case you haven't installed this already)
- 2. `Import-Module Microsoft.Online.SharePoint.PowerShell -DisableNameChecking`
- 3. `Connect-SPOService -Url https://YOURSITE-admin.sharepoint.com -Credential YOU@YOURSITE.COM` (replacing `YOURSITE`, `YOU`, `YOURSITE.COM` with the actual values; this will prompt for your credentials)
- 4. `Set-SPOTenant -EnableMinimumVersionRequirement $False`
- 5. `Disconnect-SPOService` (to disconnect from the server)
+For example the copy command is affected by this as rclone copies the
+file and then afterwards sets the modification time to match the source
+file which uses another version.
- *Below are the steps for normal users to disable versioning. If you don't see the "No Versioning" option, make sure the above requirements are met.*
+You can use the rclone cleanup command (see below) to remove all old
+versions.
+
+Or you can set the no_versions parameter to true and rclone will remove
+versions after operations which create new versions. This takes extra
+transactions so only enable it if you need it.
+
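+A minimal sketch, assuming a OneDrive remote named remote, of copying
+while removing the extra versions the operation would otherwise leave
+behind:
+
+    rclone copy /home/source remote:backup --onedrive-no-versions
+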
+Note At the time of writing Onedrive Personal creates versions (but not
+for setting the modification time) but the API for removing them returns
+"API not found" so cleanup and no_versions should not be used on
+Onedrive Personal.
+
+Disabling versioning
+
+Starting October 2018, users will no longer be able to disable
+versioning by default. This is because Microsoft has brought an update
+to the mechanism. To change this new default setting, a PowerShell
+command is required to be run by a SharePoint admin. If you are an
+admin, you can run these commands in PowerShell to change that setting:
- User [Weropol](https://github.com/Weropol) has found a method to disable
- versioning on OneDrive
+1. Install-Module -Name Microsoft.Online.SharePoint.PowerShell (in case
+ you haven't installed this already)
+2. Import-Module Microsoft.Online.SharePoint.PowerShell -DisableNameChecking
+3. Connect-SPOService -Url https://YOURSITE-admin.sharepoint.com -Credential YOU@YOURSITE.COM
+ (replacing YOURSITE, YOU, YOURSITE.COM with the actual values; this
+ will prompt for your credentials)
+4. Set-SPOTenant -EnableMinimumVersionRequirement $False
+5. Disconnect-SPOService (to disconnect from the server)
- 1. Open the settings menu by clicking on the gear symbol at the top of the OneDrive Business page.
- 2. Click Site settings.
- 3. Once on the Site settings page, navigate to Site Administration > Site libraries and lists.
- 4. Click Customize "Documents".
- 5. Click General Settings > Versioning Settings.
- 6. Under Document Version History select the option No versioning.
- Note: This will disable the creation of new file versions, but will not remove any previous versions. Your documents are safe.
- 7. Apply the changes by clicking OK.
- 8. Use rclone to upload or modify files. (I also use the --no-update-modtime flag)
- 9. Restore the versioning settings after using rclone. (Optional)
+Below are the steps for normal users to disable versioning. If you don't
+see the "No Versioning" option, make sure the above requirements are
+met.
- ## Cleanup
+User Weropol has found a method to disable versioning on OneDrive
- OneDrive supports `rclone cleanup` which causes rclone to look through
- every file under the path supplied and delete all version but the
- current version. Because this involves traversing all the files, then
- querying each file for versions it can be quite slow. Rclone does
- `--checkers` tests in parallel. The command also supports `--interactive`/`i`
- or `--dry-run` which is a great way to see what it would do.
+1. Open the settings menu by clicking on the gear symbol at the top of
+ the OneDrive Business page.
+2. Click Site settings.
+3. Once on the Site settings page, navigate to Site Administration >
+ Site libraries and lists.
+4. Click Customize "Documents".
+5. Click General Settings > Versioning Settings.
+6. Under Document Version History select the option No versioning.
+ Note: This will disable the creation of new file versions, but will
+ not remove any previous versions. Your documents are safe.
+7. Apply the changes by clicking OK.
+8. Use rclone to upload or modify files. (I also use the
+ --no-update-modtime flag)
+9. Restore the versioning settings after using rclone. (Optional)
+
+Cleanup
+
+OneDrive supports rclone cleanup which causes rclone to look through
+every file under the path supplied and delete all versions but the
+current version. Because this involves traversing all the files, then
+querying each file for versions it can be quite slow. Rclone does
+--checkers tests in parallel. The command also supports --interactive/i
+or --dry-run which is a great way to see what it would do.
+
+    rclone cleanup --interactive remote:path/subdir # interactively remove all old versions for path/subdir
+    rclone cleanup remote:path/subdir # unconditionally remove all old versions for path/subdir
+
+NB Onedrive personal can't currently delete versions
+
+Troubleshooting
+
+Excessive throttling or blocked on SharePoint
+
+If you experience excessive throttling or are being blocked on SharePoint
+then it may help to set the user agent explicitly with a flag like this:
+--user-agent "ISV|rclone.org|rclone/v1.55.1"
+
+The specific details can be found in the Microsoft document: Avoid
+getting throttled or blocked in SharePoint Online
+
+Unexpected file size/hash differences on Sharepoint
+
+It is a known issue that Sharepoint (not OneDrive or OneDrive for
+Business) silently modifies uploaded files, mainly Office files (.docx,
+.xlsx, etc.), causing file size and hash checks to fail. There are also
+other situations that will cause OneDrive to report inconsistent file
+sizes. To use rclone with such affected files on Sharepoint, you may
+disable these checks with the following command line arguments:
+
+ --ignore-checksum --ignore-size
+
+Alternatively, if you have write access to the OneDrive files, it may be
+possible to fix this problem for certain files, by attempting the steps
+below. Open the web interface for OneDrive and find the affected files
+(which will be in the error messages/log for rclone). Simply click on
+each of these files, causing OneDrive to open them on the web. This will
+cause each file to be converted in place to a format that is
+functionally equivalent but which will no longer trigger the size
+discrepancy. Once all problematic files are converted you will no longer
+need the ignore options above.
+
+Replacing/deleting existing files on Sharepoint gets "item not found"
+
+It is a known issue that Sharepoint (not OneDrive or OneDrive for
+Business) may return "item not found" errors when users try to replace
+or delete uploaded files; this seems to mainly affect Office files
+(.docx, .xlsx, etc.) and web files (.html, .aspx, etc.). As a
+workaround, you may use the --backup-dir command line
+argument so rclone moves the files to be replaced/deleted into a given
+backup directory (instead of directly replacing/deleting them). For
+example, to instruct rclone to move the files into the directory
+rclone-backup-dir on backend mysharepoint, you may use:
- rclone cleanup --interactive remote:path/subdir # interactively remove all old version for path/subdir
- rclone cleanup remote:path/subdir # unconditionally remove all old version for path/subdir
+ --backup-dir mysharepoint:rclone-backup-dir
- **NB** Onedrive personal can't currently delete versions
+access_denied (AADSTS65005)
- ## Troubleshooting ##
+ Error: access_denied
+ Code: AADSTS65005
+ Description: Using application 'rclone' is currently not supported for your organization [YOUR_ORGANIZATION] because it is in an unmanaged state. An administrator needs to claim ownership of the company by DNS validation of [YOUR_ORGANIZATION] before the application rclone can be provisioned.
- ### Excessive throttling or blocked on SharePoint
+This means that rclone can't use the OneDrive for Business API with your
+account. You can't do much about it, maybe write an email to your
+admins.
- If you experience excessive throttling or is being blocked on SharePoint then it may help to set the user agent explicitly with a flag like this: `--user-agent "ISV|rclone.org|rclone/v1.55.1"`
+However, there are other ways to interact with your OneDrive account.
+Have a look at the WebDAV backend: https://rclone.org/webdav/#sharepoint
- The specific details can be found in the Microsoft document: [Avoid getting throttled or blocked in SharePoint Online](https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online#how-to-decorate-your-http-traffic-to-avoid-throttling)
+invalid_grant (AADSTS50076)
- ### Unexpected file size/hash differences on Sharepoint ####
+ Error: invalid_grant
+ Code: AADSTS50076
+ Description: Due to a configuration change made by your administrator, or because you moved to a new location, you must use multi-factor authentication to access '...'.
- It is a
- [known](https://github.com/OneDrive/onedrive-api-docs/issues/935#issuecomment-441741631)
- issue that Sharepoint (not OneDrive or OneDrive for Business) silently modifies
- uploaded files, mainly Office files (.docx, .xlsx, etc.), causing file size and
- hash checks to fail. There are also other situations that will cause OneDrive to
- report inconsistent file sizes. To use rclone with such
- affected files on Sharepoint, you
- may disable these checks with the following command line arguments:
+If you see the error above after enabling multi-factor authentication
+for your account, you can fix it by refreshing your OAuth refresh token.
+To do that, run rclone config, and choose to edit your OneDrive backend.
+Then, you don't need to actually make any changes until you reach this
+question: Already have a token - refresh?. For this question, answer y
+and go through the process to refresh your token, just like the first
+time the backend is configured. After this, rclone should work again for
+this backend.
---ignore-checksum --ignore-size
+Invalid request when making public links
+On Sharepoint and OneDrive for Business, rclone link may return an
+"Invalid request" error. A possible cause is that the organisation admin
+didn't allow public links to be made for the organisation/sharepoint
+library. To fix the permissions as an admin, take a look at the docs: 1,
+2.
- Alternatively, if you have write access to the OneDrive files, it may be possible
- to fix this problem for certain files, by attempting the steps below.
- Open the web interface for [OneDrive](https://onedrive.live.com) and find the
- affected files (which will be in the error messages/log for rclone). Simply click on
- each of these files, causing OneDrive to open them on the web. This will cause each
- file to be converted in place to a format that is functionally equivalent
- but which will no longer trigger the size discrepancy. Once all problematic files
- are converted you will no longer need the ignore options above.
+Can not access Shared with me files
- ### Replacing/deleting existing files on Sharepoint gets "item not found" ####
+Shared with me files are not supported by rclone currently, but there is
+a workaround:
- It is a [known](https://github.com/OneDrive/onedrive-api-docs/issues/1068) issue
- that Sharepoint (not OneDrive or OneDrive for Business) may return "item not
- found" errors when users try to replace or delete uploaded files; this seems to
- mainly affect Office files (.docx, .xlsx, etc.) and web files (.html, .aspx, etc.). As a workaround, you may use
- the `--backup-dir ` command line argument so rclone moves the
- files to be replaced/deleted into a given backup directory (instead of directly
- replacing/deleting them). For example, to instruct rclone to move the files into
- the directory `rclone-backup-dir` on backend `mysharepoint`, you may use:
+1. Visit https://onedrive.live.com
+2. Right click an item in Shared, then click Add shortcut to My files in
+   the context menu [make_shortcut]
+3. The shortcut will appear in My files, you can access it with rclone,
+ it behaves like a normal folder/file. [in_my_files] [rclone_mount]
---backup-dir mysharepoint:rclone-backup-dir
+Live Photos uploaded from iOS (small video clips in .heic files)
+The iOS OneDrive app introduced upload and storage of Live Photos in
+2020. The usage and download of these uploaded Live Photos is
+unfortunately still work-in-progress and this introduces several issues
+when copying, synchronising and mounting – both in rclone and in the
+native OneDrive client on Windows.
- ### access\_denied (AADSTS65005) ####
+The root cause can easily be seen if you locate one of your Live Photos
+in the OneDrive web interface. Then download the photo from the web
+interface. You will then see that the size of the downloaded .heic file is
+smaller than the size displayed in the web interface. The downloaded
+file is smaller because it only contains a single frame (still photo)
+extracted from the Live Photo (movie) stored in OneDrive.
-Error: access_denied Code: AADSTS65005 Description: Using application
-'rclone' is currently not supported for your organization
-[YOUR_ORGANIZATION] because it is in an unmanaged state. An
-administrator needs to claim ownership of the company by DNS validation
-of [YOUR_ORGANIZATION] before the application rclone can be provisioned.
+The different sizes will cause rclone copy/sync to repeatedly recopy
+unmodified photos something like this:
+ DEBUG : 20230203_123826234_iOS.heic: Sizes differ (src 4470314 vs dst 1298667)
+ DEBUG : 20230203_123826234_iOS.heic: sha1 = fc2edde7863b7a7c93ca6771498ac797f8460750 OK
+ INFO : 20230203_123826234_iOS.heic: Copied (replaced existing)
- This means that rclone can't use the OneDrive for Business API with your account. You can't do much about it, maybe write an email to your admins.
+These recopies can be worked around by adding --ignore-size. Please note
+that this workaround only syncs the still picture, not the movie clip,
+and relies on modification dates being correctly updated on all files in
+all situations.
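+
+A minimal sketch, assuming a OneDrive remote named remote, of syncing a
+photo folder while ignoring the size differences caused by Live Photos:
+
+    rclone sync remote:Pictures /home/user/Pictures --ignore-size
+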
- However, there are other ways to interact with your OneDrive account. Have a look at the WebDAV backend: https://rclone.org/webdav/#sharepoint
+The different sizes will also cause rclone check to report size errors
+something like this:
- ### invalid\_grant (AADSTS50076) ####
+ ERROR : 20230203_123826234_iOS.heic: sizes differ
-Error: invalid_grant Code: AADSTS50076 Description: Due to a
-configuration change made by your administrator, or because you moved to
-a new location, you must use multi-factor authentication to access
-'...'.
+These check errors can be suppressed by adding --ignore-size.
+The different sizes will also cause rclone mount to fail downloading
+with an error something like this:
- If you see the error above after enabling multi-factor authentication for your account, you can fix it by refreshing your OAuth refresh token. To do that, run `rclone config`, and choose to edit your OneDrive backend. Then, you don't need to actually make any changes until you reach this question: `Already have a token - refresh?`. For this question, answer `y` and go through the process to refresh your token, just like the first time the backend is configured. After this, rclone should work again for this backend.
+ ERROR : 20230203_123826234_iOS.heic: ReadFileHandle.Read error: low level retry 1/10: unexpected EOF
- ### Invalid request when making public links ####
+or like this when using --cache-mode=full:
- On Sharepoint and OneDrive for Business, `rclone link` may return an "Invalid
- request" error. A possible cause is that the organisation admin didn't allow
- public links to be made for the organisation/sharepoint library. To fix the
- permissions as an admin, take a look at the docs:
- [1](https://docs.microsoft.com/en-us/sharepoint/turn-external-sharing-on-or-off),
- [2](https://support.microsoft.com/en-us/office/set-up-and-manage-access-requests-94b26e0b-2822-49d4-929a-8455698654b3).
+ INFO : 20230203_123826234_iOS.heic: vfs cache: downloader: error count now 1: vfs reader: failed to write to cache file: 416 Requested Range Not Satisfiable:
+ ERROR : 20230203_123826234_iOS.heic: vfs cache: failed to download: vfs reader: failed to write to cache file: 416 Requested Range Not Satisfiable:
- ### Can not access `Shared` with me files
+OpenDrive
- Shared with me files is not supported by rclone [currently](https://github.com/rclone/rclone/issues/4062), but there is a workaround:
+Paths are specified as remote:path
- 1. Visit [https://onedrive.live.com](https://onedrive.live.com/)
- 2. Right click a item in `Shared`, then click `Add shortcut to My files` in the context
- ![make_shortcut](https://user-images.githubusercontent.com/60313789/206118040-7e762b3b-aa61-41a1-8649-cc18889f3572.png "Screenshot (Shared with me)")
- 3. The shortcut will appear in `My files`, you can access it with rclone, it behaves like a normal folder/file.
- ![in_my_files](https://i.imgur.com/0S8H3li.png "Screenshot (My Files)")
- ![rclone_mount](https://i.imgur.com/2Iq66sW.png "Screenshot (rclone mount)")
+Paths may be as deep as required, e.g. remote:directory/subdirectory.
- ### Live Photos uploaded from iOS (small video clips in .heic files)
+Configuration
- The iOS OneDrive app introduced [upload and storage](https://techcommunity.microsoft.com/t5/microsoft-onedrive-blog/live-photos-come-to-onedrive/ba-p/1953452)
- of [Live Photos](https://support.apple.com/en-gb/HT207310) in 2020.
- The usage and download of these uploaded Live Photos is unfortunately still work-in-progress
- and this introduces several issues when copying, synchronising and mounting – both in rclone and in the native OneDrive client on Windows.
+Here is an example of how to make a remote called remote. First run:
- The root cause can easily be seen if you locate one of your Live Photos in the OneDrive web interface.
- Then download the photo from the web interface. You will then see that the size of downloaded .heic file is smaller than the size displayed in the web interface.
- The downloaded file is smaller because it only contains a single frame (still photo) extracted from the Live Photo (movie) stored in OneDrive.
+ rclone config
- The different sizes will cause `rclone copy/sync` to repeatedly recopy unmodified photos something like this:
+This will guide you through an interactive setup process:
- DEBUG : 20230203_123826234_iOS.heic: Sizes differ (src 4470314 vs dst 1298667)
- DEBUG : 20230203_123826234_iOS.heic: sha1 = fc2edde7863b7a7c93ca6771498ac797f8460750 OK
- INFO : 20230203_123826234_iOS.heic: Copied (replaced existing)
-
- These recopies can be worked around by adding `--ignore-size`. Please note that this workaround only syncs the still-picture not the movie clip,
- and relies on modification dates being correctly updated on all files in all situations.
-
- The different sizes will also cause `rclone check` to report size errors something like this:
-
- ERROR : 20230203_123826234_iOS.heic: sizes differ
-
- These check errors can be suppressed by adding `--ignore-size`.
-
- The different sizes will also cause `rclone mount` to fail downloading with an error something like this:
-
- ERROR : 20230203_123826234_iOS.heic: ReadFileHandle.Read error: low level retry 1/10: unexpected EOF
-
- or like this when using `--cache-mode=full`:
-
- INFO : 20230203_123826234_iOS.heic: vfs cache: downloader: error count now 1: vfs reader: failed to write to cache file: 416 Requested Range Not Satisfiable:
- ERROR : 20230203_123826234_iOS.heic: vfs cache: failed to download: vfs reader: failed to write to cache file: 416 Requested Range Not Satisfiable:
-
- # OpenDrive
-
- Paths are specified as `remote:path`
-
- Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
-
- ## Configuration
-
- Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
- This will guide you through an interactive setup process:
-
-n) New remote
-o) Delete remote
-p) Quit config e/n/d/q> n name> remote Type of storage to configure.
- Choose a number from below, or type in your own value [snip] XX /
- OpenDrive "opendrive" [snip] Storage> opendrive Username username>
+ n) New remote
+ d) Delete remote
+ q) Quit config
+ e/n/d/q> n
+ name> remote
+ Type of storage to configure.
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / OpenDrive
+ \ "opendrive"
+ [snip]
+ Storage> opendrive
+ Username
+ username>
Password
-q) Yes type in my own password
-r) Generate random password y/g> y Enter the password: password:
- Confirm the password: password: -------------------- [remote]
- username = password = *** ENCRYPTED *** --------------------
-s) Yes this is OK
-t) Edit this remote
-u) Delete this remote y/e/d> y
+ y) Yes type in my own password
+ g) Generate random password
+ y/g> y
+ Enter the password:
+ password:
+ Confirm the password:
+ password:
+ --------------------
+ [remote]
+ username =
+ password = *** ENCRYPTED ***
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+List directories in top level of your OpenDrive
- List directories in top level of your OpenDrive
+ rclone lsd remote:
- rclone lsd remote:
+List all the files in your OpenDrive
- List all the files in your OpenDrive
+ rclone ls remote:
- rclone ls remote:
+To copy a local directory to an OpenDrive directory called backup
- To copy a local directory to an OpenDrive directory called backup
+ rclone copy /home/source remote:backup
- rclone copy /home/source remote:backup
+Modification times and hashes
- ### Modification times and hashes
+OpenDrive allows modification times to be set on objects accurate to 1
+second. These will be used to detect whether objects need syncing or
+not.
- OpenDrive allows modification times to be set on objects accurate to 1
- second. These will be used to detect whether objects need syncing or
- not.
+The MD5 hash algorithm is supported.
- The MD5 hash algorithm is supported.
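+
+For example, to verify a copy made with the command above using the MD5
+hashes as well as sizes:
+
+    rclone check /home/source remote:backup
+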
+Restricted filename characters
- ### Restricted filename characters
+ Character Value Replacement
+ ----------- ------- -------------
+ NUL 0x00 ␀
+ / 0x2F /
+ " 0x22 "
+ * 0x2A *
+ : 0x3A :
+ < 0x3C <
+ > 0x3E >
+ ? 0x3F ?
+ \ 0x5C \
+ | 0x7C |
- | Character | Value | Replacement |
- | --------- |:-----:|:-----------:|
- | NUL | 0x00 | ␀ |
- | / | 0x2F | / |
- | " | 0x22 | " |
- | * | 0x2A | * |
- | : | 0x3A | : |
- | < | 0x3C | < |
- | > | 0x3E | > |
- | ? | 0x3F | ? |
- | \ | 0x5C | \ |
- | \| | 0x7C | | |
+File names can also not begin or end with the following characters.
+These only get replaced if they are the first or last character in the
+name:
- File names can also not begin or end with the following characters.
- These only get replaced if they are the first or last character in the name:
+ Character Value Replacement
+ ----------- ------- -------------
+ SP 0x20 ␠
+ HT 0x09 ␉
+ LF 0x0A ␊
+ VT 0x0B ␋
+ CR 0x0D ␍
- | Character | Value | Replacement |
- | --------- |:-----:|:-----------:|
- | SP | 0x20 | ␠ |
- | HT | 0x09 | ␉ |
- | LF | 0x0A | ␊ |
- | VT | 0x0B | ␋ |
- | CR | 0x0D | ␍ |
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON
+strings.
+Standard options
- Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
- as they can't be used in JSON strings.
+Here are the Standard options specific to opendrive (OpenDrive).
+--opendrive-username
- ### Standard options
+Username.
- Here are the Standard options specific to opendrive (OpenDrive).
+Properties:
- #### --opendrive-username
+- Config: username
+- Env Var: RCLONE_OPENDRIVE_USERNAME
+- Type: string
+- Required: true
- Username.
+--opendrive-password
- Properties:
+Password.
- - Config: username
- - Env Var: RCLONE_OPENDRIVE_USERNAME
- - Type: string
- - Required: true
+NB Input to this must be obscured - see rclone obscure.
- #### --opendrive-password
+Properties:
- Password.
+- Config: password
+- Env Var: RCLONE_OPENDRIVE_PASSWORD
+- Type: string
+- Required: true
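+
+As a quick sketch, a one-off listing can also be done without a saved
+remote by using rclone's on-the-fly :opendrive: syntax and passing the
+credentials as flags (the username and password below are placeholders):
+
+    rclone lsd :opendrive: \
+        --opendrive-username you@example.com \
+        --opendrive-password "$(rclone obscure 'your-plain-password')"
+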
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
+Advanced options
- Properties:
+Here are the Advanced options specific to opendrive (OpenDrive).
- - Config: password
- - Env Var: RCLONE_OPENDRIVE_PASSWORD
- - Type: string
- - Required: true
+--opendrive-encoding
- ### Advanced options
+The encoding for the backend.
- Here are the Advanced options specific to opendrive (OpenDrive).
+See the encoding section in the overview for more info.
- #### --opendrive-encoding
+Properties:
- The encoding for the backend.
+- Config: encoding
+- Env Var: RCLONE_OPENDRIVE_ENCODING
+- Type: Encoding
+- Default:
+ Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+--opendrive-chunk-size
- Properties:
+Files will be uploaded in chunks this size.
- - Config: encoding
- - Env Var: RCLONE_OPENDRIVE_ENCODING
- - Type: Encoding
- - Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,LeftSpace,LeftCrLfHtVt,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot
+Note that these chunks are buffered in memory so increasing them will
+increase memory use.
- #### --opendrive-chunk-size
+Properties:
- Files will be uploaded in chunks this size.
+- Config: chunk_size
+- Env Var: RCLONE_OPENDRIVE_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 10Mi
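+
+For example, on a fast connection with memory to spare the chunk size
+can be raised for a single transfer (64Mi is only illustrative; chunks
+of this size are buffered in memory per transfer):
+
+    rclone copy /home/source remote:backup --opendrive-chunk-size 64Mi
+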
- Note that these chunks are buffered in memory so increasing them will
- increase memory use.
+--opendrive-description
- Properties:
+Description of the remote
- - Config: chunk_size
- - Env Var: RCLONE_OPENDRIVE_CHUNK_SIZE
- - Type: SizeSuffix
- - Default: 10Mi
+Properties:
+- Config: description
+- Env Var: RCLONE_OPENDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+Limitations
- ## Limitations
+Note that OpenDrive is case insensitive so you can't have a file called
+"Hello.doc" and one called "hello.doc".
- Note that OpenDrive is case insensitive so you can't have a
- file called "Hello.doc" and one called "hello.doc".
+There are quite a few characters that can't be in OpenDrive file names.
+These can't occur on Windows platforms, but on non-Windows platforms
+they are common. Rclone will map these names to and from an identical
+looking unicode equivalent. For example if a file has a ? in it, it
+will be mapped to ？ instead.
- There are quite a few characters that can't be in OpenDrive file
- names. These can't occur on Windows platforms, but on non-Windows
- platforms they are common. Rclone will map these names to and from an
- identical looking unicode equivalent. For example if a file has a `?`
- in it will be mapped to `?` instead.
+rclone about is not supported by the OpenDrive backend. Backends without
+this capability cannot determine free space for an rclone mount or use
+policy mfs (most free space) as a member of an rclone union remote.
- `rclone about` is not supported by the OpenDrive backend. Backends without
- this capability cannot determine free space for an rclone mount or
- use policy `mfs` (most free space) as a member of an rclone union
- remote.
+See List of backends that do not support rclone about and rclone about
- See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
+Oracle Object Storage
- # Oracle Object Storage
- - [Oracle Object Storage Overview](https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/objectstorageoverview.htm)
- - [Oracle Object Storage FAQ](https://www.oracle.com/cloud/storage/object-storage/faq/)
- - [Oracle Object Storage Limits](https://docs.oracle.com/en-us/iaas/Content/Resources/Assets/whitepapers/oci-object-storage-best-practices.pdf)
+- Oracle Object Storage Overview
+- Oracle Object Storage FAQ
+- Oracle Object Storage Limits
- Paths are specified as `remote:bucket` (or `remote:` for the `lsd` command.) You may put subdirectories in
- too, e.g. `remote:bucket/path/to/dir`.
+Paths are specified as remote:bucket (or remote: for the lsd command.)
+You may put subdirectories in too, e.g. remote:bucket/path/to/dir.
- Sample command to transfer local artifacts to remote:bucket in oracle object storage:
+Sample command to transfer local artifacts to remote:bucket in oracle
+object storage:
- `rclone -vvv --progress --stats-one-line --max-stats-groups 10 --log-format date,time,UTC,longfile --fast-list --buffer-size 256Mi --oos-no-check-bucket --oos-upload-cutoff 10Mi --multi-thread-cutoff 16Mi --multi-thread-streams 3000 --transfers 3000 --checkers 64 --retries 2 --oos-chunk-size 10Mi --oos-upload-concurrency 10000 --oos-attempt-resume-upload --oos-leave-parts-on-error sync ./artifacts remote:bucket -vv`
+rclone -vvv --progress --stats-one-line --max-stats-groups 10 --log-format date,time,UTC,longfile --fast-list --buffer-size 256Mi --oos-no-check-bucket --oos-upload-cutoff 10Mi --multi-thread-cutoff 16Mi --multi-thread-streams 3000 --transfers 3000 --checkers 64 --retries 2 --oos-chunk-size 10Mi --oos-upload-concurrency 10000 --oos-attempt-resume-upload --oos-leave-parts-on-error sync ./artifacts remote:bucket -vv
- ## Configuration
+Configuration
- Here is an example of making an oracle object storage configuration. `rclone config` walks you
- through it.
+Here is an example of making an oracle object storage configuration.
+rclone config walks you through it.
- Here is an example of how to make a remote called `remote`. First run:
+Here is an example of how to make a remote called remote. First run:
- rclone config
+ rclone config
- This will guide you through an interactive setup process:
+This will guide you through an interactive setup process:
-n) New remote
-o) Delete remote
-p) Rename remote
-q) Copy remote
-r) Set configuration password
-s) Quit config e/n/d/r/c/s/q> n
+ n) New remote
+ d) Delete remote
+ r) Rename remote
+ c) Copy remote
+ s) Set configuration password
+ q) Quit config
+ e/n/d/r/c/s/q> n
-Enter name for new remote. name> remote
+ Enter name for new remote.
+ name> remote
-Option Storage. Type of storage to configure. Choose a number from
-below, or type in your own value. [snip] XX / Oracle Cloud
-Infrastructure Object Storage (oracleobjectstorage) Storage>
-oracleobjectstorage
-
-Option provider. Choose your Auth Provider Choose a number from below,
-or type in your own string value. Press Enter for the default
-(env_auth). 1 / automatically pickup the credentials from runtime(env),
-first one to provide auth wins (env_auth) / use an OCI user and an API
-key for authentication. 2 | you’ll need to put in a config file your
-tenancy OCID, user OCID, region, the path, fingerprint to an API key. |
-https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
- (user_principal_auth) / use instance principals to authorize an
-instance to make API calls. 3 | each instance has its own identity, and
-authenticates using the certificates that are read from instance
-metadata. |
-https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
- (instance_principal_auth) 4 / use resource principals to make API calls
- (resource_principal_auth) 5 / no credentials needed, this is typically
-for reading public buckets (no_auth) provider> 2
-
-Option namespace. Object storage namespace Enter a value. namespace>
-idbamagbg734
-
-Option compartment. Object storage compartment OCID Enter a value.
-compartment>
-ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
-
-Option region. Object storage Region Enter a value. region> us-ashburn-1
-
-Option endpoint. Endpoint for Object storage API. Leave blank to use the
-default endpoint for the region. Enter a value. Press Enter to leave
-empty. endpoint>
-
-Option config_file. Full Path to OCI config file Choose a number from
-below, or type in your own string value. Press Enter for the default
-(~/.oci/config). 1 / oci configuration file location (~/.oci/config)
-config_file> /etc/oci/dev.conf
-
-Option config_profile. Profile name inside OCI config file Choose a
-number from below, or type in your own string value. Press Enter for the
-default (Default). 1 / Use the default profile (Default)
-config_profile> Test
-
-Edit advanced config? y) Yes n) No (default) y/n> n
-
-Configuration complete. Options: - type: oracleobjectstorage -
-namespace: idbamagbg734 - compartment:
-ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
-- region: us-ashburn-1 - provider: user_principal_auth - config_file:
-/etc/oci/dev.conf - config_profile: Test Keep this "remote" remote? y)
-Yes this is OK (default) e) Edit this remote d) Delete this remote
-y/e/d> y
-
-
- See all buckets
-
- rclone lsd remote:
-
- Create a new bucket
-
- rclone mkdir remote:bucket
-
- List the contents of a bucket
-
- rclone ls remote:bucket
- rclone ls remote:bucket --max-depth 1
-
- ## Authentication Providers
-
- OCI has various authentication methods. To learn more about authentication methods please refer [oci authentication
- methods](https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdk_authentication_methods.htm)
- These choices can be specified in the rclone config file.
-
- Rclone supports the following OCI authentication provider.
-
- User Principal
- Instance Principal
- Resource Principal
- No authentication
-
- ### User Principal
-
- Sample rclone config file for Authentication Provider User Principal:
-
- [oos]
- type = oracleobjectstorage
- namespace = id34
- compartment = ocid1.compartment.oc1..aaba
- region = us-ashburn-1
- provider = user_principal_auth
- config_file = /home/opc/.oci/config
- config_profile = Default
-
- Advantages:
- - One can use this method from any server within OCI or on-premises or from other cloud provider.
-
- Considerations:
- - you need to configure user’s privileges / policy to allow access to object storage
- - Overhead of managing users and keys.
- - If the user is deleted, the config file will no longer work and may cause automation regressions that use the user's credentials.
-
- ### Instance Principal
-
- An OCI compute instance can be authorized to use rclone by using it's identity and certificates as an instance principal.
- With this approach no credentials have to be stored and managed.
-
- Sample rclone configuration file for Authentication Provider Instance Principal:
-
- [opc@rclone ~]$ cat ~/.config/rclone/rclone.conf
- [oos]
- type = oracleobjectstorage
- namespace = idfn
- compartment = ocid1.compartment.oc1..aak7a
- region = us-ashburn-1
- provider = instance_principal_auth
-
- Advantages:
-
- - With instance principals, you don't need to configure user credentials and transfer/ save it to disk in your compute
- instances or rotate the credentials.
- - You don’t need to deal with users and keys.
- - Greatly helps in automation as you don't have to manage access keys, user private keys, storing them in vault,
- using kms etc.
-
- Considerations:
-
- - You need to configure a dynamic group having this instance as member and add policy to read object storage to that
- dynamic group.
- - Everyone who has access to this machine can execute the CLI commands.
- - It is applicable for oci compute instances only. It cannot be used on external instance or resources.
-
- ### Resource Principal
-
- Resource principal auth is very similar to instance principal auth but used for resources that are not
- compute instances such as [serverless functions](https://docs.oracle.com/en-us/iaas/Content/Functions/Concepts/functionsoverview.htm).
- To use resource principal ensure Rclone process is started with these environment variables set in its process.
-
- export OCI_RESOURCE_PRINCIPAL_VERSION=2.2
- export OCI_RESOURCE_PRINCIPAL_REGION=us-ashburn-1
- export OCI_RESOURCE_PRINCIPAL_PRIVATE_PEM=/usr/share/model-server/key.pem
- export OCI_RESOURCE_PRINCIPAL_RPST=/usr/share/model-server/security_token
-
- Sample rclone configuration file for Authentication Provider Resource Principal:
-
- [oos]
- type = oracleobjectstorage
- namespace = id34
- compartment = ocid1.compartment.oc1..aaba
- region = us-ashburn-1
- provider = resource_principal_auth
-
- ### No authentication
-
- Public buckets do not require any authentication mechanism to read objects.
- Sample rclone configuration file for No authentication:
-
- [oos]
- type = oracleobjectstorage
- namespace = id34
- compartment = ocid1.compartment.oc1..aaba
- region = us-ashburn-1
- provider = no_auth
-
- ### Modification times and hashes
-
- The modification time is stored as metadata on the object as
- `opc-meta-mtime` as floating point since the epoch, accurate to 1 ns.
-
- If the modification time needs to be updated rclone will attempt to perform a server
- side copy to update the modification if the object can be copied in a single part.
- In the case the object is larger than 5Gb, the object will be uploaded rather than copied.
-
- Note that reading this from the object takes an additional `HEAD` request as the metadata
- isn't returned in object listings.
-
- The MD5 hash algorithm is supported.
-
- ### Multipart uploads
-
- rclone supports multipart uploads with OOS which means that it can
- upload files bigger than 5 GiB.
-
- Note that files uploaded *both* with multipart upload *and* through
- crypt remotes do not have MD5 sums.
-
- rclone switches from single part uploads to multipart uploads at the
- point specified by `--oos-upload-cutoff`. This can be a maximum of 5 GiB
- and a minimum of 0 (ie always upload multipart files).
-
- The chunk sizes used in the multipart upload are specified by
- `--oos-chunk-size` and the number of chunks uploaded concurrently is
- specified by `--oos-upload-concurrency`.
-
- Multipart uploads will use `--transfers` * `--oos-upload-concurrency` *
- `--oos-chunk-size` extra memory. Single part uploads to not use extra
- memory.
-
- Single part transfers can be faster than multipart transfers or slower
- depending on your latency from oos - the more latency, the more likely
- single part transfers will be faster.
-
- Increasing `--oos-upload-concurrency` will increase throughput (8 would
- be a sensible value) and increasing `--oos-chunk-size` also increases
- throughput (16M would be sensible). Increasing either of these will
- use more memory. The default values are high enough to gain most of
- the possible performance without using too much memory.
-
-
- ### Standard options
-
- Here are the Standard options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
-
- #### --oos-provider
+ Option Storage.
+ Type of storage to configure.
+ Choose a number from below, or type in your own value.
+ [snip]
+ XX / Oracle Cloud Infrastructure Object Storage
+ \ (oracleobjectstorage)
+ Storage> oracleobjectstorage
+ Option provider.
Choose your Auth Provider
+ Choose a number from below, or type in your own string value.
+ Press Enter for the default (env_auth).
+ 1 / automatically pickup the credentials from runtime(env), first one to provide auth wins
+ \ (env_auth)
+ / use an OCI user and an API key for authentication.
+ 2 | you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
+ | https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
+ \ (user_principal_auth)
+ / use instance principals to authorize an instance to make API calls.
+ 3 | each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
+ | https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
+ \ (instance_principal_auth)
+ / use workload identity to grant Kubernetes pods policy-driven access to Oracle Cloud
+ 4 | Infrastructure (OCI) resources using OCI Identity and Access Management (IAM).
+ | https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm
+ \ (workload_identity_auth)
+ 5 / use resource principals to make API calls
+ \ (resource_principal_auth)
+ 6 / no credentials needed, this is typically for reading public buckets
+ \ (no_auth)
+ provider> 2
- Properties:
-
- - Config: provider
- - Env Var: RCLONE_OOS_PROVIDER
- - Type: string
- - Default: "env_auth"
- - Examples:
- - "env_auth"
- - automatically pickup the credentials from runtime(env), first one to provide auth wins
- - "user_principal_auth"
- - use an OCI user and an API key for authentication.
- - you’ll need to put in a config file your tenancy OCID, user OCID, region, the path, fingerprint to an API key.
- - https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
- - "instance_principal_auth"
- - use instance principals to authorize an instance to make API calls.
- - each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
- - https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
- - "resource_principal_auth"
- - use resource principals to make API calls
- - "no_auth"
- - no credentials needed, this is typically for reading public buckets
-
- #### --oos-namespace
-
+ Option namespace.
Object storage namespace
+ Enter a value.
+ namespace> idbamagbg734
- Properties:
-
- - Config: namespace
- - Env Var: RCLONE_OOS_NAMESPACE
- - Type: string
- - Required: true
-
- #### --oos-compartment
-
+ Option compartment.
Object storage compartment OCID
+ Enter a value.
+ compartment> ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
- Properties:
-
- - Config: compartment
- - Env Var: RCLONE_OOS_COMPARTMENT
- - Provider: !no_auth
- - Type: string
- - Required: true
-
- #### --oos-region
-
+ Option region.
Object storage Region
+ Enter a value.
+ region> us-ashburn-1
- Properties:
-
- - Config: region
- - Env Var: RCLONE_OOS_REGION
- - Type: string
- - Required: true
-
- #### --oos-endpoint
-
+ Option endpoint.
Endpoint for Object storage API.
-
Leave blank to use the default endpoint for the region.
+ Enter a value. Press Enter to leave empty.
+ endpoint>
+
+ Option config_file.
+ Full Path to OCI config file
+ Choose a number from below, or type in your own string value.
+ Press Enter for the default (~/.oci/config).
+ 1 / oci configuration file location
+ \ (~/.oci/config)
+ config_file> /etc/oci/dev.conf
+
+ Option config_profile.
+ Profile name inside OCI config file
+ Choose a number from below, or type in your own string value.
+ Press Enter for the default (Default).
+ 1 / Use the default profile
+ \ (Default)
+ config_profile> Test
+
+ Edit advanced config?
+ y) Yes
+ n) No (default)
+ y/n> n
+
+ Configuration complete.
+ Options:
+ - type: oracleobjectstorage
+ - namespace: idbamagbg734
+ - compartment: ocid1.compartment.oc1..aaaaaaaapufkxc7ame3sthry5i7ujrwfc7ejnthhu6bhanm5oqfjpyasjkba
+ - region: us-ashburn-1
+ - provider: user_principal_auth
+ - config_file: /etc/oci/dev.conf
+ - config_profile: Test
+ Keep this "remote" remote?
+ y) Yes this is OK (default)
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+See all buckets
+
+ rclone lsd remote:
+
+Create a new bucket
+
+ rclone mkdir remote:bucket
+
+List the contents of a bucket
+
+ rclone ls remote:bucket
+ rclone ls remote:bucket --max-depth 1
+
+Authentication Providers
+
+OCI has various authentication methods. To learn more about
+authentication methods please refer to the oci authentication methods
+documentation. These choices can be specified in the rclone config file.
+
+Rclone supports the following OCI authentication providers.
+
+ User Principal
+ Instance Principal
+ Resource Principal
+ Workload Identity
+ No authentication
+
+User Principal
+
+Sample rclone config file for Authentication Provider User Principal:
+
+ [oos]
+ type = oracleobjectstorage
+ namespace = id34
+ compartment = ocid1.compartment.oc1..aaba
+ region = us-ashburn-1
+ provider = user_principal_auth
+ config_file = /home/opc/.oci/config
+ config_profile = Default
+
+Advantages:
+
+- One can use this method from any server within OCI, on-premises or
+  from another cloud provider.
+
+Considerations:
+
+- You need to configure user’s privileges / policy to allow access to
+  object storage.
+- Overhead of managing users and keys.
+- If the user is deleted, the config file will no longer work and may
+  cause automation regressions that use the user's credentials.
- Properties:
+Instance Principal
+
+An OCI compute instance can be authorized to use rclone by using its
+identity and certificates as an instance principal. With this approach
+no credentials have to be stored and managed.
+
+Sample rclone configuration file for Authentication Provider Instance
+Principal:
+
+ [opc@rclone ~]$ cat ~/.config/rclone/rclone.conf
+ [oos]
+ type = oracleobjectstorage
+ namespace = idfn
+ compartment = ocid1.compartment.oc1..aak7a
+ region = us-ashburn-1
+ provider = instance_principal_auth
+
+Advantages:
- - Config: endpoint
- - Env Var: RCLONE_OOS_ENDPOINT
- - Type: string
- - Required: false
+- With instance principals, you don't need to configure user
+  credentials, transfer/save them to disk in your compute instances,
+  or rotate the credentials.
+- You don’t need to deal with users and keys.
+- Greatly helps in automation as you don't have to manage access keys,
+ user private keys, storing them in vault, using kms etc.
- #### --oos-config-file
+Considerations:
- Path to OCI config file
+- You need to configure a dynamic group having this instance as member
+ and add policy to read object storage to that dynamic group.
+- Everyone who has access to this machine can execute the CLI
+ commands.
+- It is applicable to oci compute instances only. It cannot be used
+  on external instances or resources.
- Properties:
+Resource Principal
- - Config: config_file
- - Env Var: RCLONE_OOS_CONFIG_FILE
- - Provider: user_principal_auth
- - Type: string
- - Default: "~/.oci/config"
- - Examples:
- - "~/.oci/config"
- - oci configuration file location
+Resource principal auth is very similar to instance principal auth but
+used for resources that are not compute instances, such as serverless
+functions. To use resource principal auth, ensure the Rclone process is
+started with these environment variables set in its process.
- #### --oos-config-profile
+ export OCI_RESOURCE_PRINCIPAL_VERSION=2.2
+ export OCI_RESOURCE_PRINCIPAL_REGION=us-ashburn-1
+ export OCI_RESOURCE_PRINCIPAL_PRIVATE_PEM=/usr/share/model-server/key.pem
+ export OCI_RESOURCE_PRINCIPAL_RPST=/usr/share/model-server/security_token
- Profile name inside the oci config file
+Sample rclone configuration file for Authentication Provider Resource
+Principal:
- Properties:
+ [oos]
+ type = oracleobjectstorage
+ namespace = id34
+ compartment = ocid1.compartment.oc1..aaba
+ region = us-ashburn-1
+ provider = resource_principal_auth
- - Config: config_profile
- - Env Var: RCLONE_OOS_CONFIG_PROFILE
- - Provider: user_principal_auth
- - Type: string
- - Default: "Default"
- - Examples:
- - "Default"
- - Use the default profile
+Workload Identity
+
+Workload Identity auth may be used when running Rclone from a Kubernetes
+pod on a Container Engine for Kubernetes (OKE) cluster. For more details
+on configuring Workload Identity, see Granting Workloads Access to OCI
+Resources. To use workload identity, ensure Rclone is started with these
+environment variables set in its process.
+
+ export OCI_RESOURCE_PRINCIPAL_VERSION=2.2
+ export OCI_RESOURCE_PRINCIPAL_REGION=us-ashburn-1
+
+No authentication
+
+Public buckets do not require any authentication mechanism to read
+objects. Sample rclone configuration file for No authentication:
+
+ [oos]
+ type = oracleobjectstorage
+ namespace = id34
+ compartment = ocid1.compartment.oc1..aaba
+ region = us-ashburn-1
+ provider = no_auth
+
+Modification times and hashes
+
+The modification time is stored as metadata on the object as
+opc-meta-mtime as floating point since the epoch, accurate to 1 ns.
+
+If the modification time needs to be updated rclone will attempt to
+perform a server side copy to update the modification time if the object
+can be copied in a single part. If the object is larger than 5 GiB, the
+object will be uploaded rather than copied.
+
+Note that reading this from the object takes an additional HEAD request
+as the metadata isn't returned in object listings.
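+
+For example, a long listing such as the following will read the
+modification times and therefore issue an extra HEAD request per object:
+
+    rclone lsl remote:bucket
+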
- ### Advanced options
+The MD5 hash algorithm is supported.
- Here are the Advanced options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).
+Multipart uploads
- #### --oos-storage-tier
+rclone supports multipart uploads with OOS which means that it can
+upload files bigger than 5 GiB.
- The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm
+Note that files uploaded both with multipart upload and through crypt
+remotes do not have MD5 sums.
- Properties:
+rclone switches from single part uploads to multipart uploads at the
+point specified by --oos-upload-cutoff. This can be a maximum of 5 GiB
+and a minimum of 0 (ie always upload multipart files).
- - Config: storage_tier
- - Env Var: RCLONE_OOS_STORAGE_TIER
- - Type: string
- - Default: "Standard"
- - Examples:
- - "Standard"
- - Standard storage tier, this is the default tier
- - "InfrequentAccess"
- - InfrequentAccess storage tier
- - "Archive"
- - Archive storage tier
+The chunk sizes used in the multipart upload are specified by
+--oos-chunk-size and the number of chunks uploaded concurrently is
+specified by --oos-upload-concurrency.
- #### --oos-upload-cutoff
+Multipart uploads will use --transfers * --oos-upload-concurrency *
+--oos-chunk-size extra memory. Single part uploads do not use extra
+memory.
- Cutoff for switching to chunked upload.
+Single part transfers can be faster than multipart transfers or slower
+depending on your latency from oos - the more latency, the more likely
+single part transfers will be faster.
- Any files larger than this will be uploaded in chunks of chunk_size.
- The minimum is 0 and the maximum is 5 GiB.
+Increasing --oos-upload-concurrency will increase throughput (8 would be
+a sensible value) and increasing --oos-chunk-size also increases
+throughput (16M would be sensible). Increasing either of these will use
+more memory. The default values are high enough to gain most of the
+possible performance without using too much memory.
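+
+As a rough worked example, with the defaults (--transfers 4,
+--oos-upload-concurrency 10, --oos-chunk-size 5Mi) multipart transfers
+can buffer up to about 4 * 10 * 5 MiB = 200 MiB. A tuned invocation
+along the lines suggested above might look like this (the values are
+only illustrative):
+
+    # roughly 4 * 8 * 16 MiB = 512 MiB of upload buffers
+    rclone copy ./artifacts remote:bucket \
+        --oos-upload-concurrency 8 \
+        --oos-chunk-size 16Mi
+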
- Properties:
+Standard options
- - Config: upload_cutoff
- - Env Var: RCLONE_OOS_UPLOAD_CUTOFF
- - Type: SizeSuffix
- - Default: 200Mi
+Here are the Standard options specific to oracleobjectstorage (Oracle
+Cloud Infrastructure Object Storage).
- #### --oos-chunk-size
+--oos-provider
- Chunk size to use for uploading.
+Choose your Auth Provider
- When uploading files larger than upload_cutoff or files with unknown
- size (e.g. from "rclone rcat" or uploaded with "rclone mount" they will be uploaded
- as multipart uploads using this chunk size.
+Properties:
- Note that "upload_concurrency" chunks of this size are buffered
- in memory per transfer.
+- Config: provider
+- Env Var: RCLONE_OOS_PROVIDER
+- Type: string
+- Default: "env_auth"
+- Examples:
+ - "env_auth"
+ - automatically pickup the credentials from runtime(env),
+ first one to provide auth wins
+ - "user_principal_auth"
+ - use an OCI user and an API key for authentication.
+ - you’ll need to put in a config file your tenancy OCID, user
+ OCID, region, the path, fingerprint to an API key.
+ - https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm
+ - "instance_principal_auth"
+ - use instance principals to authorize an instance to make API
+ calls.
+ - each instance has its own identity, and authenticates using
+ the certificates that are read from instance metadata.
+ - https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm
+ - "workload_identity_auth"
+ - use workload identity to grant OCI Container Engine for
+ Kubernetes workloads policy-driven access to OCI resources
+ using OCI Identity and Access Management (IAM).
+ - https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm
+ - "resource_principal_auth"
+ - use resource principals to make API calls
+ - "no_auth"
+ - no credentials needed, this is typically for reading public
+ buckets
- If you are transferring large files over high-speed links and you have
- enough memory, then increasing this will speed up the transfers.
+--oos-namespace
- Rclone will automatically increase the chunk size when uploading a
- large file of known size to stay below the 10,000 chunks limit.
+Object storage namespace
- Files of unknown size are uploaded with the configured
- chunk_size. Since the default chunk size is 5 MiB and there can be at
- most 10,000 chunks, this means that by default the maximum size of
- a file you can stream upload is 48 GiB. If you wish to stream upload
- larger files then you will need to increase chunk_size.
+Properties:
- Increasing the chunk size decreases the accuracy of the progress
- statistics displayed with "-P" flag.
+- Config: namespace
+- Env Var: RCLONE_OOS_NAMESPACE
+- Type: string
+- Required: true
+--oos-compartment
- Properties:
+Object storage compartment OCID
- - Config: chunk_size
- - Env Var: RCLONE_OOS_CHUNK_SIZE
- - Type: SizeSuffix
- - Default: 5Mi
+Properties:
- #### --oos-max-upload-parts
+- Config: compartment
+- Env Var: RCLONE_OOS_COMPARTMENT
+- Provider: !no_auth
+- Type: string
+- Required: true
- Maximum number of parts in a multipart upload.
+--oos-region
- This option defines the maximum number of multipart chunks to use
- when doing a multipart upload.
+Object storage Region
- OCI has max parts limit of 10,000 chunks.
+Properties:
- Rclone will automatically increase the chunk size when uploading a
- large file of a known size to stay below this number of chunks limit.
+- Config: region
+- Env Var: RCLONE_OOS_REGION
+- Type: string
+- Required: true
+--oos-endpoint
- Properties:
+Endpoint for Object storage API.
- - Config: max_upload_parts
- - Env Var: RCLONE_OOS_MAX_UPLOAD_PARTS
- - Type: int
- - Default: 10000
+Leave blank to use the default endpoint for the region.
- #### --oos-upload-concurrency
+Properties:
- Concurrency for multipart uploads.
+- Config: endpoint
+- Env Var: RCLONE_OOS_ENDPOINT
+- Type: string
+- Required: false
- This is the number of chunks of the same file that are uploaded
- concurrently.
+--oos-config-file
- If you are uploading small numbers of large files over high-speed links
- and these uploads do not fully utilize your bandwidth, then increasing
- this may help to speed up the transfers.
+Path to OCI config file
- Properties:
+Properties:
- - Config: upload_concurrency
- - Env Var: RCLONE_OOS_UPLOAD_CONCURRENCY
- - Type: int
- - Default: 10
+- Config: config_file
+- Env Var: RCLONE_OOS_CONFIG_FILE
+- Provider: user_principal_auth
+- Type: string
+- Default: "~/.oci/config"
+- Examples:
+ - "~/.oci/config"
+ - oci configuration file location
- #### --oos-copy-cutoff
+--oos-config-profile
- Cutoff for switching to multipart copy.
+Profile name inside the oci config file
- Any files larger than this that need to be server-side copied will be
- copied in chunks of this size.
+Properties:
- The minimum is 0 and the maximum is 5 GiB.
+- Config: config_profile
+- Env Var: RCLONE_OOS_CONFIG_PROFILE
+- Provider: user_principal_auth
+- Type: string
+- Default: "Default"
+- Examples:
+ - "Default"
+ - Use the default profile
- Properties:
+Advanced options
- - Config: copy_cutoff
- - Env Var: RCLONE_OOS_COPY_CUTOFF
- - Type: SizeSuffix
- - Default: 4.656Gi
+Here are the Advanced options specific to oracleobjectstorage (Oracle
+Cloud Infrastructure Object Storage).
- #### --oos-copy-timeout
+--oos-storage-tier
- Timeout for copy.
+The storage class to use when storing new objects in storage.
+https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm
- Copy is an asynchronous operation, specify timeout to wait for copy to succeed
+Properties:
+- Config: storage_tier
+- Env Var: RCLONE_OOS_STORAGE_TIER
+- Type: string
+- Default: "Standard"
+- Examples:
+ - "Standard"
+ - Standard storage tier, this is the default tier
+ - "InfrequentAccess"
+ - InfrequentAccess storage tier
+ - "Archive"
+ - Archive storage tier
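+
+For example, to upload new objects straight into the Archive tier
+(archived objects can later be brought back with the restore backend
+command described below):
+
+    rclone copy ./cold-data remote:bucket --oos-storage-tier Archive
+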
- Properties:
+--oos-upload-cutoff
- - Config: copy_timeout
- - Env Var: RCLONE_OOS_COPY_TIMEOUT
- - Type: Duration
- - Default: 1m0s
+Cutoff for switching to chunked upload.
- #### --oos-disable-checksum
+Any files larger than this will be uploaded in chunks of chunk_size. The
+minimum is 0 and the maximum is 5 GiB.
- Don't store MD5 checksum with object metadata.
+Properties:
- Normally rclone will calculate the MD5 checksum of the input before
- uploading it so it can add it to metadata on the object. This is great
- for data integrity checking but can cause long delays for large files
- to start uploading.
+- Config: upload_cutoff
+- Env Var: RCLONE_OOS_UPLOAD_CUTOFF
+- Type: SizeSuffix
+- Default: 200Mi
- Properties:
+--oos-chunk-size
- - Config: disable_checksum
- - Env Var: RCLONE_OOS_DISABLE_CHECKSUM
- - Type: bool
- - Default: false
+Chunk size to use for uploading.
- #### --oos-encoding
+When uploading files larger than upload_cutoff or files with unknown
+size (e.g. from "rclone rcat" or uploaded with "rclone mount") they will
+be uploaded as multipart uploads using this chunk size.
- The encoding for the backend.
+Note that "upload_concurrency" chunks of this size are buffered in
+memory per transfer.
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+If you are transferring large files over high-speed links and you have
+enough memory, then increasing this will speed up the transfers.
- Properties:
+Rclone will automatically increase the chunk size when uploading a large
+file of known size to stay below the 10,000 chunks limit.
- - Config: encoding
- - Env Var: RCLONE_OOS_ENCODING
- - Type: Encoding
- - Default: Slash,InvalidUtf8,Dot
+Files of unknown size are uploaded with the configured chunk_size. Since
+the default chunk size is 5 MiB and there can be at most 10,000 chunks,
+this means that by default the maximum size of a file you can stream
+upload is 48 GiB. If you wish to stream upload larger files then you
+will need to increase chunk_size.
- #### --oos-leave-parts-on-error
+Increasing the chunk size decreases the accuracy of the progress
+statistics displayed with "-P" flag.
- If true avoid calling abort upload on a failure, leaving all successfully uploaded parts for manual recovery.
+Properties:
- It should be set to true for resuming uploads across different sessions.
+- Config: chunk_size
+- Env Var: RCLONE_OOS_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 5Mi
- WARNING: Storing parts of an incomplete multipart upload counts towards space usage on object storage and will add
- additional costs if not cleaned up.
+--oos-max-upload-parts
+Maximum number of parts in a multipart upload.
- Properties:
+This option defines the maximum number of multipart chunks to use when
+doing a multipart upload.
- - Config: leave_parts_on_error
- - Env Var: RCLONE_OOS_LEAVE_PARTS_ON_ERROR
- - Type: bool
- - Default: false
+OCI has a max parts limit of 10,000 chunks.
- #### --oos-attempt-resume-upload
+Rclone will automatically increase the chunk size when uploading a large
+file of a known size to stay below this number of chunks limit.
- If true attempt to resume previously started multipart upload for the object.
- This will be helpful to speed up multipart transfers by resuming uploads from past session.
+Properties:
- WARNING: If chunk size differs in resumed session from past incomplete session, then the resumed multipart upload is
- aborted and a new multipart upload is started with the new chunk size.
+- Config: max_upload_parts
+- Env Var: RCLONE_OOS_MAX_UPLOAD_PARTS
+- Type: int
+- Default: 10000
- The flag leave_parts_on_error must be true to resume and optimize to skip parts that were already uploaded successfully.
+--oos-upload-concurrency
+Concurrency for multipart uploads.
- Properties:
+This is the number of chunks of the same file that are uploaded
+concurrently.
- - Config: attempt_resume_upload
- - Env Var: RCLONE_OOS_ATTEMPT_RESUME_UPLOAD
- - Type: bool
- - Default: false
+If you are uploading small numbers of large files over high-speed links
+and these uploads do not fully utilize your bandwidth, then increasing
+this may help to speed up the transfers.
- #### --oos-no-check-bucket
+Properties:
- If set, don't attempt to check the bucket exists or create it.
+- Config: upload_concurrency
+- Env Var: RCLONE_OOS_UPLOAD_CONCURRENCY
+- Type: int
+- Default: 10
- This can be useful when trying to minimise the number of transactions
- rclone does if you know the bucket exists already.
+--oos-copy-cutoff
- It can also be needed if the user you are using does not have bucket
- creation permissions.
+Cutoff for switching to multipart copy.
+Any files larger than this that need to be server-side copied will be
+copied in chunks of this size.
- Properties:
+The minimum is 0 and the maximum is 5 GiB.
- - Config: no_check_bucket
- - Env Var: RCLONE_OOS_NO_CHECK_BUCKET
- - Type: bool
- - Default: false
+Properties:
- #### --oos-sse-customer-key-file
+- Config: copy_cutoff
+- Env Var: RCLONE_OOS_COPY_CUTOFF
+- Type: SizeSuffix
+- Default: 4.656Gi
- To use SSE-C, a file containing the base64-encoded string of the AES-256 encryption key associated
- with the object. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.'
+--oos-copy-timeout
- Properties:
+Timeout for copy.
- - Config: sse_customer_key_file
- - Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY_FILE
- - Type: string
- - Required: false
- - Examples:
- - ""
- - None
+Copy is an asynchronous operation; specify a timeout to wait for the
+copy to succeed.
- #### --oos-sse-customer-key
+Properties:
- To use SSE-C, the optional header that specifies the base64-encoded 256-bit encryption key to use to
- encrypt or decrypt the data. Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is
- needed. For more information, see Using Your Own Keys for Server-Side Encryption
- (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)
+- Config: copy_timeout
+- Env Var: RCLONE_OOS_COPY_TIMEOUT
+- Type: Duration
+- Default: 1m0s
- Properties:
+--oos-disable-checksum
- - Config: sse_customer_key
- - Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY
- - Type: string
- - Required: false
- - Examples:
- - ""
- - None
+Don't store MD5 checksum with object metadata.
- #### --oos-sse-customer-key-sha256
+Normally rclone will calculate the MD5 checksum of the input before
+uploading it so it can add it to metadata on the object. This is great
+for data integrity checking but can cause long delays for large files to
+start uploading.
- If using SSE-C, The optional header that specifies the base64-encoded SHA256 hash of the encryption
- key. This value is used to check the integrity of the encryption key. see Using Your Own Keys for
- Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).
+Properties:
- Properties:
+- Config: disable_checksum
+- Env Var: RCLONE_OOS_DISABLE_CHECKSUM
+- Type: bool
+- Default: false
- - Config: sse_customer_key_sha256
- - Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY_SHA256
- - Type: string
- - Required: false
- - Examples:
- - ""
- - None
+--oos-encoding
- #### --oos-sse-kms-key-id
+The encoding for the backend.
- if using your own master key in vault, this header specifies the
- OCID (https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of a master encryption key used to call
- the Key Management service to generate a data encryption key or to encrypt or decrypt a data encryption key.
- Please note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
+See the encoding section in the overview for more info.
- Properties:
+Properties:
- - Config: sse_kms_key_id
- - Env Var: RCLONE_OOS_SSE_KMS_KEY_ID
- - Type: string
- - Required: false
- - Examples:
- - ""
- - None
+- Config: encoding
+- Env Var: RCLONE_OOS_ENCODING
+- Type: Encoding
+- Default: Slash,InvalidUtf8,Dot
- #### --oos-sse-customer-algorithm
+--oos-leave-parts-on-error
- If using SSE-C, the optional header that specifies "AES256" as the encryption algorithm.
- Object Storage supports "AES256" as the encryption algorithm. For more information, see
- Using Your Own Keys for Server-Side Encryption (https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).
+If true avoid calling abort upload on a failure, leaving all
+successfully uploaded parts for manual recovery.
- Properties:
+It should be set to true for resuming uploads across different sessions.
- - Config: sse_customer_algorithm
- - Env Var: RCLONE_OOS_SSE_CUSTOMER_ALGORITHM
- - Type: string
- - Required: false
- - Examples:
- - ""
- - None
- - "AES256"
- - AES256
+WARNING: Storing parts of an incomplete multipart upload counts towards
+space usage on object storage and will add additional costs if not
+cleaned up.
- ## Backend commands
+Properties:
- Here are the commands specific to the oracleobjectstorage backend.
+- Config: leave_parts_on_error
+- Env Var: RCLONE_OOS_LEAVE_PARTS_ON_ERROR
+- Type: bool
+- Default: false
- Run them with
+--oos-attempt-resume-upload
- rclone backend COMMAND remote:
+If true attempt to resume previously started multipart upload for the
+object. This will be helpful to speed up multipart transfers by resuming
+uploads from past session.
- The help below will explain what arguments each command takes.
+WARNING: If chunk size differs in resumed session from past incomplete
+session, then the resumed multipart upload is aborted and a new
+multipart upload is started with the new chunk size.
- See the [backend](https://rclone.org/commands/rclone_backend/) command for more
- info on how to pass options and arguments.
+The flag leave_parts_on_error must be true to resume and optimize to
+skip parts that were already uploaded successfully.
- These can be run on a running backend using the rc command
- [backend/command](https://rclone.org/rc/#backend-command).
+Properties:
- ### rename
+- Config: attempt_resume_upload
+- Env Var: RCLONE_OOS_ATTEMPT_RESUME_UPLOAD
+- Type: bool
+- Default: false
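+
+For example, to make a large transfer resumable across interrupted runs
+(remember to clean up any abandoned parts afterwards, e.g. with the
+cleanup backend command described below):
+
+    rclone copy ./large.iso remote:bucket \
+        --oos-attempt-resume-upload \
+        --oos-leave-parts-on-error
+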
- change the name of an object
+--oos-no-check-bucket
- rclone backend rename remote: [options] [+]
+If set, don't attempt to check the bucket exists or create it.
- This command can be used to rename a object.
+This can be useful when trying to minimise the number of transactions
+rclone does if you know the bucket exists already.
+
+It can also be needed if the user you are using does not have bucket
+creation permissions.
+
+Properties:
+
+- Config: no_check_bucket
+- Env Var: RCLONE_OOS_NO_CHECK_BUCKET
+- Type: bool
+- Default: false
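+
+For example, when the bucket is known to exist already the existence
+check can be skipped to minimise transactions:
+
+    rclone copy ./artifacts remote:bucket --oos-no-check-bucket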
+
+--oos-sse-customer-key-file
+
+To use SSE-C, a file containing the base64-encoded string of the AES-256
+encryption key associated with the object. Please note only one of
+sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
+
+Properties:
+
+- Config: sse_customer_key_file
+- Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY_FILE
+- Type: string
+- Required: false
+- Examples:
+ - ""
+ - None
+
+--oos-sse-customer-key
+
+To use SSE-C, the optional header that specifies the base64-encoded
+256-bit encryption key to use to encrypt or decrypt the data. Please
+note only one of sse_customer_key_file|sse_customer_key|sse_kms_key_id
+is needed. For more information, see Using Your Own Keys for Server-Side
+Encryption
+(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm)
+
+Properties:
+
+- Config: sse_customer_key
+- Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY
+- Type: string
+- Required: false
+- Examples:
+ - ""
+ - None
+
+--oos-sse-customer-key-sha256
+
+If using SSE-C, the optional header that specifies the base64-encoded
+SHA256 hash of the encryption key. This value is used to check the
+integrity of the encryption key. See Using Your Own Keys for Server-Side
+Encryption
+(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).
+
+Properties:
+
+- Config: sse_customer_key_sha256
+- Env Var: RCLONE_OOS_SSE_CUSTOMER_KEY_SHA256
+- Type: string
+- Required: false
+- Examples:
+ - ""
+ - None
+
+--oos-sse-kms-key-id
+
+If using your own master key in vault, this header specifies the OCID
+(https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm)
+of a master encryption key used to call the Key Management service to
+generate a data encryption key or to encrypt or decrypt a data
+encryption key. Please note only one of
+sse_customer_key_file|sse_customer_key|sse_kms_key_id is needed.
+
+Properties:
+
+- Config: sse_kms_key_id
+- Env Var: RCLONE_OOS_SSE_KMS_KEY_ID
+- Type: string
+- Required: false
+- Examples:
+ - ""
+ - None
+
+--oos-sse-customer-algorithm
+
+If using SSE-C, the optional header that specifies "AES256" as the
+encryption algorithm. Object Storage supports "AES256" as the encryption
+algorithm. For more information, see Using Your Own Keys for Server-Side
+Encryption
+(https://docs.cloud.oracle.com/Content/Object/Tasks/usingyourencryptionkeys.htm).
+
+Properties:
+
+- Config: sse_customer_algorithm
+- Env Var: RCLONE_OOS_SSE_CUSTOMER_ALGORITHM
+- Type: string
+- Required: false
+- Examples:
+ - ""
+ - None
+ - "AES256"
+ - AES256
+
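+For example, a minimal SSE-C sketch using a locally generated key file
+(the file name is only an example; set just one of the key options):
+
+    # 32 random bytes, base64 encoded, kept in a local file
+    openssl rand -base64 32 > sse-c.key
+    rclone copy ./data remote:bucket \
+        --oos-sse-customer-key-file sse-c.key \
+        --oos-sse-customer-algorithm AES256
+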
+--oos-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_OOS_DESCRIPTION
+- Type: string
+- Required: false
+
+Backend commands
+
+Here are the commands specific to the oracleobjectstorage backend.
+
+Run them with
+
+ rclone backend COMMAND remote:
+
+The help below will explain what arguments each command takes.
+
+See the backend command for more info on how to pass options and
+arguments.
+
+These can be run on a running backend using the rc command
+backend/command.
+
+rename
+
+change the name of an object
+
+ rclone backend rename remote: [options] [+]
+
+This command can be used to rename an object.
+
+Usage Examples:
+
+ rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
+
+list-multipart-uploads
+
+List the unfinished multipart uploads
+
+ rclone backend list-multipart-uploads remote: [options] [+]
+
+This command lists the unfinished multipart uploads in JSON format.
+
+ rclone backend list-multipart-uploads oos:bucket/path/to/object
+
+It returns a dictionary of buckets with values as lists of unfinished
+multipart uploads.
+
+You can call it with no bucket in which case it lists all buckets, with
+a bucket or with a bucket and path.
+
+ {
+ "test-bucket": [
+ {
+ "namespace": "test-namespace",
+ "bucket": "test-bucket",
+ "object": "600m.bin",
+ "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
+ "timeCreated": "2022-07-29T06:21:16.595Z",
+ "storageTier": "Standard"
+ }
+        ]
+    }
+
+cleanup
+
+Remove unfinished multipart uploads.
+
+ rclone backend cleanup remote: [options] [+]
+
+This command removes unfinished multipart uploads of age greater than
+max-age which defaults to 24 hours.
+
+Note that you can use --interactive/-i or --dry-run with this command to
+see what it would do.
+
+ rclone backend cleanup oos:bucket/path/to/object
+ rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+
+Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
+
+Options:
+
+- "max-age": Max age of upload to delete
+
+restore
+
+Restore objects from Archive to Standard storage
+
+ rclone backend restore remote: [options] [+]
+
+This command can be used to restore one or more objects from Archive to
+Standard storage.
Usage Examples:
- rclone backend rename oos:bucket relative-object-path-under-bucket object-new-name
+ rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
+ rclone backend restore oos:bucket -o hours=HOURS
+This flag also obeys the filters. Test first with --interactive/-i or
+--dry-run flags
- ### list-multipart-uploads
+ rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
- List the unfinished multipart uploads
+All the objects shown will be marked for restore, then
- rclone backend list-multipart-uploads remote: [options] [+]
+ rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
- This command lists the unfinished multipart uploads in JSON format.
-
- rclone backend list-multipart-uploads oos:bucket/path/to/object
-
- It returns a dictionary of buckets with values as lists of unfinished
- multipart uploads.
-
- You can call it with no bucket in which case it lists all bucket, with
- a bucket or with a bucket and path.
+ It returns a list of status dictionaries with Object Name and Status
+ keys. The Status will be "RESTORED" if it was successful or an error
+ message if not.
+ [
{
- "test-bucket": [
- {
- "namespace": "test-namespace",
- "bucket": "test-bucket",
- "object": "600m.bin",
- "uploadId": "51dd8114-52a4-b2f2-c42f-5291f05eb3c8",
- "timeCreated": "2022-07-29T06:21:16.595Z",
- "storageTier": "Standard"
- }
- ]
+        "Object": "test.txt",
+        "Status": "RESTORED"
+    },
+    {
+        "Object": "test/file4.txt",
+        "Status": "RESTORED"
+    }
+ ]
+Options:
- ### cleanup
+- "hours": The number of hours for which this object will be restored.
+ Default is 24 hrs.
- Remove unfinished multipart uploads.
+Tutorials
- rclone backend cleanup remote: [options] [+]
+Mounting Buckets
- This command removes unfinished multipart uploads of age greater than
- max-age which defaults to 24 hours.
+QingStor
- Note that you can use --interactive/-i or --dry-run with this command to see what
- it would do.
+Paths are specified as remote:bucket (or remote: for the lsd command.)
+You may put subdirectories in too, e.g. remote:bucket/path/to/dir.
- rclone backend cleanup oos:bucket/path/to/object
- rclone backend cleanup -o max-age=7w oos:bucket/path/to/object
+Configuration
- Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
+Here is an example of making a QingStor configuration. First run
+ rclone config
- Options:
-
- - "max-age": Max age of upload to delete
-
-
-
- ## Tutorials
- ### [Mounting Buckets](https://rclone.org/oracleobjectstorage/tutorial_mount/)
-
- # QingStor
-
- Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
- command.) You may put subdirectories in too, e.g. `remote:bucket/path/to/dir`.
-
- ## Configuration
-
- Here is an example of making an QingStor configuration. First run
-
- rclone config
-
- This will guide you through an interactive setup process.
-
-No remotes found, make a new one? n) New remote r) Rename remote c) Copy
-remote s) Set configuration password q) Quit config n/r/c/s/q> n name>
-remote Type of storage to configure. Choose a number from below, or type
-in your own value [snip] XX / QingStor Object Storage "qingstor" [snip]
-Storage> qingstor Get QingStor credentials from runtime. Only applies if
-access_key_id and secret_access_key is blank. Choose a number from
-below, or type in your own value 1 / Enter QingStor credentials in the
-next step "false" 2 / Get QingStor credentials from the environment
-(env vars or IAM) "true" env_auth> 1 QingStor Access Key ID - leave
-blank for anonymous access or runtime credentials. access_key_id>
-access_key QingStor Secret Access Key (password) - leave blank for
-anonymous access or runtime credentials. secret_access_key> secret_key
-Enter an endpoint URL to connection QingStor API. Leave blank will use
-the default value "https://qingstor.com:443" endpoint> Zone connect to.
-Default is "pek3a". Choose a number from below, or type in your own
-value / The Beijing (China) Three Zone 1 | Needs location constraint
-pek3a. "pek3a" / The Shanghai (China) First Zone 2 | Needs location
-constraint sh1a. "sh1a" zone> 1 Number of connection retry. Leave blank
-will use the default value "3". connection_retries> Remote config
--------------------- [remote] env_auth = false access_key_id =
-access_key secret_access_key = secret_key endpoint = zone = pek3a
-connection_retries = -------------------- y) Yes this is OK e) Edit this
-remote d) Delete this remote y/e/d> y
-
-
- This remote is called `remote` and can now be used like this
-
- See all buckets
-
- rclone lsd remote:
-
- Make a new bucket
-
- rclone mkdir remote:bucket
-
- List the contents of a bucket
-
- rclone ls remote:bucket
-
- Sync `/home/local/directory` to the remote bucket, deleting any excess
- files in the bucket.
-
- rclone sync --interactive /home/local/directory remote:bucket
-
- ### --fast-list
-
- This remote supports `--fast-list` which allows you to use fewer
- transactions in exchange for more memory. See the [rclone
- docs](https://rclone.org/docs/#fast-list) for more details.
-
- ### Multipart uploads
-
- rclone supports multipart uploads with QingStor which means that it can
- upload files bigger than 5 GiB. Note that files uploaded with multipart
- upload don't have an MD5SUM.
-
- Note that incomplete multipart uploads older than 24 hours can be
- removed with `rclone cleanup remote:bucket` just for one bucket
- `rclone cleanup remote:` for all buckets. QingStor does not ever
- remove incomplete multipart uploads so it may be necessary to run this
- from time to time.
-
- ### Buckets and Zone
-
- With QingStor you can list buckets (`rclone lsd`) using any zone,
- but you can only access the content of a bucket from the zone it was
- created in. If you attempt to access a bucket from the wrong zone,
- you will get an error, `incorrect zone, the bucket is not in 'XXX'
- zone`.
-
- ### Authentication
-
- There are two ways to supply `rclone` with a set of QingStor
- credentials. In order of precedence:
-
- - Directly in the rclone configuration file (as configured by `rclone config`)
- - set `access_key_id` and `secret_access_key`
- - Runtime configuration:
- - set `env_auth` to `true` in the config file
- - Exporting the following environment variables before running `rclone`
- - Access Key ID: `QS_ACCESS_KEY_ID` or `QS_ACCESS_KEY`
- - Secret Access Key: `QS_SECRET_ACCESS_KEY` or `QS_SECRET_KEY`
-
- ### Restricted filename characters
-
- The control characters 0x00-0x1F and / are replaced as in the [default
- restricted characters set](https://rclone.org/overview/#restricted-characters). Note
- that 0x7F is not replaced.
-
- Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
- as they can't be used in JSON strings.
-
-
- ### Standard options
-
- Here are the Standard options specific to qingstor (QingCloud Object Storage).
-
- #### --qingstor-env-auth
-
- Get QingStor credentials from runtime.
-
- Only applies if access_key_id and secret_access_key is blank.
-
- Properties:
-
- - Config: env_auth
- - Env Var: RCLONE_QINGSTOR_ENV_AUTH
- - Type: bool
- - Default: false
- - Examples:
- - "false"
- - Enter QingStor credentials in the next step.
- - "true"
- - Get QingStor credentials from the environment (env vars or IAM).
-
- #### --qingstor-access-key-id
-
- QingStor Access Key ID.
-
- Leave blank for anonymous access or runtime credentials.
-
- Properties:
-
- - Config: access_key_id
- - Env Var: RCLONE_QINGSTOR_ACCESS_KEY_ID
- - Type: string
- - Required: false
-
- #### --qingstor-secret-access-key
-
- QingStor Secret Access Key (password).
-
- Leave blank for anonymous access or runtime credentials.
-
- Properties:
-
- - Config: secret_access_key
- - Env Var: RCLONE_QINGSTOR_SECRET_ACCESS_KEY
- - Type: string
- - Required: false
-
- #### --qingstor-endpoint
+This will guide you through an interactive setup process.
+ No remotes found, make a new one?
+ n) New remote
+ r) Rename remote
+ c) Copy remote
+ s) Set configuration password
+ q) Quit config
+ n/r/c/s/q> n
+ name> remote
+ Type of storage to configure.
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / QingStor Object Storage
+ \ "qingstor"
+ [snip]
+ Storage> qingstor
+ Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.
+ Choose a number from below, or type in your own value
+ 1 / Enter QingStor credentials in the next step
+ \ "false"
+ 2 / Get QingStor credentials from the environment (env vars or IAM)
+ \ "true"
+ env_auth> 1
+ QingStor Access Key ID - leave blank for anonymous access or runtime credentials.
+ access_key_id> access_key
+ QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.
+ secret_access_key> secret_key
Enter an endpoint URL to connection QingStor API.
+ Leave blank will use the default value "https://qingstor.com:443"
+ endpoint>
+ Zone connect to. Default is "pek3a".
+ Choose a number from below, or type in your own value
+ / The Beijing (China) Three Zone
+ 1 | Needs location constraint pek3a.
+ \ "pek3a"
+ / The Shanghai (China) First Zone
+ 2 | Needs location constraint sh1a.
+ \ "sh1a"
+ zone> 1
+ Number of connection retry.
+ Leave blank will use the default value "3".
+ connection_retries>
+ Remote config
+ --------------------
+ [remote]
+ env_auth = false
+ access_key_id = access_key
+ secret_access_key = secret_key
+ endpoint =
+ zone = pek3a
+ connection_retries =
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
- Leave blank will use the default value "https://qingstor.com:443".
+This remote is called remote and can now be used like this
- Properties:
+See all buckets
- - Config: endpoint
- - Env Var: RCLONE_QINGSTOR_ENDPOINT
- - Type: string
- - Required: false
+ rclone lsd remote:
- #### --qingstor-zone
+Make a new bucket
- Zone to connect to.
+ rclone mkdir remote:bucket
- Default is "pek3a".
+List the contents of a bucket
- Properties:
+ rclone ls remote:bucket
- - Config: zone
- - Env Var: RCLONE_QINGSTOR_ZONE
- - Type: string
- - Required: false
- - Examples:
- - "pek3a"
- - The Beijing (China) Three Zone.
- - Needs location constraint pek3a.
- - "sh1a"
- - The Shanghai (China) First Zone.
- - Needs location constraint sh1a.
- - "gd2a"
- - The Guangdong (China) Second Zone.
- - Needs location constraint gd2a.
+Sync /home/local/directory to the remote bucket, deleting any excess
+files in the bucket.
- ### Advanced options
+ rclone sync --interactive /home/local/directory remote:bucket
- Here are the Advanced options specific to qingstor (QingCloud Object Storage).
+--fast-list
- #### --qingstor-connection-retries
+This remote supports --fast-list which allows you to use fewer
+transactions in exchange for more memory. See the rclone docs for more
+details.
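+
+For example, a recursive listing with fewer transactions can be done
+like this (the bucket name is a placeholder):
+
+    rclone ls --fast-list remote:bucket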
- Number of connection retries.
+Multipart uploads
- Properties:
+rclone supports multipart uploads with QingStor which means that it can
+upload files bigger than 5 GiB. Note that files uploaded with multipart
+upload don't have an MD5SUM.
- - Config: connection_retries
- - Env Var: RCLONE_QINGSTOR_CONNECTION_RETRIES
- - Type: int
- - Default: 3
+Note that incomplete multipart uploads older than 24 hours can be
+removed with rclone cleanup remote:bucket for just one bucket or with
+rclone cleanup remote: for all buckets. QingStor does not ever remove
+incomplete multipart uploads so it may be necessary to run this from
+time to time.
- #### --qingstor-upload-cutoff
+Buckets and Zone
- Cutoff for switching to chunked upload.
+With QingStor you can list buckets (rclone lsd) using any zone, but you
+can only access the content of a bucket from the zone it was created in.
+If you attempt to access a bucket from the wrong zone, you will get an
+error, incorrect zone, the bucket is not in 'XXX' zone.
- Any files larger than this will be uploaded in chunks of chunk_size.
- The minimum is 0 and the maximum is 5 GiB.
+Authentication
- Properties:
+There are two ways to supply rclone with a set of QingStor credentials.
+In order of precedence:
- - Config: upload_cutoff
- - Env Var: RCLONE_QINGSTOR_UPLOAD_CUTOFF
- - Type: SizeSuffix
- - Default: 200Mi
+- Directly in the rclone configuration file (as configured by
+ rclone config)
+ - set access_key_id and secret_access_key
+- Runtime configuration:
+ - set env_auth to true in the config file
+ - Exporting the following environment variables before running
+ rclone
+ - Access Key ID: QS_ACCESS_KEY_ID or QS_ACCESS_KEY
+ - Secret Access Key: QS_SECRET_ACCESS_KEY or QS_SECRET_KEY
- #### --qingstor-chunk-size
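+
+For example, with env_auth set to true in the config, runtime
+credentials can be supplied like this before calling rclone (the key
+values are placeholders):
+
+    export QS_ACCESS_KEY_ID=your_access_key
+    export QS_SECRET_ACCESS_KEY=your_secret_key
+    rclone lsd remote: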
+Restricted filename characters
- Chunk size to use for uploading.
+The control characters 0x00-0x1F and / are replaced as in the default
+restricted characters set. Note that 0x7F is not replaced.
- When uploading files larger than upload_cutoff they will be uploaded
- as multipart uploads using this chunk size.
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON
+strings.
- Note that "--qingstor-upload-concurrency" chunks of this size are buffered
- in memory per transfer.
+Standard options
- If you are transferring large files over high-speed links and you have
- enough memory, then increasing this will speed up the transfers.
+Here are the Standard options specific to qingstor (QingCloud Object
+Storage).
- Properties:
+--qingstor-env-auth
- - Config: chunk_size
- - Env Var: RCLONE_QINGSTOR_CHUNK_SIZE
- - Type: SizeSuffix
- - Default: 4Mi
+Get QingStor credentials from runtime.
- #### --qingstor-upload-concurrency
+Only applies if access_key_id and secret_access_key is blank.
- Concurrency for multipart uploads.
+Properties:
- This is the number of chunks of the same file that are uploaded
- concurrently.
+- Config: env_auth
+- Env Var: RCLONE_QINGSTOR_ENV_AUTH
+- Type: bool
+- Default: false
+- Examples:
+ - "false"
+ - Enter QingStor credentials in the next step.
+ - "true"
+ - Get QingStor credentials from the environment (env vars or
+ IAM).
- NB if you set this to > 1 then the checksums of multipart uploads
- become corrupted (the uploads themselves are not corrupted though).
+--qingstor-access-key-id
- If you are uploading small numbers of large files over high-speed links
- and these uploads do not fully utilize your bandwidth, then increasing
- this may help to speed up the transfers.
+QingStor Access Key ID.
- Properties:
+Leave blank for anonymous access or runtime credentials.
- - Config: upload_concurrency
- - Env Var: RCLONE_QINGSTOR_UPLOAD_CONCURRENCY
- - Type: int
- - Default: 1
+Properties:
- #### --qingstor-encoding
+- Config: access_key_id
+- Env Var: RCLONE_QINGSTOR_ACCESS_KEY_ID
+- Type: string
+- Required: false
- The encoding for the backend.
+--qingstor-secret-access-key
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+QingStor Secret Access Key (password).
- Properties:
+Leave blank for anonymous access or runtime credentials.
- - Config: encoding
- - Env Var: RCLONE_QINGSTOR_ENCODING
- - Type: Encoding
- - Default: Slash,Ctl,InvalidUtf8
+Properties:
+- Config: secret_access_key
+- Env Var: RCLONE_QINGSTOR_SECRET_ACCESS_KEY
+- Type: string
+- Required: false
+--qingstor-endpoint
- ## Limitations
+Enter an endpoint URL to connection QingStor API.
- `rclone about` is not supported by the qingstor backend. Backends without
- this capability cannot determine free space for an rclone mount or
- use policy `mfs` (most free space) as a member of an rclone union
- remote.
+Leave blank will use the default value "https://qingstor.com:443".
- See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/)
+Properties:
- # Quatrix
+- Config: endpoint
+- Env Var: RCLONE_QINGSTOR_ENDPOINT
+- Type: string
+- Required: false
- Quatrix by Maytech is [Quatrix Secure Compliant File Sharing | Maytech](https://www.maytech.net/products/quatrix-business).
+--qingstor-zone
- Paths are specified as `remote:path`
+Zone to connect to.
- Paths may be as deep as required, e.g., `remote:directory/subdirectory`.
+Default is "pek3a".
- The initial setup for Quatrix involves getting an API Key from Quatrix. You can get the API key in the user's profile at `https:///profile/api-keys`
- or with the help of the API - https://docs.maytech.net/quatrix/quatrix-api/api-explorer#/API-Key/post_api_key_create.
+Properties:
- See complete Swagger documentation for Quatrix - https://docs.maytech.net/quatrix/quatrix-api/api-explorer
+- Config: zone
+- Env Var: RCLONE_QINGSTOR_ZONE
+- Type: string
+- Required: false
+- Examples:
+ - "pek3a"
+ - The Beijing (China) Three Zone.
+ - Needs location constraint pek3a.
+ - "sh1a"
+ - The Shanghai (China) First Zone.
+ - Needs location constraint sh1a.
+ - "gd2a"
+ - The Guangdong (China) Second Zone.
+ - Needs location constraint gd2a.
- ## Configuration
+Advanced options
- Here is an example of how to make a remote called `remote`. First run:
+Here are the Advanced options specific to qingstor (QingCloud Object
+Storage).
- rclone config
+--qingstor-connection-retries
- This will guide you through an interactive setup process:
+Number of connection retries.
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> remote Type of storage to
-configure. Choose a number from below, or type in your own value [snip]
-XX / Quatrix by Maytech "quatrix" [snip] Storage> quatrix API key for
-accessing Quatrix account. api_key> your_api_key Host name of Quatrix
-account. host> example.quatrix.it
+Properties:
- --------------------
- [remote] api_key =
- your_api_key host =
- example.quatrix.it
- --------------------
- y) Yes this is OK e)
- Edit this remote d)
- Delete this remote
- y/e/d> y ```
+- Config: connection_retries
+- Env Var: RCLONE_QINGSTOR_CONNECTION_RETRIES
+- Type: int
+- Default: 3
- Once configured you
- can then use rclone
- like this,
+--qingstor-upload-cutoff
- List directories in
- top level of your
- Quatrix
+Cutoff for switching to chunked upload.
- rclone lsd remote:
+Any files larger than this will be uploaded in chunks of chunk_size. The
+minimum is 0 and the maximum is 5 GiB.
- List all the files
- in your Quatrix
+Properties:
- rclone ls remote:
+- Config: upload_cutoff
+- Env Var: RCLONE_QINGSTOR_UPLOAD_CUTOFF
+- Type: SizeSuffix
+- Default: 200Mi
- To copy a local
- directory to an
- Quatrix directory
- called backup
-
- rclone copy
- /home/source
- remote:backup
-
- ### API key validity
-
- API Key is created
- with no expiration
- date. It will be
- valid until you
- delete or deactivate
- it in your account.
- After disabling, the
- API Key can be
- enabled back. If the
- API Key was deleted
- and a new key was
- created, you can
- update it in rclone
- config. The same
- happens if the
- hostname was
- changed.
-
- ``` $ rclone config
- Current remotes:
-
- Name Type ==== ====
- remote quatrix
-
- e) Edit existing
- remote n) New remote
- d) Delete remote r)
- Rename remote c)
- Copy remote s) Set
- configuration
- password q) Quit
- config
- e/n/d/r/c/s/q> e
- Choose a number from
- below, or type in an
- existing value 1 >
- remote remote>
- remote
- --------------------
-
-[remote] type = quatrix host = some_host.quatrix.it api_key =
-your_api_key -------------------- Edit remote Option api_key. API key
-for accessing Quatrix account Enter a string value. Press Enter for the
-default (your_api_key) api_key> Option host. Host name of Quatrix
-account Enter a string value. Press Enter for the default
-(some_host.quatrix.it).
-
- --------------------------------------------------
- [remote] type = quatrix host =
- some_host.quatrix.it api_key = your_api_key
- --------------------------------------------------
- y) Yes this is OK e) Edit this remote d) Delete
- this remote y/e/d> y ```
-
- ### Modification times and hashes
-
- Quatrix allows modification times to be set on
- objects accurate to 1 microsecond. These will be
- used to detect whether objects need syncing or
- not.
-
- Quatrix does not support hashes, so you cannot use
- the --checksum flag.
-
- ### Restricted filename characters
-
- File names in Quatrix are case sensitive and have
- limitations like the maximum length of a filename
- is 255, and the minimum length is 1. A file name
- cannot be equal to . or .. nor contain / , \ or
- non-printable ascii.
+--qingstor-chunk-size
- ### Transfers
+Chunk size to use for uploading.
- For files above 50 MiB rclone will use a chunked
- transfer. Rclone will upload up to --transfers
- chunks at the same time (shared among all
- multipart uploads). Chunks are buffered in memory,
- and the minimal chunk size is 10_000_000 bytes by
- default, and it can be changed in the advanced
- configuration, so increasing --transfers will
- increase the memory use. The chunk size has a
- maximum size limit, which is set to 100_000_000
- bytes by default and can be changed in the
- advanced configuration. The size of the uploaded
- chunk will dynamically change depending on the
- upload speed. The total memory use equals the
- number of transfers multiplied by the minimal
- chunk size. In case there's free memory allocated
- for the upload (which equals the difference of
- maximal_summary_chunk_size and minimal_chunk_size
- * transfers), the chunk size may increase in case
- of high upload speed. As well as it can decrease
- in case of upload speed problems. If no free
- memory is available, all chunks will equal
- minimal_chunk_size.
+When uploading files larger than upload_cutoff they will be uploaded as
+multipart uploads using this chunk size.
- ### Deleting files
+Note that "--qingstor-upload-concurrency" chunks of this size are
+buffered in memory per transfer.
- Files you delete with rclone will end up in Trash
- and be stored there for 30 days. Quatrix also
- provides an API to permanently delete files and an
- API to empty the Trash so that you can remove
- files permanently from your account.
+If you are transferring large files over high-speed links and you have
+enough memory, then increasing this will speed up the transfers.
- ### Standard options
+Properties:
- Here are the Standard options specific to quatrix
- (Quatrix by Maytech).
+- Config: chunk_size
+- Env Var: RCLONE_QINGSTOR_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 4Mi
- #### --quatrix-api-key
+--qingstor-upload-concurrency
- API key for accessing Quatrix account
+Concurrency for multipart uploads.
- Properties:
+This is the number of chunks of the same file that are uploaded
+concurrently.
- - Config: api_key - Env Var:
- RCLONE_QUATRIX_API_KEY - Type: string - Required:
- true
+NB if you set this to > 1 then the checksums of multipart uploads become
+corrupted (the uploads themselves are not corrupted though).
- #### --quatrix-host
+If you are uploading small numbers of large files over high-speed links
+and these uploads do not fully utilize your bandwidth, then increasing
+this may help to speed up the transfers.
- Host name of Quatrix account
+Properties:
- Properties:
+- Config: upload_concurrency
+- Env Var: RCLONE_QINGSTOR_UPLOAD_CONCURRENCY
+- Type: int
+- Default: 1
- - Config: host - Env Var: RCLONE_QUATRIX_HOST -
- Type: string - Required: true
+--qingstor-encoding
- ### Advanced options
+The encoding for the backend.
- Here are the Advanced options specific to quatrix
- (Quatrix by Maytech).
+See the encoding section in the overview for more info.
- #### --quatrix-encoding
+Properties:
- The encoding for the backend.
+- Config: encoding
+- Env Var: RCLONE_QINGSTOR_ENCODING
+- Type: Encoding
+- Default: Slash,Ctl,InvalidUtf8
- See the encoding section in the overview for more
- info.
+--qingstor-description
- Properties:
+Description of the remote
- - Config: encoding - Env Var:
- RCLONE_QUATRIX_ENCODING - Type: Encoding -
- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+Properties:
- #### --quatrix-effective-upload-time
+- Config: description
+- Env Var: RCLONE_QINGSTOR_DESCRIPTION
+- Type: string
+- Required: false
- Wanted upload time for one chunk
+Limitations
- Properties:
+rclone about is not supported by the qingstor backend. Backends without
+this capability cannot determine free space for an rclone mount or use
+policy mfs (most free space) as a member of an rclone union remote.
- - Config: effective_upload_time - Env Var:
- RCLONE_QUATRIX_EFFECTIVE_UPLOAD_TIME - Type:
- string - Default: "4s"
+See List of backends that do not support rclone about and rclone about
- #### --quatrix-minimal-chunk-size
+Quatrix
- The minimal size for one chunk
+Quatrix by Maytech is Quatrix Secure Compliant File Sharing | Maytech.
- Properties:
+Paths are specified as remote:path
- - Config: minimal_chunk_size - Env Var:
- RCLONE_QUATRIX_MINIMAL_CHUNK_SIZE - Type:
- SizeSuffix - Default: 9.537Mi
-
- #### --quatrix-maximal-summary-chunk-size
-
- The maximal summary for all chunks. It should not
- be less than 'transfers'*'minimal_chunk_size'
+Paths may be as deep as required, e.g., remote:directory/subdirectory.
- Properties:
-
- - Config: maximal_summary_chunk_size - Env Var:
- RCLONE_QUATRIX_MAXIMAL_SUMMARY_CHUNK_SIZE - Type:
- SizeSuffix - Default: 95.367Mi
+The initial setup for Quatrix involves getting an API Key from Quatrix.
+You can get the API key in the user's profile at
+https:///profile/api-keys or with the help of the API -
+https://docs.maytech.net/quatrix/quatrix-api/api-explorer#/API-Key/post_api_key_create.
- #### --quatrix-hard-delete
+See complete Swagger documentation for Quatrix -
+https://docs.maytech.net/quatrix/quatrix-api/api-explorer
- Delete files permanently rather than putting them
- into the trash.
+Configuration
- Properties:
+Here is an example of how to make a remote called remote. First run:
- - Config: hard_delete - Env Var:
- RCLONE_QUATRIX_HARD_DELETE - Type: bool - Default:
- false
+ rclone config
- ## Storage usage
+This will guide you through an interactive setup process:
- The storage usage in Quatrix is restricted to the
- account during the purchase. You can restrict any
- user with a smaller storage limit. The account
- limit is applied if the user has no custom storage
- limit. Once you've reached the limit, the upload
- of files will fail. This can be fixed by freeing
- up the space or increasing the quota.
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> remote
+ Type of storage to configure.
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / Quatrix by Maytech
+ \ "quatrix"
+ [snip]
+ Storage> quatrix
+ API key for accessing Quatrix account.
+ api_key> your_api_key
+ Host name of Quatrix account.
+ host> example.quatrix.it
- ## Server-side operations
-
- Quatrix supports server-side operations (copy and
- move). In case of conflict, files are overwritten
- during server-side operation.
+ --------------------
+ [remote]
+ api_key = your_api_key
+ host = example.quatrix.it
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+Once configured you can then use rclone like this,
- # Sia
+List directories in top level of your Quatrix
+
+ rclone lsd remote:
+
+List all the files in your Quatrix
- Sia (sia.tech) is a decentralized cloud storage
- platform based on the blockchain technology. With
- rclone you can use it like any other remote
- filesystem or mount Sia folders locally. The
- technology behind it involves a number of new
- concepts such as Siacoins and Wallet, Blockchain
- and Consensus, Renting and Hosting, and so on. If
- you are new to it, you'd better first familiarize
- yourself using their excellent support
- documentation.
+ rclone ls remote:
- ## Introduction
+To copy a local directory to a Quatrix directory called backup
- Before you can use rclone with Sia, you will need
- to have a running copy of Sia-UI or siad (the Sia
- daemon) locally on your computer or on local
- network (e.g. a NAS). Please follow the Get
- started guide and install one.
+ rclone copy /home/source remote:backup
- rclone interacts with Sia network by talking to
- the Sia daemon via HTTP API which is usually
- available on port 9980. By default you will run
- the daemon locally on the same computer so it's
- safe to leave the API password blank (the API URL
- will be http://127.0.0.1:9980 making external
- access impossible).
+API key validity
- However, if you want to access Sia daemon running
- on another node, for example due to memory
- constraints or because you want to share single
- daemon between several rclone and Sia-UI
- instances, you'll need to make a few more
- provisions: - Ensure you have Sia daemon installed
- directly or in a docker container because Sia-UI
- does not support this mode natively. - Run it on
- externally accessible port, for example provide
- --api-addr :9980 and --disable-api-security
- arguments on the daemon command line. - Enforce
- API password for the siad daemon via environment
- variable SIA_API_PASSWORD or text file named
- apipassword in the daemon directory. - Set rclone
- backend option api_password taking it from above
- locations.
+An API Key is created with no expiration date. It will be valid until
+you delete or deactivate it in your account. After disabling, the API
+Key can be re-enabled. If the API Key was deleted and a new key was
+created, you can update it in rclone config. The same applies if the
+hostname was changed.
- Notes: 1. If your wallet is locked, rclone cannot
- unlock it automatically. You should either unlock
- it in advance by using Sia-UI or via command line
- siac wallet unlock. Alternatively you can make
- siad unlock your wallet automatically upon startup
- by running it with environment variable
- SIA_WALLET_PASSWORD. 2. If siad cannot find the
- SIA_API_PASSWORD variable or the apipassword file
- in the SIA_DIR directory, it will generate a
- random password and store in the text file named
- apipassword under YOUR_HOME/.sia/ directory on
- Unix or
- C:\Users\YOUR_HOME\AppData\Local\Sia\apipassword
- on Windows. Remember this when you configure
- password in rclone. 3. The only way to use siad
- without API password is to run it on localhost
- with command line argument --authorize-api=false,
- but this is insecure and strongly discouraged.
-
- ## Configuration
-
- Here is an example of how to make a sia remote
- called mySia. First, run:
-
- rclone config
-
- This will guide you through an interactive setup
- process:
-
- ``` No remotes found, make a new one? n) New
- remote s) Set configuration password q) Quit
- config n/s/q> n name> mySia Type of storage to
- configure. Enter a string value. Press Enter for
- the default (""). Choose a number from below, or
- type in your own value ... 29 / Sia Decentralized
- Cloud "sia" ... Storage> sia Sia daemon API URL,
- like http://sia.daemon.host:9980. Note that siad
- must run with --disable-api-security to open API
- port for other hosts (not recommended). Keep
- default if Sia daemon runs on localhost. Enter a
- string value. Press Enter for the default
- ("http://127.0.0.1:9980"). api_url>
- http://127.0.0.1:9980 Sia Daemon API Password. Can
- be found in the apipassword file located in
- HOME/.sia/ or in the daemon directory. y) Yes type
- in my own password g) Generate random password n)
- No leave this optional password blank (default)
- y/g/n> y Enter the password: password: Confirm the
- password: password: Edit advanced config? y) Yes
- n) No (default) y/n> n
- --------------------------------------------------
-
-[mySia] type = sia api_url = http://127.0.0.1:9980 api_password = ***
-ENCRYPTED *** -------------------- y) Yes this is OK (default) e) Edit
-this remote d) Delete this remote y/e/d> y
-
-
- Once configured, you can then use `rclone` like this:
-
- - List directories in top level of your Sia storage
-
-rclone lsd mySia:
-
-
- - List all the files in your Sia storage
-
-rclone ls mySia:
-
-
- - Upload a local directory to the Sia directory called _backup_
-
-rclone copy /home/source mySia:backup
-
-
-
- ### Standard options
-
- Here are the Standard options specific to sia (Sia Decentralized Cloud).
-
- #### --sia-api-url
+ $ rclone config
+ Current remotes:
+ Name Type
+ ==== ====
+ remote quatrix
+
+ e) Edit existing remote
+ n) New remote
+ d) Delete remote
+ r) Rename remote
+ c) Copy remote
+ s) Set configuration password
+ q) Quit config
+ e/n/d/r/c/s/q> e
+ Choose a number from below, or type in an existing value
+ 1 > remote
+ remote> remote
+ --------------------
+ [remote]
+ type = quatrix
+ host = some_host.quatrix.it
+ api_key = your_api_key
+ --------------------
+ Edit remote
+ Option api_key.
+ API key for accessing Quatrix account
+ Enter a string value. Press Enter for the default (your_api_key)
+ api_key>
+ Option host.
+ Host name of Quatrix account
+ Enter a string value. Press Enter for the default (some_host.quatrix.it).
+
+ --------------------
+ [remote]
+ type = quatrix
+ host = some_host.quatrix.it
+ api_key = your_api_key
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+Modification times and hashes
+
+Quatrix allows modification times to be set on objects accurate to 1
+microsecond. These will be used to detect whether objects need syncing
+or not.
+
+Quatrix does not support hashes, so you cannot use the --checksum flag.
+
+Restricted filename characters
+
+File names in Quatrix are case sensitive and have limitations: the
+maximum length of a filename is 255 characters and the minimum length
+is 1. A file name cannot be equal to . or .., nor contain / , \ or
+non-printable ascii.
+
+Transfers
+
+For files above 50 MiB rclone will use a chunked transfer. Rclone will
+upload up to --transfers chunks at the same time (shared among all
+multipart uploads). Chunks are buffered in memory, and the minimal chunk
+size is 10_000_000 bytes by default; it can be changed in the advanced
+configuration, so increasing --transfers will increase the memory use.
+The chunk size has a maximum size limit, which is set to 100_000_000
+bytes by default and can also be changed in the advanced configuration.
+The size of the uploaded chunk changes dynamically depending on the
+upload speed. The total memory use equals the number of transfers
+multiplied by the minimal chunk size. If there is free memory allocated
+for the upload (which equals maximal_summary_chunk_size minus
+minimal_chunk_size * transfers), the chunk size may increase when the
+upload speed is high and decrease again if there are upload speed
+problems. If no free memory is available, all chunks will equal
+minimal_chunk_size.
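+
+As a rough illustration using the defaults quoted above: with
+--transfers 4 the baseline memory use is 4 * 10_000_000 = 40_000_000
+bytes, and chunks can only grow towards the 100_000_000 byte maximum
+while maximal_summary_chunk_size - minimal_chunk_size * transfers still
+leaves free memory.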
+
+Deleting files
+
+Files you delete with rclone will end up in Trash and be stored there
+for 30 days. Quatrix also provides an API to permanently delete files
+and an API to empty the Trash so that you can remove files permanently
+from your account.
+
+Standard options
+
+Here are the Standard options specific to quatrix (Quatrix by Maytech).
+
+--quatrix-api-key
+
+API key for accessing Quatrix account
+
+Properties:
+
+- Config: api_key
+- Env Var: RCLONE_QUATRIX_API_KEY
+- Type: string
+- Required: true
+
+--quatrix-host
+
+Host name of Quatrix account
+
+Properties:
+
+- Config: host
+- Env Var: RCLONE_QUATRIX_HOST
+- Type: string
+- Required: true
+
+Advanced options
+
+Here are the Advanced options specific to quatrix (Quatrix by Maytech).
+
+--quatrix-encoding
+
+The encoding for the backend.
+
+See the encoding section in the overview for more info.
+
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_QUATRIX_ENCODING
+- Type: Encoding
+- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+
+--quatrix-effective-upload-time
+
+Wanted upload time for one chunk
+
+Properties:
+
+- Config: effective_upload_time
+- Env Var: RCLONE_QUATRIX_EFFECTIVE_UPLOAD_TIME
+- Type: string
+- Default: "4s"
+
+--quatrix-minimal-chunk-size
+
+The minimal size for one chunk
+
+Properties:
+
+- Config: minimal_chunk_size
+- Env Var: RCLONE_QUATRIX_MINIMAL_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 9.537Mi
+
+--quatrix-maximal-summary-chunk-size
+
+The maximal summary for all chunks. It should not be less than
+'transfers'*'minimal_chunk_size'
+
+Properties:
+
+- Config: maximal_summary_chunk_size
+- Env Var: RCLONE_QUATRIX_MAXIMAL_SUMMARY_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 95.367Mi
+
+--quatrix-hard-delete
+
+Delete files permanently rather than putting them into the trash
+
+Properties:
+
+- Config: hard_delete
+- Env Var: RCLONE_QUATRIX_HARD_DELETE
+- Type: bool
+- Default: false
+
+--quatrix-skip-project-folders
+
+Skip project folders in operations
+
+Properties:
+
+- Config: skip_project_folders
+- Env Var: RCLONE_QUATRIX_SKIP_PROJECT_FOLDERS
+- Type: bool
+- Default: false
+
+--quatrix-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_QUATRIX_DESCRIPTION
+- Type: string
+- Required: false
+
+Storage usage
+
+The storage usage in Quatrix is restricted to the amount purchased for
+the account. You can restrict any user to a smaller storage limit. The
+account limit is applied if the user has no custom storage limit. Once
+you've reached the limit, the upload of files will fail. This can be
+fixed by freeing up space or increasing the quota.
+
+Server-side operations
+
+Quatrix supports server-side operations (copy and move). In case of
+conflict, files are overwritten during server-side operation.
+
+Sia
+
+Sia (sia.tech) is a decentralized cloud storage platform based on
+blockchain technology. With rclone you can use it like any other remote
+filesystem or mount Sia folders locally. The technology behind it
+involves a number of new concepts such as Siacoins and Wallet,
+Blockchain and Consensus, Renting and Hosting, and so on. If you are new
+to it, it is best to first familiarize yourself using their excellent
+support documentation.
+
+Introduction
+
+Before you can use rclone with Sia, you will need to have a running copy
+of Sia-UI or siad (the Sia daemon) locally on your computer or on your
+local network (e.g. a NAS). Please follow the Get started guide and
+install one.
+
+rclone interacts with the Sia network by talking to the Sia daemon via
+its HTTP API, which is usually available on port 9980. By default you
+will run the daemon locally on the same computer, so it's safe to leave
+the API password blank (the API URL will be http://127.0.0.1:9980,
+making external access impossible).
+
+However, if you want to access a Sia daemon running on another node, for
+example due to memory constraints or because you want to share a single
+daemon between several rclone and Sia-UI instances, you'll need to make
+a few more provisions:
+
+- Ensure you have Sia daemon installed directly or in a docker
+  container because Sia-UI does not support this mode natively.
+- Run it on an externally accessible port, for example provide
+  --api-addr :9980 and --disable-api-security arguments on the daemon
+  command line.
+- Enforce an API password for the siad daemon via environment variable
+  SIA_API_PASSWORD or a text file named apipassword in the daemon
+  directory.
+- Set the rclone backend option api_password taking it from the above
+  locations.
+
+Notes:
+
+1. If your wallet is locked, rclone cannot unlock it automatically. You
+   should either unlock it in advance by using Sia-UI or via the command
+   line siac wallet unlock. Alternatively you can make siad unlock your
+   wallet automatically upon startup by running it with the environment
+   variable SIA_WALLET_PASSWORD.
+2. If siad cannot find the SIA_API_PASSWORD variable or the apipassword
+   file in the SIA_DIR directory, it will generate a random password and
+   store it in the text file named apipassword under the YOUR_HOME/.sia/
+   directory on Unix or
+   C:\Users\YOUR_HOME\AppData\Local\Sia\apipassword on Windows. Remember
+   this when you configure the password in rclone.
+3. The only way to use siad without an API password is to run it on
+   localhost with the command line argument --authorize-api=false, but
+   this is insecure and strongly discouraged.
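+
+For the remote-daemon case described above, a minimal sketch of the
+rclone side might look like this (nas.local is a placeholder host, and
+api_password must be the obscured form of the daemon's API password -
+see rclone obscure):
+
+    [mySia]
+    type = sia
+    api_url = http://nas.local:9980
+    api_password = <output of rclone obscure>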
+
+Configuration
+
+Here is an example of how to make a sia remote called mySia. First, run:
+
+ rclone config
+
+This will guide you through an interactive setup process:
+
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> mySia
+ Type of storage to configure.
+ Enter a string value. Press Enter for the default ("").
+ Choose a number from below, or type in your own value
+ ...
+ 29 / Sia Decentralized Cloud
+ \ "sia"
+ ...
+ Storage> sia
Sia daemon API URL, like http://sia.daemon.host:9980.
-
Note that siad must run with --disable-api-security to open API port for other hosts (not recommended).
Keep default if Sia daemon runs on localhost.
-
- Properties:
-
- - Config: api_url
- - Env Var: RCLONE_SIA_API_URL
- - Type: string
- - Default: "http://127.0.0.1:9980"
-
- #### --sia-api-password
-
+ Enter a string value. Press Enter for the default ("http://127.0.0.1:9980").
+ api_url> http://127.0.0.1:9980
Sia Daemon API Password.
-
Can be found in the apipassword file located in HOME/.sia/ or in the daemon directory.
+ y) Yes type in my own password
+ g) Generate random password
+ n) No leave this optional password blank (default)
+ y/g/n> y
+ Enter the password:
+ password:
+ Confirm the password:
+ password:
+ Edit advanced config?
+ y) Yes
+ n) No (default)
+ y/n> n
+ --------------------
+ [mySia]
+ type = sia
+ api_url = http://127.0.0.1:9980
+ api_password = *** ENCRYPTED ***
+ --------------------
+ y) Yes this is OK (default)
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
+Once configured, you can then use rclone like this:
- Properties:
+- List directories in top level of your Sia storage
- - Config: api_password
- - Env Var: RCLONE_SIA_API_PASSWORD
- - Type: string
- - Required: false
+ rclone lsd mySia:
- ### Advanced options
+- List all the files in your Sia storage
- Here are the Advanced options specific to sia (Sia Decentralized Cloud).
+ rclone ls mySia:
- #### --sia-user-agent
+- Upload a local directory to the Sia directory called backup
- Siad User Agent
+ rclone copy /home/source mySia:backup
- Sia daemon requires the 'Sia-Agent' user agent by default for security
+Standard options
- Properties:
+Here are the Standard options specific to sia (Sia Decentralized Cloud).
- - Config: user_agent
- - Env Var: RCLONE_SIA_USER_AGENT
- - Type: string
- - Default: "Sia-Agent"
+--sia-api-url
- #### --sia-encoding
+Sia daemon API URL, like http://sia.daemon.host:9980.
- The encoding for the backend.
+Note that siad must run with --disable-api-security to open API port for
+other hosts (not recommended). Keep default if Sia daemon runs on
+localhost.
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+Properties:
- Properties:
+- Config: api_url
+- Env Var: RCLONE_SIA_API_URL
+- Type: string
+- Default: "http://127.0.0.1:9980"
- - Config: encoding
- - Env Var: RCLONE_SIA_ENCODING
- - Type: Encoding
- - Default: Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot
+--sia-api-password
+Sia Daemon API Password.
+Can be found in the apipassword file located in HOME/.sia/ or in the
+daemon directory.
- ## Limitations
+NB Input to this must be obscured - see rclone obscure.
- - Modification times not supported
- - Checksums not supported
- - `rclone about` not supported
- - rclone can work only with _Siad_ or _Sia-UI_ at the moment,
- the **SkyNet daemon is not supported yet.**
- - Sia does not allow control characters or symbols like question and pound
- signs in file names. rclone will transparently [encode](https://rclone.org/overview/#encoding)
- them for you, but you'd better be aware
+Properties:
- # Swift
+- Config: api_password
+- Env Var: RCLONE_SIA_API_PASSWORD
+- Type: string
+- Required: false
- Swift refers to [OpenStack Object Storage](https://docs.openstack.org/swift/latest/).
- Commercial implementations of that being:
+Advanced options
- * [Rackspace Cloud Files](https://www.rackspace.com/cloud/files/)
- * [Memset Memstore](https://www.memset.com/cloud/storage/)
- * [OVH Object Storage](https://www.ovh.co.uk/public-cloud/storage/object-storage/)
- * [Oracle Cloud Storage](https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html)
- * [Blomp Cloud Storage](https://www.blomp.com/cloud-storage/)
- * [IBM Bluemix Cloud ObjectStorage Swift](https://console.bluemix.net/docs/infrastructure/objectstorage-swift/index.html)
+Here are the Advanced options specific to sia (Sia Decentralized Cloud).
- Paths are specified as `remote:container` (or `remote:` for the `lsd`
- command.) You may put subdirectories in too, e.g. `remote:container/path/to/dir`.
+--sia-user-agent
- ## Configuration
+Siad User Agent
- Here is an example of making a swift configuration. First run
+Sia daemon requires the 'Sia-Agent' user agent by default for security
- rclone config
+Properties:
- This will guide you through an interactive setup process.
+- Config: user_agent
+- Env Var: RCLONE_SIA_USER_AGENT
+- Type: string
+- Default: "Sia-Agent"
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> remote Type of storage to
-configure. Choose a number from below, or type in your own value [snip]
-XX / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset
-Memstore, OVH) "swift" [snip] Storage> swift Get swift credentials from
-environment variables in standard OpenStack form. Choose a number from
-below, or type in your own value 1 / Enter swift credentials in the next
-step "false" 2 / Get swift credentials from environment vars. Leave
-other fields blank if using this. "true" env_auth> true User name to
-log in (OS_USERNAME). user> API key or password (OS_PASSWORD). key>
-Authentication URL for server (OS_AUTH_URL). Choose a number from below,
-or type in your own value 1 / Rackspace US
- "https://auth.api.rackspacecloud.com/v1.0" 2 / Rackspace UK
- "https://lon.auth.api.rackspacecloud.com/v1.0" 3 / Rackspace v2
- "https://identity.api.rackspacecloud.com/v2.0" 4 / Memset Memstore UK
- "https://auth.storage.memset.com/v1.0" 5 / Memset Memstore UK v2
- "https://auth.storage.memset.com/v2.0" 6 / OVH
- "https://auth.cloud.ovh.net/v3" 7 / Blomp Cloud Storage
- "https://authenticate.ain.net" auth> User ID to log in - optional -
-most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
-user_id> User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME) domain>
-Tenant name - optional for v1 auth, this or tenant_id required otherwise
-(OS_TENANT_NAME or OS_PROJECT_NAME) tenant> Tenant ID - optional for v1
-auth, this or tenant required otherwise (OS_TENANT_ID) tenant_id> Tenant
-domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME) tenant_domain>
-Region name - optional (OS_REGION_NAME) region> Storage URL - optional
-(OS_STORAGE_URL) storage_url> Auth Token from alternate authentication -
-optional (OS_AUTH_TOKEN) auth_token> AuthVersion - optional - set to
-(1,2,3) if your auth URL has no version (ST_AUTH_VERSION) auth_version>
-Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)
-Choose a number from below, or type in your own value 1 / Public
-(default, choose this if not sure) "public" 2 / Internal (use internal
-service net) "internal" 3 / Admin "admin" endpoint_type> Remote config
--------------------- [test] env_auth = true user = key = auth = user_id
-= domain = tenant = tenant_id = tenant_domain = region = storage_url =
-auth_token = auth_version = endpoint_type = -------------------- y) Yes
-this is OK e) Edit this remote d) Delete this remote y/e/d> y
+--sia-encoding
+The encoding for the backend.
- This remote is called `remote` and can now be used like this
+See the encoding section in the overview for more info.
- See all containers
+Properties:
- rclone lsd remote:
+- Config: encoding
+- Env Var: RCLONE_SIA_ENCODING
+- Type: Encoding
+- Default: Slash,Question,Hash,Percent,Del,Ctl,InvalidUtf8,Dot
- Make a new container
+--sia-description
- rclone mkdir remote:container
+Description of the remote
- List the contents of a container
+Properties:
- rclone ls remote:container
+- Config: description
+- Env Var: RCLONE_SIA_DESCRIPTION
+- Type: string
+- Required: false
- Sync `/home/local/directory` to the remote container, deleting any
- excess files in the container.
+Limitations
- rclone sync --interactive /home/local/directory remote:container
+- Modification times not supported
+- Checksums not supported
+- rclone about not supported
+- rclone can work only with Siad or Sia-UI at the moment; the SkyNet
+ daemon is not supported yet.
+- Sia does not allow control characters or symbols like question and
+ pound signs in file names. rclone will transparently encode them for
+ you, but you should be aware of this.
- ### Configuration from an OpenStack credentials file
+Swift
- An OpenStack credentials file typically looks something something
- like this (without the comments)
+Swift refers to OpenStack Object Storage. Commercial implementations of
+that include:
-export OS_AUTH_URL=https://a.provider.net/v2.0 export
-OS_TENANT_ID=ffffffffffffffffffffffffffffffff export
-OS_TENANT_NAME="1234567890123456" export OS_USERNAME="123abc567xy" echo
-"Please enter your OpenStack Password: " read -sr OS_PASSWORD_INPUT
-export
-OS_PASSWORD=$OS_PASSWORD_INPUT export OS_REGION_NAME="SBG1" if [ -z "$OS_REGION_NAME"
-]; then unset OS_REGION_NAME; fi
+- Rackspace Cloud Files
+- Memset Memstore
+- OVH Object Storage
+- Oracle Cloud Storage
+- Blomp Cloud Storage
+- IBM Bluemix Cloud ObjectStorage Swift
+Paths are specified as remote:container (or remote: for the lsd
+command.) You may put subdirectories in too, e.g.
+remote:container/path/to/dir.
- The config file needs to look something like this where `$OS_USERNAME`
- represents the value of the `OS_USERNAME` variable - `123abc567xy` in
- the example above.
+Configuration
-[remote] type = swift user = $OS_USERNAME key = $OS_PASSWORD auth =
-$OS_AUTH_URL tenant = $OS_TENANT_NAME
+Here is an example of making a swift configuration. First run
+ rclone config
- Note that you may (or may not) need to set `region` too - try without first.
-
- ### Configuration from the environment
-
- If you prefer you can configure rclone to use swift using a standard
- set of OpenStack environment variables.
-
- When you run through the config, make sure you choose `true` for
- `env_auth` and leave everything else blank.
-
- rclone will then set any empty config parameters from the environment
- using standard OpenStack environment variables. There is [a list of
- the
- variables](https://godoc.org/github.com/ncw/swift#Connection.ApplyEnvironment)
- in the docs for the swift library.
-
- ### Using an alternate authentication method
-
- If your OpenStack installation uses a non-standard authentication method
- that might not be yet supported by rclone or the underlying swift library,
- you can authenticate externally (e.g. calling manually the `openstack`
- commands to get a token). Then, you just need to pass the two
- configuration variables ``auth_token`` and ``storage_url``.
- If they are both provided, the other variables are ignored. rclone will
- not try to authenticate but instead assume it is already authenticated
- and use these two variables to access the OpenStack installation.
-
- #### Using rclone without a config file
-
- You can use rclone with swift without a config file, if desired, like
- this:
-
-source openstack-credentials-file export
-RCLONE_CONFIG_MYREMOTE_TYPE=swift export
-RCLONE_CONFIG_MYREMOTE_ENV_AUTH=true rclone lsd myremote:
-
-
- ### --fast-list
-
- This remote supports `--fast-list` which allows you to use fewer
- transactions in exchange for more memory. See the [rclone
- docs](https://rclone.org/docs/#fast-list) for more details.
-
- ### --update and --use-server-modtime
-
- As noted below, the modified time is stored on metadata on the object. It is
- used by default for all operations that require checking the time a file was
- last updated. It allows rclone to treat the remote more like a true filesystem,
- but it is inefficient because it requires an extra API call to retrieve the
- metadata.
-
- For many operations, the time the object was last uploaded to the remote is
- sufficient to determine if it is "dirty". By using `--update` along with
- `--use-server-modtime`, you can avoid the extra API call and simply upload
- files whose local modtime is newer than the time it was last uploaded.
-
- ### Modification times and hashes
-
- The modified time is stored as metadata on the object as
- `X-Object-Meta-Mtime` as floating point since the epoch accurate to 1
- ns.
-
- This is a de facto standard (used in the official python-swiftclient
- amongst others) for storing the modification time for an object.
-
- The MD5 hash algorithm is supported.
-
- ### Restricted filename characters
-
- | Character | Value | Replacement |
- | --------- |:-----:|:-----------:|
- | NUL | 0x00 | ␀ |
- | / | 0x2F | / |
-
- Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
- as they can't be used in JSON strings.
-
-
- ### Standard options
-
- Here are the Standard options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
-
- #### --swift-env-auth
+This will guide you through an interactive setup process.
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> remote
+ Type of storage to configure.
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)
+ \ "swift"
+ [snip]
+ Storage> swift
Get swift credentials from environment variables in standard OpenStack form.
-
- Properties:
-
- - Config: env_auth
- - Env Var: RCLONE_SWIFT_ENV_AUTH
- - Type: bool
- - Default: false
- - Examples:
- - "false"
- - Enter swift credentials in the next step.
- - "true"
- - Get swift credentials from environment vars.
- - Leave other fields blank if using this.
-
- #### --swift-user
-
+ Choose a number from below, or type in your own value
+ 1 / Enter swift credentials in the next step
+ \ "false"
+ 2 / Get swift credentials from environment vars. Leave other fields blank if using this.
+ \ "true"
+ env_auth> true
User name to log in (OS_USERNAME).
-
- Properties:
-
- - Config: user
- - Env Var: RCLONE_SWIFT_USER
- - Type: string
- - Required: false
-
- #### --swift-key
-
+ user>
API key or password (OS_PASSWORD).
-
- Properties:
-
- - Config: key
- - Env Var: RCLONE_SWIFT_KEY
- - Type: string
- - Required: false
-
- #### --swift-auth
-
+ key>
Authentication URL for server (OS_AUTH_URL).
-
- Properties:
-
- - Config: auth
- - Env Var: RCLONE_SWIFT_AUTH
- - Type: string
- - Required: false
- - Examples:
- - "https://auth.api.rackspacecloud.com/v1.0"
- - Rackspace US
- - "https://lon.auth.api.rackspacecloud.com/v1.0"
- - Rackspace UK
- - "https://identity.api.rackspacecloud.com/v2.0"
- - Rackspace v2
- - "https://auth.storage.memset.com/v1.0"
- - Memset Memstore UK
- - "https://auth.storage.memset.com/v2.0"
- - Memset Memstore UK v2
- - "https://auth.cloud.ovh.net/v3"
- - OVH
- - "https://authenticate.ain.net"
- - Blomp Cloud Storage
-
- #### --swift-user-id
-
+ Choose a number from below, or type in your own value
+ 1 / Rackspace US
+ \ "https://auth.api.rackspacecloud.com/v1.0"
+ 2 / Rackspace UK
+ \ "https://lon.auth.api.rackspacecloud.com/v1.0"
+ 3 / Rackspace v2
+ \ "https://identity.api.rackspacecloud.com/v2.0"
+ 4 / Memset Memstore UK
+ \ "https://auth.storage.memset.com/v1.0"
+ 5 / Memset Memstore UK v2
+ \ "https://auth.storage.memset.com/v2.0"
+ 6 / OVH
+ \ "https://auth.cloud.ovh.net/v3"
+ 7 / Blomp Cloud Storage
+ \ "https://authenticate.ain.net"
+ auth>
User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).
-
- Properties:
-
- - Config: user_id
- - Env Var: RCLONE_SWIFT_USER_ID
- - Type: string
- - Required: false
-
- #### --swift-domain
-
+ user_id>
User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)
+ domain>
+ Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)
+ tenant>
+ Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)
+ tenant_id>
+ Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)
+ tenant_domain>
+ Region name - optional (OS_REGION_NAME)
+ region>
+ Storage URL - optional (OS_STORAGE_URL)
+ storage_url>
+ Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)
+ auth_token>
+ AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)
+ auth_version>
+ Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)
+ Choose a number from below, or type in your own value
+ 1 / Public (default, choose this if not sure)
+ \ "public"
+ 2 / Internal (use internal service net)
+ \ "internal"
+ 3 / Admin
+ \ "admin"
+ endpoint_type>
+ Remote config
+ --------------------
+ [test]
+ env_auth = true
+ user =
+ key =
+ auth =
+ user_id =
+ domain =
+ tenant =
+ tenant_id =
+ tenant_domain =
+ region =
+ storage_url =
+ auth_token =
+ auth_version =
+ endpoint_type =
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+This remote is called remote and can now be used like this
+
+See all containers
+
+ rclone lsd remote:
+
+Make a new container
+
+ rclone mkdir remote:container
+
+List the contents of a container
+
+ rclone ls remote:container
+
+Sync /home/local/directory to the remote container, deleting any excess
+files in the container.
+
+ rclone sync --interactive /home/local/directory remote:container
+
+Configuration from an OpenStack credentials file
+
+An OpenStack credentials file typically looks something like this
+(without the comments)
+
+ export OS_AUTH_URL=https://a.provider.net/v2.0
+ export OS_TENANT_ID=ffffffffffffffffffffffffffffffff
+ export OS_TENANT_NAME="1234567890123456"
+ export OS_USERNAME="123abc567xy"
+ echo "Please enter your OpenStack Password: "
+ read -sr OS_PASSWORD_INPUT
+ export OS_PASSWORD=$OS_PASSWORD_INPUT
+ export OS_REGION_NAME="SBG1"
+ if [ -z "$OS_REGION_NAME" ]; then unset OS_REGION_NAME; fi
+
+The config file needs to look something like this where $OS_USERNAME
+represents the value of the OS_USERNAME variable - 123abc567xy in the
+example above.
+
+ [remote]
+ type = swift
+ user = $OS_USERNAME
+ key = $OS_PASSWORD
+ auth = $OS_AUTH_URL
+ tenant = $OS_TENANT_NAME
+
+Note that you may (or may not) need to set region too - try without
+first.
+
+Configuration from the environment
+
+If you prefer you can configure rclone to use swift using a standard set
+of OpenStack environment variables.
+
+When you run through the config, make sure you choose true for env_auth
+and leave everything else blank.
+
+rclone will then set any empty config parameters from the environment
+using standard OpenStack environment variables. There is a list of the
+variables in the docs for the swift library.
+
+Using an alternate authentication method
- Properties:
+If your OpenStack installation uses a non-standard authentication method
+that might not yet be supported by rclone or the underlying swift
+library, you can authenticate externally (e.g. by calling the openstack
+commands manually to get a token). Then, you just need to pass the two
+configuration variables auth_token and storage_url. If they are both
+provided, the other variables are ignored. rclone will not try to
+authenticate but instead assume it is already authenticated and use
+these two variables to access the OpenStack installation.
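+
+A minimal sketch of such a configuration, with placeholder values for
+the externally obtained token and storage URL:
+
+    [remote]
+    type = swift
+    auth_token = <token obtained externally>
+    storage_url = <storage URL for your account>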
- - Config: domain
- - Env Var: RCLONE_SWIFT_DOMAIN
- - Type: string
- - Required: false
+Using rclone without a config file
- #### --swift-tenant
+You can use rclone with swift without a config file, if desired, like
+this:
- Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).
+ source openstack-credentials-file
+ export RCLONE_CONFIG_MYREMOTE_TYPE=swift
+ export RCLONE_CONFIG_MYREMOTE_ENV_AUTH=true
+ rclone lsd myremote:
- Properties:
+--fast-list
- - Config: tenant
- - Env Var: RCLONE_SWIFT_TENANT
- - Type: string
- - Required: false
+This remote supports --fast-list which allows you to use fewer
+transactions in exchange for more memory. See the rclone docs for more
+details.
- #### --swift-tenant-id
+--update and --use-server-modtime
- Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).
+As noted below, the modified time is stored as metadata on the object.
+It is used by default for all operations that require checking the time
+a file was last updated. It allows rclone to treat the remote more like
+a true filesystem, but it is inefficient because it requires an extra
+API call to retrieve the metadata.
- Properties:
+For many operations, the time the object was last uploaded to the remote
+is sufficient to determine if it is "dirty". By using --update along
+with --use-server-modtime, you can avoid the extra API call and simply
+upload files whose local modtime is newer than the time it was last
+uploaded.
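+
+For example, the following only uploads files whose local modification
+time is newer than the time the object was last uploaded, avoiding the
+extra metadata call (the paths are placeholders):
+
+    rclone copy --update --use-server-modtime /home/local/directory remote:container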
- - Config: tenant_id
- - Env Var: RCLONE_SWIFT_TENANT_ID
- - Type: string
- - Required: false
+Modification times and hashes
- #### --swift-tenant-domain
+The modified time is stored as metadata on the object as
+X-Object-Meta-Mtime as floating point since the epoch accurate to 1 ns.
- Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).
+This is a de facto standard (used in the official python-swiftclient
+amongst others) for storing the modification time for an object.
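+
+As an illustration, the metadata header stored on an object might look
+something like this (the value is an example timestamp):
+
+    X-Object-Meta-Mtime: 1611377471.123456789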
- Properties:
+The MD5 hash algorithm is supported.
- - Config: tenant_domain
- - Env Var: RCLONE_SWIFT_TENANT_DOMAIN
- - Type: string
- - Required: false
+Restricted filename characters
- #### --swift-region
+ Character Value Replacement
+ ----------- ------- -------------
+ NUL 0x00 ␀
+ / 0x2F /
- Region name - optional (OS_REGION_NAME).
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON
+strings.
- Properties:
+Standard options
- - Config: region
- - Env Var: RCLONE_SWIFT_REGION
- - Type: string
- - Required: false
+Here are the Standard options specific to swift (OpenStack Swift
+(Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
- #### --swift-storage-url
+--swift-env-auth
- Storage URL - optional (OS_STORAGE_URL).
+Get swift credentials from environment variables in standard OpenStack
+form.
- Properties:
+Properties:
- - Config: storage_url
- - Env Var: RCLONE_SWIFT_STORAGE_URL
- - Type: string
- - Required: false
+- Config: env_auth
+- Env Var: RCLONE_SWIFT_ENV_AUTH
+- Type: bool
+- Default: false
+- Examples:
+ - "false"
+ - Enter swift credentials in the next step.
+ - "true"
+ - Get swift credentials from environment vars.
+ - Leave other fields blank if using this.
- #### --swift-auth-token
+--swift-user
- Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).
+User name to log in (OS_USERNAME).
- Properties:
+Properties:
- - Config: auth_token
- - Env Var: RCLONE_SWIFT_AUTH_TOKEN
- - Type: string
- - Required: false
+- Config: user
+- Env Var: RCLONE_SWIFT_USER
+- Type: string
+- Required: false
- #### --swift-application-credential-id
+--swift-key
- Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).
+API key or password (OS_PASSWORD).
- Properties:
+Properties:
- - Config: application_credential_id
- - Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_ID
- - Type: string
- - Required: false
+- Config: key
+- Env Var: RCLONE_SWIFT_KEY
+- Type: string
+- Required: false
- #### --swift-application-credential-name
+--swift-auth
- Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).
+Authentication URL for server (OS_AUTH_URL).
- Properties:
+Properties:
- - Config: application_credential_name
- - Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_NAME
- - Type: string
- - Required: false
+- Config: auth
+- Env Var: RCLONE_SWIFT_AUTH
+- Type: string
+- Required: false
+- Examples:
+ - "https://auth.api.rackspacecloud.com/v1.0"
+ - Rackspace US
+ - "https://lon.auth.api.rackspacecloud.com/v1.0"
+ - Rackspace UK
+ - "https://identity.api.rackspacecloud.com/v2.0"
+ - Rackspace v2
+ - "https://auth.storage.memset.com/v1.0"
+ - Memset Memstore UK
+ - "https://auth.storage.memset.com/v2.0"
+ - Memset Memstore UK v2
+ - "https://auth.cloud.ovh.net/v3"
+ - OVH
+ - "https://authenticate.ain.net"
+ - Blomp Cloud Storage
- #### --swift-application-credential-secret
+--swift-user-id
- Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).
+User ID to log in - optional - most swift systems use user and leave
+this blank (v3 auth) (OS_USER_ID).
- Properties:
+Properties:
- - Config: application_credential_secret
- - Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_SECRET
- - Type: string
- - Required: false
+- Config: user_id
+- Env Var: RCLONE_SWIFT_USER_ID
+- Type: string
+- Required: false
- #### --swift-auth-version
+--swift-domain
- AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).
+User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME).
- Properties:
+Properties:
- - Config: auth_version
- - Env Var: RCLONE_SWIFT_AUTH_VERSION
- - Type: int
- - Default: 0
+- Config: domain
+- Env Var: RCLONE_SWIFT_DOMAIN
+- Type: string
+- Required: false
- #### --swift-endpoint-type
+--swift-tenant
- Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).
+Tenant name - optional for v1 auth, this or tenant_id required otherwise
+(OS_TENANT_NAME or OS_PROJECT_NAME).
- Properties:
+Properties:
- - Config: endpoint_type
- - Env Var: RCLONE_SWIFT_ENDPOINT_TYPE
- - Type: string
- - Default: "public"
- - Examples:
- - "public"
- - Public (default, choose this if not sure)
- - "internal"
- - Internal (use internal service net)
- - "admin"
- - Admin
+- Config: tenant
+- Env Var: RCLONE_SWIFT_TENANT
+- Type: string
+- Required: false
- #### --swift-storage-policy
+--swift-tenant-id
- The storage policy to use when creating a new container.
+Tenant ID - optional for v1 auth, this or tenant required otherwise
+(OS_TENANT_ID).
- This applies the specified storage policy when creating a new
- container. The policy cannot be changed afterwards. The allowed
- configuration values and their meaning depend on your Swift storage
- provider.
+Properties:
- Properties:
+- Config: tenant_id
+- Env Var: RCLONE_SWIFT_TENANT_ID
+- Type: string
+- Required: false
- - Config: storage_policy
- - Env Var: RCLONE_SWIFT_STORAGE_POLICY
- - Type: string
- - Required: false
- - Examples:
- - ""
- - Default
- - "pcs"
- - OVH Public Cloud Storage
- - "pca"
- - OVH Public Cloud Archive
+--swift-tenant-domain
- ### Advanced options
+Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).
- Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
+Properties:
- #### --swift-leave-parts-on-error
+- Config: tenant_domain
+- Env Var: RCLONE_SWIFT_TENANT_DOMAIN
+- Type: string
+- Required: false
- If true avoid calling abort upload on a failure.
+--swift-region
- It should be set to true for resuming uploads across different sessions.
+Region name - optional (OS_REGION_NAME).
- Properties:
+Properties:
- - Config: leave_parts_on_error
- - Env Var: RCLONE_SWIFT_LEAVE_PARTS_ON_ERROR
- - Type: bool
- - Default: false
+- Config: region
+- Env Var: RCLONE_SWIFT_REGION
+- Type: string
+- Required: false
- #### --swift-chunk-size
+--swift-storage-url
- Above this size files will be chunked into a _segments container.
+Storage URL - optional (OS_STORAGE_URL).
- Above this size files will be chunked into a _segments container. The
- default for this is 5 GiB which is its maximum value.
+Properties:
- Properties:
+- Config: storage_url
+- Env Var: RCLONE_SWIFT_STORAGE_URL
+- Type: string
+- Required: false
- - Config: chunk_size
- - Env Var: RCLONE_SWIFT_CHUNK_SIZE
- - Type: SizeSuffix
- - Default: 5Gi
+--swift-auth-token
- #### --swift-no-chunk
+Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).
- Don't chunk files during streaming upload.
+Properties:
- When doing streaming uploads (e.g. using rcat or mount) setting this
- flag will cause the swift backend to not upload chunked files.
+- Config: auth_token
+- Env Var: RCLONE_SWIFT_AUTH_TOKEN
+- Type: string
+- Required: false
- This will limit the maximum upload size to 5 GiB. However non chunked
- files are easier to deal with and have an MD5SUM.
+--swift-application-credential-id
- Rclone will still chunk files bigger than chunk_size when doing normal
- copy operations.
+Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).
- Properties:
+Properties:
- - Config: no_chunk
- - Env Var: RCLONE_SWIFT_NO_CHUNK
- - Type: bool
- - Default: false
+- Config: application_credential_id
+- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_ID
+- Type: string
+- Required: false
- #### --swift-no-large-objects
+--swift-application-credential-name
- Disable support for static and dynamic large objects
+Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).
- Swift cannot transparently store files bigger than 5 GiB. There are
- two schemes for doing that, static or dynamic large objects, and the
- API does not allow rclone to determine whether a file is a static or
- dynamic large object without doing a HEAD on the object. Since these
- need to be treated differently, this means rclone has to issue HEAD
- requests for objects for example when reading checksums.
+Properties:
- When `no_large_objects` is set, rclone will assume that there are no
- static or dynamic large objects stored. This means it can stop doing
- the extra HEAD calls which in turn increases performance greatly
- especially when doing a swift to swift transfer with `--checksum` set.
+- Config: application_credential_name
+- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_NAME
+- Type: string
+- Required: false
- Setting this option implies `no_chunk` and also that no files will be
- uploaded in chunks, so files bigger than 5 GiB will just fail on
- upload.
+--swift-application-credential-secret
- If you set this option and there *are* static or dynamic large objects,
- then this will give incorrect hashes for them. Downloads will succeed,
- but other operations such as Remove and Copy will fail.
+Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).
+Properties:
- Properties:
+- Config: application_credential_secret
+- Env Var: RCLONE_SWIFT_APPLICATION_CREDENTIAL_SECRET
+- Type: string
+- Required: false
- - Config: no_large_objects
- - Env Var: RCLONE_SWIFT_NO_LARGE_OBJECTS
- - Type: bool
- - Default: false
+--swift-auth-version
- #### --swift-encoding
+AuthVersion - optional - set to (1,2,3) if your auth URL has no version
+(ST_AUTH_VERSION).
- The encoding for the backend.
+Properties:
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+- Config: auth_version
+- Env Var: RCLONE_SWIFT_AUTH_VERSION
+- Type: int
+- Default: 0
- Properties:
+--swift-endpoint-type
- - Config: encoding
- - Env Var: RCLONE_SWIFT_ENCODING
- - Type: Encoding
- - Default: Slash,InvalidUtf8
+Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).
+Properties:
+- Config: endpoint_type
+- Env Var: RCLONE_SWIFT_ENDPOINT_TYPE
+- Type: string
+- Default: "public"
+- Examples:
+ - "public"
+ - Public (default, choose this if not sure)
+ - "internal"
+ - Internal (use internal service net)
+ - "admin"
+ - Admin
- ## Limitations
+--swift-storage-policy
- The Swift API doesn't return a correct MD5SUM for segmented files
- (Dynamic or Static Large Objects) so rclone won't check or use the
- MD5SUM for these.
+The storage policy to use when creating a new container.
- ## Troubleshooting
+This applies the specified storage policy when creating a new container.
+The policy cannot be changed afterwards. The allowed configuration
+values and their meaning depend on your Swift storage provider.
- ### Rclone gives Failed to create file system for "remote:": Bad Request
+Properties:
- Due to an oddity of the underlying swift library, it gives a "Bad
- Request" error rather than a more sensible error when the
- authentication fails for Swift.
+- Config: storage_policy
+- Env Var: RCLONE_SWIFT_STORAGE_POLICY
+- Type: string
+- Required: false
+- Examples:
+ - ""
+ - Default
+ - "pcs"
+ - OVH Public Cloud Storage
+ - "pca"
+ - OVH Public Cloud Archive
- So this most likely means your username / password is wrong. You can
- investigate further with the `--dump-bodies` flag.
+Advanced options
- This may also be caused by specifying the region when you shouldn't
- have (e.g. OVH).
+Here are the Advanced options specific to swift (OpenStack Swift
+(Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).
- ### Rclone gives Failed to create file system: Response didn't have storage url and auth token
+--swift-leave-parts-on-error
- This is most likely caused by forgetting to specify your tenant when
- setting up a swift remote.
+If true avoid calling abort upload on a failure.
- ## OVH Cloud Archive
+It should be set to true for resuming uploads across different sessions.
- To use rclone with OVH cloud archive, first use `rclone config` to set up a `swift` backend with OVH, choosing `pca` as the `storage_policy`.
+Properties:
- ### Uploading Objects
+- Config: leave_parts_on_error
+- Env Var: RCLONE_SWIFT_LEAVE_PARTS_ON_ERROR
+- Type: bool
+- Default: false
- Uploading objects to OVH cloud archive is no different to object storage, you just simply run the command you like (move, copy or sync) to upload the objects. Once uploaded the objects will show in a "Frozen" state within the OVH control panel.
+--swift-chunk-size
- ### Retrieving Objects
+Above this size files will be chunked into a _segments container.
- To retrieve objects use `rclone copy` as normal. If the objects are in a frozen state then rclone will ask for them all to be unfrozen and it will wait at the end of the output with a message like the following:
+Above this size files will be chunked into a _segments container. The
+default for this is 5 GiB which is its maximum value.
- `2019/03/23 13:06:33 NOTICE: Received retry after error - sleeping until 2019-03-23T13:16:33.481657164+01:00 (9m59.99985121s)`
+Properties:
- Rclone will wait for the time specified then retry the copy.
+- Config: chunk_size
+- Env Var: RCLONE_SWIFT_CHUNK_SIZE
+- Type: SizeSuffix
+- Default: 5Gi
- # pCloud
+--swift-no-chunk
- Paths are specified as `remote:path`
+Don't chunk files during streaming upload.
- Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
+When doing streaming uploads (e.g. using rcat or mount) setting this
+flag will cause the swift backend to not upload chunked files.
- ## Configuration
+This will limit the maximum upload size to 5 GiB. However non chunked
+files are easier to deal with and have an MD5SUM.
- The initial setup for pCloud involves getting a token from pCloud which you
- need to do in your browser. `rclone config` walks you through it.
+Rclone will still chunk files bigger than chunk_size when doing normal
+copy operations.
- Here is an example of how to make a remote called `remote`. First run:
+Properties:
- rclone config
+- Config: no_chunk
+- Env Var: RCLONE_SWIFT_NO_CHUNK
+- Type: bool
+- Default: false
- This will guide you through an interactive setup process:
+--swift-no-large-objects
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> remote Type of storage to
-configure. Choose a number from below, or type in your own value [snip]
-XX / Pcloud "pcloud" [snip] Storage> pcloud Pcloud App Client Id -
-leave blank normally. client_id> Pcloud App Client Secret - leave blank
-normally. client_secret> Remote config Use web browser to automatically
-authenticate rclone with remote? * Say Y if the machine running rclone
-has a web browser you can use * Say N if running rclone on a (remote)
-machine without web browser access If not sure try Y. If Y failed, try
-N. y) Yes n) No y/n> y If your browser doesn't open automatically go to
-the following link: http://127.0.0.1:53682/auth Log in and authorize
-rclone for access Waiting for code... Got code --------------------
-[remote] client_id = client_secret = token =
-{"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
--------------------- y) Yes this is OK e) Edit this remote d) Delete
-this remote y/e/d> y
+Disable support for static and dynamic large objects
+Swift cannot transparently store files bigger than 5 GiB. There are two
+schemes for doing that, static or dynamic large objects, and the API
+does not allow rclone to determine whether a file is a static or dynamic
+large object without doing a HEAD on the object. Since these need to be
+treated differently, this means rclone has to issue HEAD requests for
+objects for example when reading checksums.
- See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a
- machine with no Internet browser available.
+When no_large_objects is set, rclone will assume that there are no
+static or dynamic large objects stored. This means it can stop doing the
+extra HEAD calls which in turn increases performance greatly especially
+when doing a swift to swift transfer with --checksum set.
- Note that rclone runs a webserver on your local machine to collect the
- token as returned from pCloud. This only runs from the moment it opens
- your browser to the moment you get back the verification code. This
- is on `http://127.0.0.1:53682/` and this it may require you to unblock
- it temporarily if you are running a host firewall.
+Setting this option implies no_chunk and also that no files will be
+uploaded in chunks, so files bigger than 5 GiB will just fail on upload.
- Once configured you can then use `rclone` like this,
+If you set this option and there are static or dynamic large objects,
+then this will give incorrect hashes for them. Downloads will succeed,
+but other operations such as Remove and Copy will fail.
- List directories in top level of your pCloud
+Properties:
- rclone lsd remote:
+- Config: no_large_objects
+- Env Var: RCLONE_SWIFT_NO_LARGE_OBJECTS
+- Type: bool
+- Default: false
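+
+For example, a swift to swift transfer which avoids the extra HEAD
+requests might look like this (assuming neither side holds large
+objects; the remote and container names are illustrative):
+
+    rclone copy --checksum --swift-no-large-objects src-swift:container dst-swift:container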
- List all the files in your pCloud
+--swift-encoding
- rclone ls remote:
+The encoding for the backend.
- To copy a local directory to a pCloud directory called backup
+See the encoding section in the overview for more info.
- rclone copy /home/source remote:backup
+Properties:
- ### Modification times and hashes
+- Config: encoding
+- Env Var: RCLONE_SWIFT_ENCODING
+- Type: Encoding
+- Default: Slash,InvalidUtf8
- pCloud allows modification times to be set on objects accurate to 1
- second. These will be used to detect whether objects need syncing or
- not. In order to set a Modification time pCloud requires the object
- be re-uploaded.
+--swift-description
- pCloud supports MD5 and SHA1 hashes in the US region, and SHA1 and SHA256
- hashes in the EU region, so you can use the `--checksum` flag.
+Description of the remote
- ### Restricted filename characters
+Properties:
- In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters)
- the following characters are also replaced:
+- Config: description
+- Env Var: RCLONE_SWIFT_DESCRIPTION
+- Type: string
+- Required: false
- | Character | Value | Replacement |
- | --------- |:-----:|:-----------:|
- | \ | 0x5C | \ |
+Limitations
- Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
- as they can't be used in JSON strings.
+The Swift API doesn't return a correct MD5SUM for segmented files
+(Dynamic or Static Large Objects) so rclone won't check or use the
+MD5SUM for these.
- ### Deleting files
+Troubleshooting
- Deleted files will be moved to the trash. Your subscription level
- will determine how long items stay in the trash. `rclone cleanup` can
- be used to empty the trash.
+Rclone gives Failed to create file system for "remote:": Bad Request
- ### Emptying the trash
+Due to an oddity of the underlying swift library, it gives a "Bad
+Request" error rather than a more sensible error when the authentication
+fails for Swift.
- Due to an API limitation, the `rclone cleanup` command will only work if you
- set your username and password in the advanced options for this backend.
- Since we generally want to avoid storing user passwords in the rclone config
- file, we advise you to only set this up if you need the `rclone cleanup` command to work.
+So this most likely means your username / password is wrong. You can
+investigate further with the --dump-bodies flag.
- ### Root folder ID
+This may also be caused by specifying the region when you shouldn't have
+(e.g. OVH).
- You can set the `root_folder_id` for rclone. This is the directory
- (identified by its `Folder ID`) that rclone considers to be the root
- of your pCloud drive.
+Rclone gives Failed to create file system: Response didn't have storage url and auth token
- Normally you will leave this blank and rclone will determine the
- correct root to use itself.
+This is most likely caused by forgetting to specify your tenant when
+setting up a swift remote.
- However you can set this to restrict rclone to a specific folder
- hierarchy.
+OVH Cloud Archive
- In order to do this you will have to find the `Folder ID` of the
- directory you wish rclone to display. This will be the `folder` field
- of the URL when you open the relevant folder in the pCloud web
- interface.
+To use rclone with OVH cloud archive, first use rclone config to set up
+a swift backend with OVH, choosing pca as the storage_policy.
- So if the folder you want rclone to use has a URL which looks like
- `https://my.pcloud.com/#page=filemanager&folder=5xxxxxxxx8&tpl=foldergrid`
- in the browser, then you use `5xxxxxxxx8` as
- the `root_folder_id` in the config.
+Uploading Objects
+Uploading objects to OVH cloud archive is no different to object
+storage; you simply run the command you like (move, copy or sync) to
+upload the objects. Once uploaded, the objects will show in a "Frozen"
+state within the OVH control panel.
- ### Standard options
+Retrieving Objects
- Here are the Standard options specific to pcloud (Pcloud).
+To retrieve objects use rclone copy as normal. If the objects are in a
+frozen state then rclone will ask for them all to be unfrozen and it
+will wait at the end of the output with a message like the following:
- #### --pcloud-client-id
+2019/03/23 13:06:33 NOTICE: Received retry after error - sleeping until 2019-03-23T13:16:33.481657164+01:00 (9m59.99985121s)
- OAuth Client Id.
+Rclone will wait for the time specified then retry the copy.
- Leave blank normally.
+pCloud
- Properties:
+Paths are specified as remote:path
- - Config: client_id
- - Env Var: RCLONE_PCLOUD_CLIENT_ID
- - Type: string
- - Required: false
+Paths may be as deep as required, e.g. remote:directory/subdirectory.
- #### --pcloud-client-secret
+Configuration
- OAuth Client Secret.
+The initial setup for pCloud involves getting a token from pCloud which
+you need to do in your browser. rclone config walks you through it.
- Leave blank normally.
+Here is an example of how to make a remote called remote. First run:
- Properties:
+ rclone config
- - Config: client_secret
- - Env Var: RCLONE_PCLOUD_CLIENT_SECRET
- - Type: string
- - Required: false
+This will guide you through an interactive setup process:
- ### Advanced options
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> remote
+ Type of storage to configure.
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / Pcloud
+ \ "pcloud"
+ [snip]
+ Storage> pcloud
+ Pcloud App Client Id - leave blank normally.
+ client_id>
+ Pcloud App Client Secret - leave blank normally.
+ client_secret>
+ Remote config
+ Use web browser to automatically authenticate rclone with remote?
+ * Say Y if the machine running rclone has a web browser you can use
+ * Say N if running rclone on a (remote) machine without web browser access
+ If not sure try Y. If Y failed, try N.
+ y) Yes
+ n) No
+ y/n> y
+ If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
+ Log in and authorize rclone for access
+ Waiting for code...
+ Got code
+ --------------------
+ [remote]
+ client_id =
+ client_secret =
+ token = {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
- Here are the Advanced options specific to pcloud (Pcloud).
+See the remote setup docs for how to set it up on a machine with no
+Internet browser available.
- #### --pcloud-token
+Note that rclone runs a webserver on your local machine to collect the
+token as returned from pCloud. This only runs from the moment it opens
+your browser to the moment you get back the verification code. This is
+on http://127.0.0.1:53682/ and it may require you to unblock it
+temporarily if you are running a host firewall.
- OAuth Access Token as a JSON blob.
+Once configured you can then use rclone like this,
- Properties:
+List directories in top level of your pCloud
- - Config: token
- - Env Var: RCLONE_PCLOUD_TOKEN
- - Type: string
- - Required: false
+ rclone lsd remote:
- #### --pcloud-auth-url
+List all the files in your pCloud
- Auth server URL.
+ rclone ls remote:
- Leave blank to use the provider defaults.
+To copy a local directory to a pCloud directory called backup
- Properties:
+ rclone copy /home/source remote:backup
- - Config: auth_url
- - Env Var: RCLONE_PCLOUD_AUTH_URL
- - Type: string
- - Required: false
+Modification times and hashes
- #### --pcloud-token-url
+pCloud allows modification times to be set on objects accurate to 1
+second. These will be used to detect whether objects need syncing or
+not. In order to set a Modification time pCloud requires the object be
+re-uploaded.
- Token server url.
+pCloud supports MD5 and SHA1 hashes in the US region, and SHA1 and
+SHA256 hashes in the EU region, so you can use the --checksum flag.
- Leave blank to use the provider defaults.
+Restricted filename characters
- Properties:
+In addition to the default restricted characters set the following
+characters are also replaced:
- - Config: token_url
- - Env Var: RCLONE_PCLOUD_TOKEN_URL
- - Type: string
- - Required: false
+ Character Value Replacement
+ ----------- ------- -------------
+ \ 0x5C \
- #### --pcloud-encoding
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON
+strings.
- The encoding for the backend.
+Deleting files
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+Deleted files will be moved to the trash. Your subscription level will
+determine how long items stay in the trash. rclone cleanup can be used
+to empty the trash.
- Properties:
+Emptying the trash
- - Config: encoding
- - Env Var: RCLONE_PCLOUD_ENCODING
- - Type: Encoding
- - Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+Due to an API limitation, the rclone cleanup command will only work if
+you set your username and password in the advanced options for this
+backend. Since we generally want to avoid storing user passwords in the
+rclone config file, we advise you to only set this up if you need the
+rclone cleanup command to work.
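+
+Once the username and password have been set, the trash can be emptied
+with:
+
+    rclone cleanup remote: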
- #### --pcloud-root-folder-id
+Root folder ID
- Fill in for rclone to use a non root folder as its starting point.
+You can set the root_folder_id for rclone. This is the directory
+(identified by its Folder ID) that rclone considers to be the root of
+your pCloud drive.
- Properties:
+Normally you will leave this blank and rclone will determine the correct
+root to use itself.
- - Config: root_folder_id
- - Env Var: RCLONE_PCLOUD_ROOT_FOLDER_ID
- - Type: string
- - Default: "d0"
+However you can set this to restrict rclone to a specific folder
+hierarchy.
- #### --pcloud-hostname
+In order to do this you will have to find the Folder ID of the directory
+you wish rclone to display. This will be the folder field of the URL
+when you open the relevant folder in the pCloud web interface.
- Hostname to connect to.
+So if the folder you want rclone to use has a URL which looks like
+https://my.pcloud.com/#page=filemanager&folder=5xxxxxxxx8&tpl=foldergrid
+in the browser, then you use 5xxxxxxxx8 as the root_folder_id in the
+config.
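+
+The relevant part of the config file would then look something like
+this:
+
+    [remote]
+    type = pcloud
+    root_folder_id = 5xxxxxxxx8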
- This is normally set when rclone initially does the oauth connection,
- however you will need to set it by hand if you are using remote config
- with rclone authorize.
+Standard options
+Here are the Standard options specific to pcloud (Pcloud).
- Properties:
+--pcloud-client-id
- - Config: hostname
- - Env Var: RCLONE_PCLOUD_HOSTNAME
- - Type: string
- - Default: "api.pcloud.com"
- - Examples:
- - "api.pcloud.com"
- - Original/US region
- - "eapi.pcloud.com"
- - EU region
+OAuth Client Id.
- #### --pcloud-username
+Leave blank normally.
- Your pcloud username.
-
- This is only required when you want to use the cleanup command. Due to a bug
- in the pcloud API the required API does not support OAuth authentication so
- we have to rely on user password authentication for it.
+Properties:
- Properties:
+- Config: client_id
+- Env Var: RCLONE_PCLOUD_CLIENT_ID
+- Type: string
+- Required: false
- - Config: username
- - Env Var: RCLONE_PCLOUD_USERNAME
- - Type: string
- - Required: false
+--pcloud-client-secret
- #### --pcloud-password
+OAuth Client Secret.
- Your pcloud password.
+Leave blank normally.
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
+Properties:
- Properties:
+- Config: client_secret
+- Env Var: RCLONE_PCLOUD_CLIENT_SECRET
+- Type: string
+- Required: false
- - Config: password
- - Env Var: RCLONE_PCLOUD_PASSWORD
- - Type: string
- - Required: false
+Advanced options
+Here are the Advanced options specific to pcloud (Pcloud).
+--pcloud-token
- # PikPak
+OAuth Access Token as a JSON blob.
- PikPak is [a private cloud drive](https://mypikpak.com/).
+Properties:
- Paths are specified as `remote:path`, and may be as deep as required, e.g. `remote:directory/subdirectory`.
+- Config: token
+- Env Var: RCLONE_PCLOUD_TOKEN
+- Type: string
+- Required: false
- ## Configuration
+--pcloud-auth-url
- Here is an example of making a remote for PikPak.
+Auth server URL.
- First run:
+Leave blank to use the provider defaults.
- rclone config
+Properties:
- This will guide you through an interactive setup process:
+- Config: auth_url
+- Env Var: RCLONE_PCLOUD_AUTH_URL
+- Type: string
+- Required: false
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n
+--pcloud-token-url
-Enter name for new remote. name> remote
+Token server url.
-Option Storage. Type of storage to configure. Choose a number from
-below, or type in your own value. XX / PikPak (pikpak) Storage> XX
+Leave blank to use the provider defaults.
-Option user. Pikpak username. Enter a value. user> USERNAME
+Properties:
-Option pass. Pikpak password. Choose an alternative below. y) Yes, type
-in my own password g) Generate random password y/g> y Enter the
-password: password: Confirm the password: password:
+- Config: token_url
+- Env Var: RCLONE_PCLOUD_TOKEN_URL
+- Type: string
+- Required: false
-Edit advanced config? y) Yes n) No (default) y/n>
+--pcloud-encoding
-Configuration complete. Options: - type: pikpak - user: USERNAME - pass:
-*** ENCRYPTED *** - token:
-{"access_token":"eyJ...","token_type":"Bearer","refresh_token":"os...","expiry":"2023-01-26T18:54:32.170582647+09:00"}
-Keep this "remote" remote? y) Yes this is OK (default) e) Edit this
-remote d) Delete this remote y/e/d> y
+The encoding for the backend.
+See the encoding section in the overview for more info.
- ### Modification times and hashes
+Properties:
- PikPak keeps modification times on objects, and updates them when uploading objects,
- but it does not support changing only the modification time
+- Config: encoding
+- Env Var: RCLONE_PCLOUD_ENCODING
+- Type: Encoding
+- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
- The MD5 hash algorithm is supported.
+--pcloud-root-folder-id
+Fill in for rclone to use a non root folder as its starting point.
- ### Standard options
+Properties:
- Here are the Standard options specific to pikpak (PikPak).
+- Config: root_folder_id
+- Env Var: RCLONE_PCLOUD_ROOT_FOLDER_ID
+- Type: string
+- Default: "d0"
- #### --pikpak-user
+--pcloud-hostname
+Hostname to connect to.
+
+This is normally set when rclone initially does the oauth connection,
+however you will need to set it by hand if you are using remote config
+with rclone authorize.
+
+Properties:
+
+- Config: hostname
+- Env Var: RCLONE_PCLOUD_HOSTNAME
+- Type: string
+- Default: "api.pcloud.com"
+- Examples:
+ - "api.pcloud.com"
+ - Original/US region
+ - "eapi.pcloud.com"
+ - EU region
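+
+For example, for an EU region account configured with rclone authorize
+on another machine, the config might contain something like this:
+
+    [remote]
+    type = pcloud
+    hostname = eapi.pcloud.com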
+
+--pcloud-username
+
+Your pcloud username.
+
+This is only required when you want to use the cleanup command. Due to a
+bug in the pcloud API, the required API does not support OAuth
+authentication, so we have to rely on user password authentication for
+it.
+
+Properties:
+
+- Config: username
+- Env Var: RCLONE_PCLOUD_USERNAME
+- Type: string
+- Required: false
+
+--pcloud-password
+
+Your pcloud password.
+
+NB Input to this must be obscured - see rclone obscure.
+
+Properties:
+
+- Config: password
+- Env Var: RCLONE_PCLOUD_PASSWORD
+- Type: string
+- Required: false
+
+--pcloud-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PCLOUD_DESCRIPTION
+- Type: string
+- Required: false
+
+PikPak
+
+PikPak is a private cloud drive.
+
+Paths are specified as remote:path, and may be as deep as required, e.g.
+remote:directory/subdirectory.
+
+Configuration
+
+Here is an example of making a remote for PikPak.
+
+First run:
+
+ rclone config
+
+This will guide you through an interactive setup process:
+
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+
+ Enter name for new remote.
+ name> remote
+
+ Option Storage.
+ Type of storage to configure.
+ Choose a number from below, or type in your own value.
+ XX / PikPak
+ \ (pikpak)
+ Storage> XX
+
+ Option user.
Pikpak username.
+ Enter a value.
+ user> USERNAME
- Properties:
-
- - Config: user
- - Env Var: RCLONE_PIKPAK_USER
- - Type: string
- - Required: true
-
- #### --pikpak-pass
-
+ Option pass.
Pikpak password.
+ Choose an alternative below.
+ y) Yes, type in my own password
+ g) Generate random password
+ y/g> y
+ Enter the password:
+ password:
+ Confirm the password:
+ password:
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
+ Edit advanced config?
+ y) Yes
+ n) No (default)
+ y/n>
- Properties:
+ Configuration complete.
+ Options:
+ - type: pikpak
+ - user: USERNAME
+ - pass: *** ENCRYPTED ***
+ - token: {"access_token":"eyJ...","token_type":"Bearer","refresh_token":"os...","expiry":"2023-01-26T18:54:32.170582647+09:00"}
+ Keep this "remote" remote?
+ y) Yes this is OK (default)
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
- - Config: pass
- - Env Var: RCLONE_PIKPAK_PASS
- - Type: string
- - Required: true
+Modification times and hashes
- ### Advanced options
+PikPak keeps modification times on objects, and updates them when
+uploading objects, but it does not support changing only the
+modification time.
- Here are the Advanced options specific to pikpak (PikPak).
+The MD5 hash algorithm is supported.
- #### --pikpak-client-id
+Standard options
- OAuth Client Id.
+Here are the Standard options specific to pikpak (PikPak).
- Leave blank normally.
+--pikpak-user
- Properties:
+Pikpak username.
- - Config: client_id
- - Env Var: RCLONE_PIKPAK_CLIENT_ID
- - Type: string
- - Required: false
+Properties:
- #### --pikpak-client-secret
+- Config: user
+- Env Var: RCLONE_PIKPAK_USER
+- Type: string
+- Required: true
- OAuth Client Secret.
+--pikpak-pass
- Leave blank normally.
+Pikpak password.
- Properties:
+NB Input to this must be obscured - see rclone obscure.
- - Config: client_secret
- - Env Var: RCLONE_PIKPAK_CLIENT_SECRET
- - Type: string
- - Required: false
+Properties:
- #### --pikpak-token
+- Config: pass
+- Env Var: RCLONE_PIKPAK_PASS
+- Type: string
+- Required: true
- OAuth Access Token as a JSON blob.
+Advanced options
- Properties:
+Here are the Advanced options specific to pikpak (PikPak).
- - Config: token
- - Env Var: RCLONE_PIKPAK_TOKEN
- - Type: string
- - Required: false
+--pikpak-client-id
- #### --pikpak-auth-url
+OAuth Client Id.
- Auth server URL.
+Leave blank normally.
- Leave blank to use the provider defaults.
+Properties:
- Properties:
+- Config: client_id
+- Env Var: RCLONE_PIKPAK_CLIENT_ID
+- Type: string
+- Required: false
- - Config: auth_url
- - Env Var: RCLONE_PIKPAK_AUTH_URL
- - Type: string
- - Required: false
+--pikpak-client-secret
- #### --pikpak-token-url
+OAuth Client Secret.
- Token server url.
+Leave blank normally.
- Leave blank to use the provider defaults.
+Properties:
- Properties:
+- Config: client_secret
+- Env Var: RCLONE_PIKPAK_CLIENT_SECRET
+- Type: string
+- Required: false
- - Config: token_url
- - Env Var: RCLONE_PIKPAK_TOKEN_URL
- - Type: string
- - Required: false
+--pikpak-token
- #### --pikpak-root-folder-id
+OAuth Access Token as a JSON blob.
- ID of the root folder.
- Leave blank normally.
+Properties:
- Fill in for rclone to use a non root folder as its starting point.
+- Config: token
+- Env Var: RCLONE_PIKPAK_TOKEN
+- Type: string
+- Required: false
+--pikpak-auth-url
- Properties:
+Auth server URL.
- - Config: root_folder_id
- - Env Var: RCLONE_PIKPAK_ROOT_FOLDER_ID
- - Type: string
- - Required: false
+Leave blank to use the provider defaults.
- #### --pikpak-use-trash
+Properties:
- Send files to the trash instead of deleting permanently.
+- Config: auth_url
+- Env Var: RCLONE_PIKPAK_AUTH_URL
+- Type: string
+- Required: false
- Defaults to true, namely sending files to the trash.
- Use `--pikpak-use-trash=false` to delete files permanently instead.
+--pikpak-token-url
- Properties:
+Token server url.
- - Config: use_trash
- - Env Var: RCLONE_PIKPAK_USE_TRASH
- - Type: bool
- - Default: true
+Leave blank to use the provider defaults.
- #### --pikpak-trashed-only
+Properties:
- Only show files that are in the trash.
+- Config: token_url
+- Env Var: RCLONE_PIKPAK_TOKEN_URL
+- Type: string
+- Required: false
- This will show trashed files in their original directory structure.
+--pikpak-root-folder-id
- Properties:
+ID of the root folder. Leave blank normally.
- - Config: trashed_only
- - Env Var: RCLONE_PIKPAK_TRASHED_ONLY
- - Type: bool
- - Default: false
+Fill in for rclone to use a non root folder as its starting point.
- #### --pikpak-hash-memory-limit
+Properties:
- Files bigger than this will be cached on disk to calculate hash if required.
+- Config: root_folder_id
+- Env Var: RCLONE_PIKPAK_ROOT_FOLDER_ID
+- Type: string
+- Required: false
- Properties:
+--pikpak-use-trash
- - Config: hash_memory_limit
- - Env Var: RCLONE_PIKPAK_HASH_MEMORY_LIMIT
- - Type: SizeSuffix
- - Default: 10Mi
+Send files to the trash instead of deleting permanently.
- #### --pikpak-encoding
+Defaults to true, namely sending files to the trash. Use
+--pikpak-use-trash=false to delete files permanently instead.
- The encoding for the backend.
+Properties:
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+- Config: use_trash
+- Env Var: RCLONE_PIKPAK_USE_TRASH
+- Type: bool
+- Default: true
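+
+For example, to delete files permanently rather than sending them to
+the trash (the path is illustrative):
+
+    rclone delete --pikpak-use-trash=false remote:path/to/dir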
- Properties:
+--pikpak-trashed-only
- - Config: encoding
- - Env Var: RCLONE_PIKPAK_ENCODING
- - Type: Encoding
- - Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
+Only show files that are in the trash.
- ## Backend commands
+This will show trashed files in their original directory structure.
- Here are the commands specific to the pikpak backend.
+Properties:
- Run them with
+- Config: trashed_only
+- Env Var: RCLONE_PIKPAK_TRASHED_ONLY
+- Type: bool
+- Default: false
- rclone backend COMMAND remote:
+--pikpak-hash-memory-limit
- The help below will explain what arguments each command takes.
+Files bigger than this will be cached on disk to calculate hash if
+required.
- See the [backend](https://rclone.org/commands/rclone_backend/) command for more
- info on how to pass options and arguments.
+Properties:
- These can be run on a running backend using the rc command
- [backend/command](https://rclone.org/rc/#backend-command).
+- Config: hash_memory_limit
+- Env Var: RCLONE_PIKPAK_HASH_MEMORY_LIMIT
+- Type: SizeSuffix
+- Default: 10Mi
- ### addurl
+--pikpak-encoding
- Add offline download task for url
+The encoding for the backend.
- rclone backend addurl remote: [options] [+]
+See the encoding section in the overview for more info.
- This command adds offline download task for url.
+Properties:
- Usage:
+- Config: encoding
+- Env Var: RCLONE_PIKPAK_ENCODING
+- Type: Encoding
+- Default:
+ Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,LeftSpace,RightSpace,RightPeriod,InvalidUtf8,Dot
- rclone backend addurl pikpak:dirpath url
+--pikpak-description
- Downloads will be stored in 'dirpath'. If 'dirpath' is invalid,
- download will fallback to default 'My Pack' folder.
+Description of the remote
+Properties:
- ### decompress
+- Config: description
+- Env Var: RCLONE_PIKPAK_DESCRIPTION
+- Type: string
+- Required: false
- Request decompress of a file/files in a folder
+Backend commands
- rclone backend decompress remote: [options] [+]
+Here are the commands specific to the pikpak backend.
- This command requests decompress of file/files in a folder.
+Run them with
- Usage:
+ rclone backend COMMAND remote:
- rclone backend decompress pikpak:dirpath {filename} -o password=password
- rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
+The help below will explain what arguments each command takes.
- An optional argument 'filename' can be specified for a file located in
- 'pikpak:dirpath'. You may want to pass '-o password=password' for a
- password-protected files. Also, pass '-o delete-src-file' to delete
- source files after decompression finished.
+See the backend command for more info on how to pass options and
+arguments.
- Result:
+These can be run on a running backend using the rc command
+backend/command.
- {
- "Decompressed": 17,
- "SourceDeleted": 0,
- "Errors": 0
- }
+addurl
+Add offline download task for url
+ rclone backend addurl remote: [options] [+]
+This command adds offline download task for url.
- ## Limitations
+Usage:
- ### Hashes may be empty
+ rclone backend addurl pikpak:dirpath url
- PikPak supports MD5 hash, but sometimes given empty especially for user-uploaded files.
+Downloads will be stored in 'dirpath'. If 'dirpath' is invalid, the
+download will fall back to the default 'My Pack' folder.
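+
+For example, to queue a download into a folder called "downloads" (the
+URL is illustrative):
+
+    rclone backend addurl pikpak:downloads "https://example.com/file.zip"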
- ### Deleted files still visible with trashed-only
+decompress
- Deleted files will still be visible with `--pikpak-trashed-only` even after the
- trash emptied. This goes away after few days.
+Request decompress of a file/files in a folder
- # premiumize.me
+ rclone backend decompress remote: [options] [+]
- Paths are specified as `remote:path`
+This command requests decompress of file/files in a folder.
- Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
+Usage:
- ## Configuration
+ rclone backend decompress pikpak:dirpath {filename} -o password=password
+ rclone backend decompress pikpak:dirpath {filename} -o delete-src-file
- The initial setup for [premiumize.me](https://premiumize.me/) involves getting a token from premiumize.me which you
- need to do in your browser. `rclone config` walks you through it.
+An optional argument 'filename' can be specified for a file located in
+'pikpak:dirpath'. You may want to pass '-o password=password' for
+password-protected files. Also, pass '-o delete-src-file' to delete
+source files after decompression has finished.
- Here is an example of how to make a remote called `remote`. First run:
+Result:
- rclone config
+ {
+ "Decompressed": 17,
+ "SourceDeleted": 0,
+ "Errors": 0
+ }
- This will guide you through an interactive setup process:
+Limitations
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> remote Type of storage to
-configure. Enter a string value. Press Enter for the default ("").
-Choose a number from below, or type in your own value [snip] XX /
-premiumize.me "premiumizeme" [snip] Storage> premiumizeme ** See help
-for premiumizeme backend at: https://rclone.org/premiumizeme/ **
+Hashes may be empty
-Remote config Use web browser to automatically authenticate rclone with
-remote? * Say Y if the machine running rclone has a web browser you can
-use * Say N if running rclone on a (remote) machine without web browser
-access If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y If
-your browser doesn't open automatically go to the following link:
-http://127.0.0.1:53682/auth Log in and authorize rclone for access
-Waiting for code... Got code -------------------- [remote] type =
-premiumizeme token =
-{"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"}
--------------------- y) Yes this is OK e) Edit this remote d) Delete
-this remote y/e/d>
+PikPak supports the MD5 hash, but it is sometimes empty, especially for
+user-uploaded files.
+Deleted files still visible with trashed-only
- See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a
- machine with no Internet browser available.
+Deleted files will still be visible with --pikpak-trashed-only even
+after the trash is emptied. This goes away after a few days.
- Note that rclone runs a webserver on your local machine to collect the
- token as returned from premiumize.me. This only runs from the moment it opens
- your browser to the moment you get back the verification code. This
- is on `http://127.0.0.1:53682/` and this it may require you to unblock
- it temporarily if you are running a host firewall.
+premiumize.me
- Once configured you can then use `rclone` like this,
+Paths are specified as remote:path
- List directories in top level of your premiumize.me
+Paths may be as deep as required, e.g. remote:directory/subdirectory.
- rclone lsd remote:
+Configuration
- List all the files in your premiumize.me
+The initial setup for premiumize.me involves getting a token from
+premiumize.me which you need to do in your browser. rclone config walks
+you through it.
- rclone ls remote:
+Here is an example of how to make a remote called remote. First run:
- To copy a local directory to an premiumize.me directory called backup
+ rclone config
- rclone copy /home/source remote:backup
+This will guide you through an interactive setup process:
- ### Modification times and hashes
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> remote
+ Type of storage to configure.
+ Enter a string value. Press Enter for the default ("").
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / premiumize.me
+ \ "premiumizeme"
+ [snip]
+ Storage> premiumizeme
+ ** See help for premiumizeme backend at: https://rclone.org/premiumizeme/ **
- premiumize.me does not support modification times or hashes, therefore
- syncing will default to `--size-only` checking. Note that using
- `--update` will work.
+ Remote config
+ Use web browser to automatically authenticate rclone with remote?
+ * Say Y if the machine running rclone has a web browser you can use
+ * Say N if running rclone on a (remote) machine without web browser access
+ If not sure try Y. If Y failed, try N.
+ y) Yes
+ n) No
+ y/n> y
+ If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
+ Log in and authorize rclone for access
+ Waiting for code...
+ Got code
+ --------------------
+ [remote]
+ type = premiumizeme
+ token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"}
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d>
- ### Restricted filename characters
+See the remote setup docs for how to set it up on a machine with no
+Internet browser available.
- In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters)
- the following characters are also replaced:
+Note that rclone runs a webserver on your local machine to collect the
+token as returned from premiumize.me. This only runs from the moment it
+opens your browser to the moment you get back the verification code.
+This is on http://127.0.0.1:53682/ and it may require you to
+unblock it temporarily if you are running a host firewall.
- | Character | Value | Replacement |
- | --------- |:-----:|:-----------:|
- | \ | 0x5C | \ |
- | " | 0x22 | " |
+Once configured you can then use rclone like this,
- Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
- as they can't be used in JSON strings.
+List directories in top level of your premiumize.me
+ rclone lsd remote:
- ### Standard options
+List all the files in your premiumize.me
- Here are the Standard options specific to premiumizeme (premiumize.me).
+ rclone ls remote:
- #### --premiumizeme-client-id
+To copy a local directory to a premiumize.me directory called backup
- OAuth Client Id.
+ rclone copy /home/source remote:backup
- Leave blank normally.
+Modification times and hashes
- Properties:
+premiumize.me does not support modification times or hashes, therefore
+syncing will default to --size-only checking. Note that using --update
+will work.
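+
+For example (illustrative paths):
+
+    rclone copy --update /home/source remote:backup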
- - Config: client_id
- - Env Var: RCLONE_PREMIUMIZEME_CLIENT_ID
- - Type: string
- - Required: false
+Restricted filename characters
- #### --premiumizeme-client-secret
+In addition to the default restricted characters set the following
+characters are also replaced:
- OAuth Client Secret.
+ Character Value Replacement
+ ----------- ------- -------------
+ \ 0x5C \
+ " 0x22 "
- Leave blank normally.
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON
+strings.
- Properties:
+Standard options
- - Config: client_secret
- - Env Var: RCLONE_PREMIUMIZEME_CLIENT_SECRET
- - Type: string
- - Required: false
+Here are the Standard options specific to premiumizeme (premiumize.me).
- #### --premiumizeme-api-key
+--premiumizeme-client-id
- API Key.
+OAuth Client Id.
- This is not normally used - use oauth instead.
+Leave blank normally.
+Properties:
- Properties:
+- Config: client_id
+- Env Var: RCLONE_PREMIUMIZEME_CLIENT_ID
+- Type: string
+- Required: false
- - Config: api_key
- - Env Var: RCLONE_PREMIUMIZEME_API_KEY
- - Type: string
- - Required: false
+--premiumizeme-client-secret
- ### Advanced options
+OAuth Client Secret.
- Here are the Advanced options specific to premiumizeme (premiumize.me).
+Leave blank normally.
- #### --premiumizeme-token
+Properties:
- OAuth Access Token as a JSON blob.
+- Config: client_secret
+- Env Var: RCLONE_PREMIUMIZEME_CLIENT_SECRET
+- Type: string
+- Required: false
- Properties:
+--premiumizeme-api-key
- - Config: token
- - Env Var: RCLONE_PREMIUMIZEME_TOKEN
- - Type: string
- - Required: false
+API Key.
- #### --premiumizeme-auth-url
+This is not normally used - use oauth instead.
- Auth server URL.
+Properties:
- Leave blank to use the provider defaults.
+- Config: api_key
+- Env Var: RCLONE_PREMIUMIZEME_API_KEY
+- Type: string
+- Required: false
- Properties:
+Advanced options
- - Config: auth_url
- - Env Var: RCLONE_PREMIUMIZEME_AUTH_URL
- - Type: string
- - Required: false
+Here are the Advanced options specific to premiumizeme (premiumize.me).
- #### --premiumizeme-token-url
+--premiumizeme-token
- Token server url.
+OAuth Access Token as a JSON blob.
- Leave blank to use the provider defaults.
+Properties:
- Properties:
+- Config: token
+- Env Var: RCLONE_PREMIUMIZEME_TOKEN
+- Type: string
+- Required: false
- - Config: token_url
- - Env Var: RCLONE_PREMIUMIZEME_TOKEN_URL
- - Type: string
- - Required: false
+--premiumizeme-auth-url
- #### --premiumizeme-encoding
+Auth server URL.
- The encoding for the backend.
+Leave blank to use the provider defaults.
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+Properties:
- Properties:
+- Config: auth_url
+- Env Var: RCLONE_PREMIUMIZEME_AUTH_URL
+- Type: string
+- Required: false
- - Config: encoding
- - Env Var: RCLONE_PREMIUMIZEME_ENCODING
- - Type: Encoding
- - Default: Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot
+--premiumizeme-token-url
+Token server url.
+Leave blank to use the provider defaults.
- ## Limitations
+Properties:
- Note that premiumize.me is case insensitive so you can't have a file called
- "Hello.doc" and one called "hello.doc".
+- Config: token_url
+- Env Var: RCLONE_PREMIUMIZEME_TOKEN_URL
+- Type: string
+- Required: false
- premiumize.me file names can't have the `\` or `"` characters in.
- rclone maps these to and from an identical looking unicode equivalents
- `\` and `"`
+--premiumizeme-encoding
- premiumize.me only supports filenames up to 255 characters in length.
+The encoding for the backend.
- # Proton Drive
+See the encoding section in the overview for more info.
- [Proton Drive](https://proton.me/drive) is an end-to-end encrypted Swiss vault
- for your files that protects your data.
+Properties:
- This is an rclone backend for Proton Drive which supports the file transfer
- features of Proton Drive using the same client-side encryption.
+- Config: encoding
+- Env Var: RCLONE_PREMIUMIZEME_ENCODING
+- Type: Encoding
+- Default: Slash,DoubleQuote,BackSlash,Del,Ctl,InvalidUtf8,Dot
- Due to the fact that Proton Drive doesn't publish its API documentation, this
- backend is implemented with best efforts by reading the open-sourced client
- source code and observing the Proton Drive traffic in the browser.
+--premiumizeme-description
- **NB** This backend is currently in Beta. It is believed to be correct
- and all the integration tests pass. However the Proton Drive protocol
- has evolved over time there may be accounts it is not compatible
- with. Please [post on the rclone forum](https://forum.rclone.org/) if
- you find an incompatibility.
+Description of the remote
- Paths are specified as `remote:path`
+Properties:
- Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
+- Config: description
+- Env Var: RCLONE_PREMIUMIZEME_DESCRIPTION
+- Type: string
+- Required: false
- ## Configurations
+Limitations
- Here is an example of how to make a remote called `remote`. First run:
+Note that premiumize.me is case insensitive so you can't have a file
+called "Hello.doc" and one called "hello.doc".
- rclone config
+premiumize.me file names can't contain the \ or " characters. rclone
+maps these to and from identical-looking unicode equivalents \ and
+"
- This will guide you through an interactive setup process:
+premiumize.me only supports filenames up to 255 characters in length.
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> remote Type of storage to
-configure. Choose a number from below, or type in your own value [snip]
-XX / Proton Drive "Proton Drive" [snip] Storage> protondrive User name
-user> you@protonmail.com Password. y) Yes type in my own password g)
-Generate random password n) No leave this optional password blank y/g/n>
-y Enter the password: password: Confirm the password: password: Option
-2fa. 2FA code (if the account requires one) Enter a value. Press Enter
-to leave empty. 2fa> 123456 Remote config -------------------- [remote]
-type = protondrive user = you@protonmail.com pass = *** ENCRYPTED ***
--------------------- y) Yes this is OK e) Edit this remote d) Delete
-this remote y/e/d> y
+Proton Drive
+Proton Drive is an end-to-end encrypted Swiss vault for your files that
+protects your data.
- **NOTE:** The Proton Drive encryption keys need to have been already generated
- after a regular login via the browser, otherwise attempting to use the
- credentials in `rclone` will fail.
+This is an rclone backend for Proton Drive which supports the file
+transfer features of Proton Drive using the same client-side encryption.
- Once configured you can then use `rclone` like this,
+Due to the fact that Proton Drive doesn't publish its API documentation,
+this backend is implemented with best efforts by reading the
+open-sourced client source code and observing the Proton Drive traffic
+in the browser.
- List directories in top level of your Proton Drive
+NB This backend is currently in Beta. It is believed to be correct and
+all the integration tests pass. However, the Proton Drive protocol has
+evolved over time, so there may be accounts it is not compatible with.
+Please post on the rclone forum if you find an incompatibility.
- rclone lsd remote:
+Paths are specified as remote:path
- List all the files in your Proton Drive
+Paths may be as deep as required, e.g. remote:directory/subdirectory.
- rclone ls remote:
+Configurations
- To copy a local directory to an Proton Drive directory called backup
+Here is an example of how to make a remote called remote. First run:
- rclone copy /home/source remote:backup
+ rclone config
- ### Modification times and hashes
-
- Proton Drive Bridge does not support updating modification times yet.
-
- The SHA1 hash algorithm is supported.
-
- ### Restricted filename characters
-
- Invalid UTF-8 bytes will be [replaced](https://rclone.org/overview/#invalid-utf8), also left and
- right spaces will be removed ([code reference](https://github.com/ProtonMail/WebClients/blob/b4eba99d241af4fdae06ff7138bd651a40ef5d3c/applications/drive/src/app/store/_links/validation.ts#L51))
-
- ### Duplicated files
-
- Proton Drive can not have two files with exactly the same name and path. If the
- conflict occurs, depending on the advanced config, the file might or might not
- be overwritten.
-
- ### [Mailbox password](https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password)
-
- Please set your mailbox password in the advanced config section.
-
- ### Caching
-
- The cache is currently built for the case when the rclone is the only instance
- performing operations to the mount point. The event system, which is the proton
- API system that provides visibility of what has changed on the drive, is yet
- to be implemented, so updates from other clients won’t be reflected in the
- cache. Thus, if there are concurrent clients accessing the same mount point,
- then we might have a problem with caching the stale data.
-
-
- ### Standard options
-
- Here are the Standard options specific to protondrive (Proton Drive).
-
- #### --protondrive-username
-
- The username of your proton account
-
- Properties:
-
- - Config: username
- - Env Var: RCLONE_PROTONDRIVE_USERNAME
- - Type: string
- - Required: true
-
- #### --protondrive-password
-
- The password of your proton account.
-
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
- Properties:
-
- - Config: password
- - Env Var: RCLONE_PROTONDRIVE_PASSWORD
- - Type: string
- - Required: true
-
- #### --protondrive-2fa
-
- The 2FA code
-
- The value can also be provided with --protondrive-2fa=000000
-
- The 2FA code of your proton drive account if the account is set up with
- two-factor authentication
-
- Properties:
-
- - Config: 2fa
- - Env Var: RCLONE_PROTONDRIVE_2FA
- - Type: string
- - Required: false
-
- ### Advanced options
-
- Here are the Advanced options specific to protondrive (Proton Drive).
-
- #### --protondrive-mailbox-password
-
- The mailbox password of your two-password proton account.
-
- For more information regarding the mailbox password, please check the
- following official knowledge base article:
- https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password
-
-
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
- Properties:
-
- - Config: mailbox_password
- - Env Var: RCLONE_PROTONDRIVE_MAILBOX_PASSWORD
- - Type: string
- - Required: false
-
- #### --protondrive-client-uid
-
- Client uid key (internal use only)
-
- Properties:
-
- - Config: client_uid
- - Env Var: RCLONE_PROTONDRIVE_CLIENT_UID
- - Type: string
- - Required: false
-
- #### --protondrive-client-access-token
-
- Client access token key (internal use only)
-
- Properties:
-
- - Config: client_access_token
- - Env Var: RCLONE_PROTONDRIVE_CLIENT_ACCESS_TOKEN
- - Type: string
- - Required: false
-
- #### --protondrive-client-refresh-token
-
- Client refresh token key (internal use only)
-
- Properties:
-
- - Config: client_refresh_token
- - Env Var: RCLONE_PROTONDRIVE_CLIENT_REFRESH_TOKEN
- - Type: string
- - Required: false
-
- #### --protondrive-client-salted-key-pass
-
- Client salted key pass key (internal use only)
-
- Properties:
-
- - Config: client_salted_key_pass
- - Env Var: RCLONE_PROTONDRIVE_CLIENT_SALTED_KEY_PASS
- - Type: string
- - Required: false
-
- #### --protondrive-encoding
-
- The encoding for the backend.
-
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
- Properties:
-
- - Config: encoding
- - Env Var: RCLONE_PROTONDRIVE_ENCODING
- - Type: Encoding
- - Default: Slash,LeftSpace,RightSpace,InvalidUtf8,Dot
-
- #### --protondrive-original-file-size
-
- Return the file size before encryption
-
- The size of the encrypted file will be different from (bigger than) the
- original file size. Unless there is a reason to return the file size
- after encryption is performed, otherwise, set this option to true, as
- features like Open() which will need to be supplied with original content
- size, will fail to operate properly
-
- Properties:
-
- - Config: original_file_size
- - Env Var: RCLONE_PROTONDRIVE_ORIGINAL_FILE_SIZE
- - Type: bool
- - Default: true
-
- #### --protondrive-app-version
-
- The app version string
-
- The app version string indicates the client that is currently performing
- the API request. This information is required and will be sent with every
- API request.
-
- Properties:
-
- - Config: app_version
- - Env Var: RCLONE_PROTONDRIVE_APP_VERSION
- - Type: string
- - Default: "macos-drive@1.0.0-alpha.1+rclone"
-
- #### --protondrive-replace-existing-draft
-
- Create a new revision when filename conflict is detected
-
- When a file upload is cancelled or failed before completion, a draft will be
- created and the subsequent upload of the same file to the same location will be
- reported as a conflict.
-
- The value can also be set by --protondrive-replace-existing-draft=true
-
- If the option is set to true, the draft will be replaced and then the upload
- operation will restart. If there are other clients also uploading at the same
- file location at the same time, the behavior is currently unknown. Need to set
- to true for integration tests.
- If the option is set to false, an error "a draft exist - usually this means a
- file is being uploaded at another client, or, there was a failed upload attempt"
- will be returned, and no upload will happen.
-
- Properties:
-
- - Config: replace_existing_draft
- - Env Var: RCLONE_PROTONDRIVE_REPLACE_EXISTING_DRAFT
- - Type: bool
- - Default: false
-
- #### --protondrive-enable-caching
-
- Caches the files and folders metadata to reduce API calls
-
- Notice: If you are mounting ProtonDrive as a VFS, please disable this feature,
- as the current implementation doesn't update or clear the cache when there are
- external changes.
-
- The files and folders on ProtonDrive are represented as links with keyrings,
- which can be cached to improve performance and be friendly to the API server.
-
- The cache is currently built for the case when the rclone is the only instance
- performing operations to the mount point. The event system, which is the proton
- API system that provides visibility of what has changed on the drive, is yet
- to be implemented, so updates from other clients won’t be reflected in the
- cache. Thus, if there are concurrent clients accessing the same mount point,
- then we might have a problem with caching the stale data.
-
- Properties:
-
- - Config: enable_caching
- - Env Var: RCLONE_PROTONDRIVE_ENABLE_CACHING
- - Type: bool
- - Default: true
-
-
-
- ## Limitations
-
- This backend uses the
- [Proton-API-Bridge](https://github.com/henrybear327/Proton-API-Bridge), which
- is based on [go-proton-api](https://github.com/henrybear327/go-proton-api), a
- fork of the [official repo](https://github.com/ProtonMail/go-proton-api).
-
- There is no official API documentation available from Proton Drive. But, thanks
- to Proton open sourcing [proton-go-api](https://github.com/ProtonMail/go-proton-api)
- and the web, iOS, and Android client codebases, we don't need to completely
- reverse engineer the APIs by observing the web client traffic!
-
- [proton-go-api](https://github.com/ProtonMail/go-proton-api) provides the basic
- building blocks of API calls and error handling, such as 429 exponential
- back-off, but it is pretty much just a barebone interface to the Proton API.
- For example, the encryption and decryption of the Proton Drive file are not
- provided in this library.
-
- The Proton-API-Bridge, attempts to bridge the gap, so rclone can be built on
- top of this quickly. This codebase handles the intricate tasks before and after
- calling Proton APIs, particularly the complex encryption scheme, allowing
- developers to implement features for other software on top of this codebase.
- There are likely quite a few errors in this library, as there isn't official
- documentation available.
-
- # put.io
-
- Paths are specified as `remote:path`
-
- put.io paths may be as deep as required, e.g.
- `remote:directory/subdirectory`.
-
- ## Configuration
-
- The initial setup for put.io involves getting a token from put.io
- which you need to do in your browser. `rclone config` walks you
- through it.
-
- Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
- This will guide you through an interactive setup process:
-
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> putio Type of storage to
-configure. Enter a string value. Press Enter for the default ("").
-Choose a number from below, or type in your own value [snip] XX / Put.io
- "putio" [snip] Storage> putio ** See help for putio backend at:
-https://rclone.org/putio/ **
-
-Remote config Use web browser to automatically authenticate rclone with
-remote? * Say Y if the machine running rclone has a web browser you can
-use * Say N if running rclone on a (remote) machine without web browser
-access If not sure try Y. If Y failed, try N. y) Yes n) No y/n> y If
-your browser doesn't open automatically go to the following link:
-http://127.0.0.1:53682/auth Log in and authorize rclone for access
-Waiting for code... Got code -------------------- [putio] type = putio
-token = {"access_token":"XXXXXXXX","expiry":"0001-01-01T00:00:00Z"}
--------------------- y) Yes this is OK e) Edit this remote d) Delete
-this remote y/e/d> y Current remotes:
-
-Name Type ==== ==== putio putio
-
-e) Edit existing remote
-f) New remote
-g) Delete remote
-h) Rename remote
-i) Copy remote
-j) Set configuration password
-k) Quit config e/n/d/r/c/s/q> q
-
-
- See the [remote setup docs](https://rclone.org/remote_setup/) for how to set it up on a
- machine with no Internet browser available.
-
- Note that rclone runs a webserver on your local machine to collect the
- token as returned from put.io if using web browser to automatically
- authenticate. This only
- runs from the moment it opens your browser to the moment you get back
- the verification code. This is on `http://127.0.0.1:53682/` and this
- it may require you to unblock it temporarily if you are running a host
- firewall, or use manual mode.
-
- You can then use it like this,
-
- List directories in top level of your put.io
-
- rclone lsd remote:
-
- List all the files in your put.io
-
- rclone ls remote:
-
- To copy a local directory to a put.io directory called backup
-
- rclone copy /home/source remote:backup
-
- ### Restricted filename characters
-
- In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters)
- the following characters are also replaced:
-
- | Character | Value | Replacement |
- | --------- |:-----:|:-----------:|
- | \ | 0x5C | \ |
-
- Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
- as they can't be used in JSON strings.
-
-
- ### Standard options
-
- Here are the Standard options specific to putio (Put.io).
-
- #### --putio-client-id
-
- OAuth Client Id.
-
- Leave blank normally.
-
- Properties:
-
- - Config: client_id
- - Env Var: RCLONE_PUTIO_CLIENT_ID
- - Type: string
- - Required: false
-
- #### --putio-client-secret
-
- OAuth Client Secret.
-
- Leave blank normally.
-
- Properties:
-
- - Config: client_secret
- - Env Var: RCLONE_PUTIO_CLIENT_SECRET
- - Type: string
- - Required: false
-
- ### Advanced options
-
- Here are the Advanced options specific to putio (Put.io).
-
- #### --putio-token
-
- OAuth Access Token as a JSON blob.
-
- Properties:
-
- - Config: token
- - Env Var: RCLONE_PUTIO_TOKEN
- - Type: string
- - Required: false
-
- #### --putio-auth-url
-
- Auth server URL.
-
- Leave blank to use the provider defaults.
-
- Properties:
-
- - Config: auth_url
- - Env Var: RCLONE_PUTIO_AUTH_URL
- - Type: string
- - Required: false
-
- #### --putio-token-url
-
- Token server url.
-
- Leave blank to use the provider defaults.
-
- Properties:
-
- - Config: token_url
- - Env Var: RCLONE_PUTIO_TOKEN_URL
- - Type: string
- - Required: false
-
- #### --putio-encoding
-
- The encoding for the backend.
-
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
- Properties:
-
- - Config: encoding
- - Env Var: RCLONE_PUTIO_ENCODING
- - Type: Encoding
- - Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
-
-
-
- ## Limitations
-
- put.io has rate limiting. When you hit a limit, rclone automatically
- retries after waiting the amount of time requested by the server.
-
- If you want to avoid ever hitting these limits, you may use the
- `--tpslimit` flag with a low number. Note that the imposed limits
- may be different for different operations, and may change over time.
-
- # Proton Drive
-
- [Proton Drive](https://proton.me/drive) is an end-to-end encrypted Swiss vault
- for your files that protects your data.
-
- This is an rclone backend for Proton Drive which supports the file transfer
- features of Proton Drive using the same client-side encryption.
-
- Due to the fact that Proton Drive doesn't publish its API documentation, this
- backend is implemented with best efforts by reading the open-sourced client
- source code and observing the Proton Drive traffic in the browser.
-
- **NB** This backend is currently in Beta. It is believed to be correct
- and all the integration tests pass. However the Proton Drive protocol
- has evolved over time there may be accounts it is not compatible
- with. Please [post on the rclone forum](https://forum.rclone.org/) if
- you find an incompatibility.
-
- Paths are specified as `remote:path`
-
- Paths may be as deep as required, e.g. `remote:directory/subdirectory`.
-
- ## Configurations
-
- Here is an example of how to make a remote called `remote`. First run:
-
- rclone config
-
- This will guide you through an interactive setup process:
-
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> remote Type of storage to
-configure. Choose a number from below, or type in your own value [snip]
-XX / Proton Drive "Proton Drive" [snip] Storage> protondrive User name
-user> you@protonmail.com Password. y) Yes type in my own password g)
-Generate random password n) No leave this optional password blank y/g/n>
-y Enter the password: password: Confirm the password: password: Option
-2fa. 2FA code (if the account requires one) Enter a value. Press Enter
-to leave empty. 2fa> 123456 Remote config -------------------- [remote]
-type = protondrive user = you@protonmail.com pass = *** ENCRYPTED ***
--------------------- y) Yes this is OK e) Edit this remote d) Delete
-this remote y/e/d> y
-
-
- **NOTE:** The Proton Drive encryption keys need to have been already generated
- after a regular login via the browser, otherwise attempting to use the
- credentials in `rclone` will fail.
-
- Once configured you can then use `rclone` like this,
-
- List directories in top level of your Proton Drive
-
- rclone lsd remote:
-
- List all the files in your Proton Drive
-
- rclone ls remote:
-
- To copy a local directory to an Proton Drive directory called backup
-
- rclone copy /home/source remote:backup
-
- ### Modification times and hashes
-
- Proton Drive Bridge does not support updating modification times yet.
-
- The SHA1 hash algorithm is supported.
-
- ### Restricted filename characters
-
- Invalid UTF-8 bytes will be [replaced](https://rclone.org/overview/#invalid-utf8), also left and
- right spaces will be removed ([code reference](https://github.com/ProtonMail/WebClients/blob/b4eba99d241af4fdae06ff7138bd651a40ef5d3c/applications/drive/src/app/store/_links/validation.ts#L51))
-
- ### Duplicated files
-
- Proton Drive can not have two files with exactly the same name and path. If the
- conflict occurs, depending on the advanced config, the file might or might not
- be overwritten.
-
- ### [Mailbox password](https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password)
-
- Please set your mailbox password in the advanced config section.
-
- ### Caching
-
- The cache is currently built for the case when the rclone is the only instance
- performing operations to the mount point. The event system, which is the proton
- API system that provides visibility of what has changed on the drive, is yet
- to be implemented, so updates from other clients won’t be reflected in the
- cache. Thus, if there are concurrent clients accessing the same mount point,
- then we might have a problem with caching the stale data.
-
-
- ### Standard options
-
- Here are the Standard options specific to protondrive (Proton Drive).
-
- #### --protondrive-username
-
- The username of your proton account
-
- Properties:
-
- - Config: username
- - Env Var: RCLONE_PROTONDRIVE_USERNAME
- - Type: string
- - Required: true
-
- #### --protondrive-password
-
- The password of your proton account.
-
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
- Properties:
-
- - Config: password
- - Env Var: RCLONE_PROTONDRIVE_PASSWORD
- - Type: string
- - Required: true
-
- #### --protondrive-2fa
-
- The 2FA code
-
- The value can also be provided with --protondrive-2fa=000000
-
- The 2FA code of your proton drive account if the account is set up with
- two-factor authentication
-
- Properties:
-
- - Config: 2fa
- - Env Var: RCLONE_PROTONDRIVE_2FA
- - Type: string
- - Required: false
-
- ### Advanced options
-
- Here are the Advanced options specific to protondrive (Proton Drive).
-
- #### --protondrive-mailbox-password
-
- The mailbox password of your two-password proton account.
-
- For more information regarding the mailbox password, please check the
- following official knowledge base article:
- https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password
-
-
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
-
- Properties:
-
- - Config: mailbox_password
- - Env Var: RCLONE_PROTONDRIVE_MAILBOX_PASSWORD
- - Type: string
- - Required: false
-
- #### --protondrive-client-uid
-
- Client uid key (internal use only)
-
- Properties:
-
- - Config: client_uid
- - Env Var: RCLONE_PROTONDRIVE_CLIENT_UID
- - Type: string
- - Required: false
-
- #### --protondrive-client-access-token
-
- Client access token key (internal use only)
-
- Properties:
-
- - Config: client_access_token
- - Env Var: RCLONE_PROTONDRIVE_CLIENT_ACCESS_TOKEN
- - Type: string
- - Required: false
-
- #### --protondrive-client-refresh-token
-
- Client refresh token key (internal use only)
-
- Properties:
-
- - Config: client_refresh_token
- - Env Var: RCLONE_PROTONDRIVE_CLIENT_REFRESH_TOKEN
- - Type: string
- - Required: false
-
- #### --protondrive-client-salted-key-pass
-
- Client salted key pass key (internal use only)
-
- Properties:
-
- - Config: client_salted_key_pass
- - Env Var: RCLONE_PROTONDRIVE_CLIENT_SALTED_KEY_PASS
- - Type: string
- - Required: false
-
- #### --protondrive-encoding
-
- The encoding for the backend.
-
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
-
- Properties:
-
- - Config: encoding
- - Env Var: RCLONE_PROTONDRIVE_ENCODING
- - Type: Encoding
- - Default: Slash,LeftSpace,RightSpace,InvalidUtf8,Dot
-
- #### --protondrive-original-file-size
-
- Return the file size before encryption
-
- The size of the encrypted file will be different from (bigger than) the
- original file size. Unless there is a reason to return the file size
- after encryption is performed, otherwise, set this option to true, as
- features like Open() which will need to be supplied with original content
- size, will fail to operate properly
-
- Properties:
-
- - Config: original_file_size
- - Env Var: RCLONE_PROTONDRIVE_ORIGINAL_FILE_SIZE
- - Type: bool
- - Default: true
-
- #### --protondrive-app-version
-
- The app version string
-
- The app version string indicates the client that is currently performing
- the API request. This information is required and will be sent with every
- API request.
-
- Properties:
-
- - Config: app_version
- - Env Var: RCLONE_PROTONDRIVE_APP_VERSION
- - Type: string
- - Default: "macos-drive@1.0.0-alpha.1+rclone"
-
- #### --protondrive-replace-existing-draft
-
- Create a new revision when filename conflict is detected
-
- When a file upload is cancelled or failed before completion, a draft will be
- created and the subsequent upload of the same file to the same location will be
- reported as a conflict.
-
- The value can also be set by --protondrive-replace-existing-draft=true
-
- If the option is set to true, the draft will be replaced and then the upload
- operation will restart. If there are other clients also uploading at the same
- file location at the same time, the behavior is currently unknown. Need to set
- to true for integration tests.
- If the option is set to false, an error "a draft exist - usually this means a
- file is being uploaded at another client, or, there was a failed upload attempt"
- will be returned, and no upload will happen.
-
- Properties:
-
- - Config: replace_existing_draft
- - Env Var: RCLONE_PROTONDRIVE_REPLACE_EXISTING_DRAFT
- - Type: bool
- - Default: false
-
- #### --protondrive-enable-caching
-
- Caches the files and folders metadata to reduce API calls
-
- Notice: If you are mounting ProtonDrive as a VFS, please disable this feature,
- as the current implementation doesn't update or clear the cache when there are
- external changes.
-
- The files and folders on ProtonDrive are represented as links with keyrings,
- which can be cached to improve performance and be friendly to the API server.
-
- The cache is currently built for the case when the rclone is the only instance
- performing operations to the mount point. The event system, which is the proton
- API system that provides visibility of what has changed on the drive, is yet
- to be implemented, so updates from other clients won’t be reflected in the
- cache. Thus, if there are concurrent clients accessing the same mount point,
- then we might have a problem with caching the stale data.
-
- Properties:
-
- - Config: enable_caching
- - Env Var: RCLONE_PROTONDRIVE_ENABLE_CACHING
- - Type: bool
- - Default: true
-
-
-
- ## Limitations
-
- This backend uses the
- [Proton-API-Bridge](https://github.com/henrybear327/Proton-API-Bridge), which
- is based on [go-proton-api](https://github.com/henrybear327/go-proton-api), a
- fork of the [official repo](https://github.com/ProtonMail/go-proton-api).
-
- There is no official API documentation available from Proton Drive. But, thanks
- to Proton open sourcing [proton-go-api](https://github.com/ProtonMail/go-proton-api)
- and the web, iOS, and Android client codebases, we don't need to completely
- reverse engineer the APIs by observing the web client traffic!
-
- [proton-go-api](https://github.com/ProtonMail/go-proton-api) provides the basic
- building blocks of API calls and error handling, such as 429 exponential
- back-off, but it is pretty much just a barebone interface to the Proton API.
- For example, the encryption and decryption of the Proton Drive file are not
- provided in this library.
-
- The Proton-API-Bridge, attempts to bridge the gap, so rclone can be built on
- top of this quickly. This codebase handles the intricate tasks before and after
- calling Proton APIs, particularly the complex encryption scheme, allowing
- developers to implement features for other software on top of this codebase.
- There are likely quite a few errors in this library, as there isn't official
- documentation available.
-
- # Seafile
-
- This is a backend for the [Seafile](https://www.seafile.com/) storage service:
- - It works with both the free community edition or the professional edition.
- - Seafile versions 6.x, 7.x, 8.x and 9.x are all supported.
- - Encrypted libraries are also supported.
- - It supports 2FA enabled users
- - Using a Library API Token is **not** supported
-
- ## Configuration
-
- There are two distinct modes you can setup your remote:
- - you point your remote to the **root of the server**, meaning you don't specify a library during the configuration:
- Paths are specified as `remote:library`. You may put subdirectories in too, e.g. `remote:library/path/to/dir`.
- - you point your remote to a specific library during the configuration:
- Paths are specified as `remote:path/to/dir`. **This is the recommended mode when using encrypted libraries**. (_This mode is possibly slightly faster than the root mode_)
-
- ### Configuration in root mode
-
- Here is an example of making a seafile configuration for a user with **no** two-factor authentication. First run
-
- rclone config
-
- This will guide you through an interactive setup process. To authenticate
- you will need the URL of your server, your email (or username) and your password.
-
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> seafile Type of storage to
-configure. Enter a string value. Press Enter for the default ("").
-Choose a number from below, or type in your own value [snip] XX /
-Seafile "seafile" [snip] Storage> seafile ** See help for seafile
-backend at: https://rclone.org/seafile/ **
-
-URL of seafile host to connect to Enter a string value. Press Enter for
-the default (""). Choose a number from below, or type in your own value
-1 / Connect to cloud.seafile.com "https://cloud.seafile.com/" url>
-http://my.seafile.server/ User name (usually email address) Enter a
-string value. Press Enter for the default (""). user> me@example.com
-Password y) Yes type in my own password g) Generate random password n)
-No leave this optional password blank (default) y/g> y Enter the
-password: password: Confirm the password: password: Two-factor
-authentication ('true' if the account has 2FA enabled) Enter a boolean
-value (true or false). Press Enter for the default ("false"). 2fa> false
-Name of the library. Leave blank to access all non-encrypted libraries.
-Enter a string value. Press Enter for the default (""). library> Library
-password (for encrypted libraries only). Leave blank if you pass it
-through the command line. y) Yes type in my own password g) Generate
-random password n) No leave this optional password blank (default)
-y/g/n> n Edit advanced config? (y/n) y) Yes n) No (default) y/n> n
-Remote config Two-factor authentication is not enabled on this account.
--------------------- [seafile] type = seafile url =
-http://my.seafile.server/ user = me@example.com pass = *** ENCRYPTED ***
-2fa = false -------------------- y) Yes this is OK (default) e) Edit
-this remote d) Delete this remote y/e/d> y
-
-
- This remote is called `seafile`. It's pointing to the root of your seafile server and can now be used like this:
-
- See all libraries
-
- rclone lsd seafile:
-
- Create a new library
-
- rclone mkdir seafile:library
-
- List the contents of a library
-
- rclone ls seafile:library
-
- Sync `/home/local/directory` to the remote library, deleting any
- excess files in the library.
-
- rclone sync --interactive /home/local/directory seafile:library
-
- ### Configuration in library mode
-
- Here's an example of a configuration in library mode with a user that has the two-factor authentication enabled. Your 2FA code will be asked at the end of the configuration, and will attempt to authenticate you:
-
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> seafile Type of storage to
-configure. Enter a string value. Press Enter for the default ("").
-Choose a number from below, or type in your own value [snip] XX /
-Seafile "seafile" [snip] Storage> seafile ** See help for seafile
-backend at: https://rclone.org/seafile/ **
-
-URL of seafile host to connect to Enter a string value. Press Enter for
-the default (""). Choose a number from below, or type in your own value
-1 / Connect to cloud.seafile.com "https://cloud.seafile.com/" url>
-http://my.seafile.server/ User name (usually email address) Enter a
-string value. Press Enter for the default (""). user> me@example.com
-Password y) Yes type in my own password g) Generate random password n)
-No leave this optional password blank (default) y/g> y Enter the
-password: password: Confirm the password: password: Two-factor
-authentication ('true' if the account has 2FA enabled) Enter a boolean
-value (true or false). Press Enter for the default ("false"). 2fa> true
-Name of the library. Leave blank to access all non-encrypted libraries.
-Enter a string value. Press Enter for the default (""). library> My
-Library Library password (for encrypted libraries only). Leave blank if
-you pass it through the command line. y) Yes type in my own password g)
-Generate random password n) No leave this optional password blank
-(default) y/g/n> n Edit advanced config? (y/n) y) Yes n) No (default)
-y/n> n Remote config Two-factor authentication: please enter your 2FA
-code 2fa code> 123456 Authenticating... Success! --------------------
-[seafile] type = seafile url = http://my.seafile.server/ user =
-me@example.com pass = 2fa = true library = My Library
--------------------- y) Yes this is OK (default) e) Edit this remote d)
-Delete this remote y/e/d> y
-
-
- You'll notice your password is blank in the configuration. It's because we only need the password to authenticate you once.
-
- You specified `My Library` during the configuration. The root of the remote is pointing at the
- root of the library `My Library`:
-
- See all files in the library:
-
- rclone lsd seafile:
-
- Create a new directory inside the library
-
- rclone mkdir seafile:directory
-
- List the contents of a directory
-
- rclone ls seafile:directory
-
- Sync `/home/local/directory` to the remote library, deleting any
- excess files in the library.
-
- rclone sync --interactive /home/local/directory seafile:
-
-
- ### --fast-list
-
- Seafile version 7+ supports `--fast-list` which allows you to use fewer
- transactions in exchange for more memory. See the [rclone
- docs](https://rclone.org/docs/#fast-list) for more details.
- Please note this is not supported on seafile server version 6.x
-
-
- ### Restricted filename characters
-
- In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters)
- the following characters are also replaced:
-
- | Character | Value | Replacement |
- | --------- |:-----:|:-----------:|
- | / | 0x2F | / |
- | " | 0x22 | " |
- | \ | 0x5C | \ |
-
- Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8),
- as they can't be used in JSON strings.
-
- ### Seafile and rclone link
-
- Rclone supports generating share links for non-encrypted libraries only.
- They can either be for a file or a directory:
-
-rclone link seafile:seafile-tutorial.doc
-http://my.seafile.server/f/fdcd8a2f93f84b8b90f4/
-
-
- or if run on a directory you will get:
-
-rclone link seafile:dir http://my.seafile.server/d/9ea2455f6f55478bbb0d/
-
-
- Please note a share link is unique for each file or directory. If you run a link command on a file/dir
- that has already been shared, you will get the exact same link.
-
- ### Compatibility
-
- It has been actively developed using the [seafile docker image](https://github.com/haiwen/seafile-docker) of these versions:
- - 6.3.4 community edition
- - 7.0.5 community edition
- - 7.1.3 community edition
- - 9.0.10 community edition
-
- Versions below 6.0 are not supported.
- Versions between 6.0 and 6.3 haven't been tested and might not work properly.
-
- Each new version of `rclone` is automatically tested against the [latest docker image](https://hub.docker.com/r/seafileltd/seafile-mc/) of the seafile community server.
-
-
- ### Standard options
-
- Here are the Standard options specific to seafile (seafile).
-
- #### --seafile-url
-
- URL of seafile host to connect to.
-
- Properties:
-
- - Config: url
- - Env Var: RCLONE_SEAFILE_URL
- - Type: string
- - Required: true
- - Examples:
- - "https://cloud.seafile.com/"
- - Connect to cloud.seafile.com.
-
- #### --seafile-user
-
- User name (usually email address).
-
- Properties:
-
- - Config: user
- - Env Var: RCLONE_SEAFILE_USER
- - Type: string
- - Required: true
-
- #### --seafile-pass
+This will guide you through an interactive setup process:
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> remote
+ Type of storage to configure.
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / Proton Drive
+ \ "Proton Drive"
+ [snip]
+ Storage> protondrive
+ User name
+ user> you@protonmail.com
Password.
+ y) Yes type in my own password
+ g) Generate random password
+ n) No leave this optional password blank
+ y/g/n> y
+ Enter the password:
+ password:
+ Confirm the password:
+ password:
+ Option 2fa.
+ 2FA code (if the account requires one)
+ Enter a value. Press Enter to leave empty.
+ 2fa> 123456
+ Remote config
+ --------------------
+ [remote]
+ type = protondrive
+ user = you@protonmail.com
+ pass = *** ENCRYPTED ***
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
+NOTE: The Proton Drive encryption keys need to have been already
+generated after a regular login via the browser, otherwise attempting to
+use the credentials in rclone will fail.
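+
+As a non-interactive alternative to the flow above, the remote can also
+be created with rclone config create. This is only a sketch, assuming a
+reasonably recent rclone that accepts key=value pairs and the config
+keys listed under the Standard options below (username, password, 2fa);
+the password must be obscured first, e.g. with rclone obscure:
+
+    rclone config create remote protondrive \
+        username=you@protonmail.com \
+        password=$(rclone obscure 'your-login-password') \
+        2fa=123456
+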
- Properties:
+Once configured you can then use rclone like this,
- - Config: pass
- - Env Var: RCLONE_SEAFILE_PASS
- - Type: string
- - Required: false
+List directories in top level of your Proton Drive
- #### --seafile-2fa
+ rclone lsd remote:
- Two-factor authentication ('true' if the account has 2FA enabled).
+List all the files in your Proton Drive
- Properties:
+ rclone ls remote:
- - Config: 2fa
- - Env Var: RCLONE_SEAFILE_2FA
- - Type: bool
- - Default: false
+To copy a local directory to a Proton Drive directory called backup
- #### --seafile-library
+ rclone copy /home/source remote:backup
- Name of the library.
+Modification times and hashes
- Leave blank to access all non-encrypted libraries.
+Proton Drive Bridge does not support updating modification times yet.
- Properties:
+The SHA1 hash algorithm is supported.
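+
+As SHA1 is supported, transfers can be verified against the remote. A
+possible sketch (the backup path is just an example):
+
+    rclone sha1sum remote:backup
+    rclone check /home/source remote:backup
+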
- - Config: library
- - Env Var: RCLONE_SEAFILE_LIBRARY
- - Type: string
- - Required: false
+Restricted filename characters
- #### --seafile-library-key
+Invalid UTF-8 bytes will be replaced, and left and right spaces will be
+removed (code reference).
- Library password (for encrypted libraries only).
+Duplicated files
- Leave blank if you pass it through the command line.
+Proton Drive cannot have two files with exactly the same name and path.
+If a conflict occurs, depending on the advanced config, the file might
+or might not be overwritten.
- **NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/).
+Mailbox password
- Properties:
+Please set your mailbox password in the advanced config section.
- - Config: library_key
- - Env Var: RCLONE_SEAFILE_LIBRARY_KEY
- - Type: string
- - Required: false
+Caching
- #### --seafile-auth-token
+The cache is currently built for the case when rclone is the only
+instance performing operations on the mount point. The event system,
+which is the Proton API system that provides visibility of what has
+changed on the drive, is yet to be implemented, so updates from other
+clients won’t be reflected in the cache. Thus, if there are concurrent
+clients accessing the same mount point, stale data may end up being
+cached.
- Authentication token.
+Standard options
- Properties:
+Here are the Standard options specific to protondrive (Proton Drive).
- - Config: auth_token
- - Env Var: RCLONE_SEAFILE_AUTH_TOKEN
- - Type: string
- - Required: false
+--protondrive-username
- ### Advanced options
+The username of your proton account
- Here are the Advanced options specific to seafile (seafile).
+Properties:
- #### --seafile-create-library
+- Config: username
+- Env Var: RCLONE_PROTONDRIVE_USERNAME
+- Type: string
+- Required: true
- Should rclone create a library if it doesn't exist.
+--protondrive-password
- Properties:
+The password of your proton account.
- - Config: create_library
- - Env Var: RCLONE_SEAFILE_CREATE_LIBRARY
- - Type: bool
- - Default: false
+NB Input to this must be obscured - see rclone obscure.
- #### --seafile-encoding
+Properties:
- The encoding for the backend.
+- Config: password
+- Env Var: RCLONE_PROTONDRIVE_PASSWORD
+- Type: string
+- Required: true
- See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info.
+--protondrive-2fa
- Properties:
+The 2FA code
- - Config: encoding
- - Env Var: RCLONE_SEAFILE_ENCODING
- - Type: Encoding
- - Default: Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8
+The value can also be provided with --protondrive-2fa=000000
+The 2FA code of your proton drive account if the account is set up with
+two-factor authentication
+Properties:
- # SFTP
+- Config: 2fa
+- Env Var: RCLONE_PROTONDRIVE_2FA
+- Type: string
+- Required: false
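+
+Because the 2FA code expires quickly, it is usually supplied at run
+time rather than stored in the config. For example, using the flag or
+environment variable documented above (a sketch):
+
+    rclone lsd remote: --protondrive-2fa 123456
+    RCLONE_PROTONDRIVE_2FA=123456 rclone lsd remote:
+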
- SFTP is the [Secure (or SSH) File Transfer
- Protocol](https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol).
+Advanced options
- The SFTP backend can be used with a number of different providers:
+Here are the Advanced options specific to protondrive (Proton Drive).
+--protondrive-mailbox-password
- - Hetzner Storage Box
- - rsync.net
+The mailbox password of your two-password proton account.
+For more information regarding the mailbox password, please check the
+following official knowledge base article:
+https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password
- SFTP runs over SSH v2 and is installed as standard with most modern
- SSH installations.
+NB Input to this must be obscured - see rclone obscure.
- Paths are specified as `remote:path`. If the path does not begin with
- a `/` it is relative to the home directory of the user. An empty path
- `remote:` refers to the user's home directory. For example, `rclone lsd remote:`
- would list the home directory of the user configured in the rclone remote config
- (`i.e /home/sftpuser`). However, `rclone lsd remote:/` would list the root
- directory for remote machine (i.e. `/`)
+Properties:
- Note that some SFTP servers will need the leading / - Synology is a
- good example of this. rsync.net and Hetzner, on the other hand, requires users to
- OMIT the leading /.
+- Config: mailbox_password
+- Env Var: RCLONE_PROTONDRIVE_MAILBOX_PASSWORD
+- Type: string
+- Required: false
- Note that by default rclone will try to execute shell commands on
- the server, see [shell access considerations](#shell-access-considerations).
+--protondrive-client-uid
- ## Configuration
+Client uid key (internal use only)
- Here is an example of making an SFTP configuration. First run
+Properties:
- rclone config
+- Config: client_uid
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_UID
+- Type: string
+- Required: false
- This will guide you through an interactive setup process.
+--protondrive-client-access-token
-No remotes found, make a new one? n) New remote s) Set configuration
-password q) Quit config n/s/q> n name> remote Type of storage to
-configure. Choose a number from below, or type in your own value [snip]
-XX / SSH/SFTP "sftp" [snip] Storage> sftp SSH host to connect to Choose
-a number from below, or type in your own value 1 / Connect to
-example.com "example.com" host> example.com SSH username Enter a string
-value. Press Enter for the default ("$USER"). user> sftpuser SSH port
-number Enter a signed integer. Press Enter for the default (22). port>
-SSH password, leave blank to use ssh-agent. y) Yes type in my own
-password g) Generate random password n) No leave this optional password
-blank y/g/n> n Path to unencrypted PEM-encoded private key file, leave
-blank to use ssh-agent. key_file> Remote config --------------------
-[remote] host = example.com user = sftpuser port = pass = key_file =
--------------------- y) Yes this is OK e) Edit this remote d) Delete
-this remote y/e/d> y
+Client access token key (internal use only)
+Properties:
- This remote is called `remote` and can now be used like this:
+- Config: client_access_token
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_ACCESS_TOKEN
+- Type: string
+- Required: false
- See all directories in the home directory
+--protondrive-client-refresh-token
- rclone lsd remote:
+Client refresh token key (internal use only)
- See all directories in the root directory
+Properties:
- rclone lsd remote:/
+- Config: client_refresh_token
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_REFRESH_TOKEN
+- Type: string
+- Required: false
- Make a new directory
+--protondrive-client-salted-key-pass
- rclone mkdir remote:path/to/directory
+Client salted key pass key (internal use only)
- List the contents of a directory
+Properties:
- rclone ls remote:path/to/directory
+- Config: client_salted_key_pass
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_SALTED_KEY_PASS
+- Type: string
+- Required: false
- Sync `/home/local/directory` to the remote directory, deleting any
- excess files in the directory.
+--protondrive-encoding
- rclone sync --interactive /home/local/directory remote:directory
+The encoding for the backend.
- Mount the remote path `/srv/www-data/` to the local path
- `/mnt/www-data`
+See the encoding section in the overview for more info.
- rclone mount remote:/srv/www-data/ /mnt/www-data
+Properties:
- ### SSH Authentication
+- Config: encoding
+- Env Var: RCLONE_PROTONDRIVE_ENCODING
+- Type: Encoding
+- Default: Slash,LeftSpace,RightSpace,InvalidUtf8,Dot
- The SFTP remote supports three authentication methods:
+--protondrive-original-file-size
- * Password
- * Key file, including certificate signed keys
- * ssh-agent
+Return the file size before encryption
- Key files should be PEM-encoded private key files. For instance `/home/$USER/.ssh/id_rsa`.
- Only unencrypted OpenSSH or PEM encrypted files are supported.
+The size of the encrypted file will be different from (bigger than) the
+original file size. Unless there is a reason to return the file size
+after encryption, leave this option set to true, as features like
+Open(), which need to be supplied with the original content size, will
+otherwise fail to operate properly.
- The key file can be specified in either an external file (key_file) or contained within the
- rclone config file (key_pem). If using key_pem in the config file, the entry should be on a
- single line with new line ('\n' or '\r\n') separating lines. i.e.
+Properties:
- key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY-----
+- Config: original_file_size
+- Env Var: RCLONE_PROTONDRIVE_ORIGINAL_FILE_SIZE
+- Type: bool
+- Default: true
- This will generate it correctly for key_pem for use in the config:
+--protondrive-app-version
- awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa
+The app version string
- If you don't specify `pass`, `key_file`, or `key_pem` or `ask_password` then
- rclone will attempt to contact an ssh-agent. You can also specify `key_use_agent`
- to force the usage of an ssh-agent. In this case `key_file` or `key_pem` can
- also be specified to force the usage of a specific key in the ssh-agent.
+The app version string indicates the client that is currently performing
+the API request. This information is required and will be sent with
+every API request.
- Using an ssh-agent is the only way to load encrypted OpenSSH keys at the moment.
+Properties:
- If you set the `ask_password` option, rclone will prompt for a password when
- needed and no password has been configured.
+- Config: app_version
+- Env Var: RCLONE_PROTONDRIVE_APP_VERSION
+- Type: string
+- Default: "macos-drive@1.0.0-alpha.1+rclone"
- #### Certificate-signed keys
+--protondrive-replace-existing-draft
- With traditional key-based authentication, you configure your private key only,
- and the public key built into it will be used during the authentication process.
+Create a new revision when filename conflict is detected
- If you have a certificate you may use it to sign your public key, creating a
- separate SSH user certificate that should be used instead of the plain public key
- extracted from the private key. Then you must provide the path to the
- user certificate public key file in `pubkey_file`.
+When a file upload is cancelled or fails before completion, a draft
+will be created, and a subsequent upload of the same file to the same
+location will be reported as a conflict.
- Note: This is not the traditional public key paired with your private key,
- typically saved as `/home/$USER/.ssh/id_rsa.pub`. Setting this path in
- `pubkey_file` will not work.
+The value can also be set by --protondrive-replace-existing-draft=true
- Example:
+If the option is set to true, the draft will be replaced and the upload
+operation will restart. If other clients are also uploading to the same
+file location at the same time, the behavior is currently unknown. This
+needs to be set to true for the integration tests. If the option is set
+to false, the error "a draft exist - usually this means a file is being
+uploaded at another client, or, there was a failed upload attempt" will
+be returned, and no upload will happen.
-[remote] type = sftp host = example.com user = sftpuser key_file =
-~/id_rsa pubkey_file = ~/id_rsa-cert.pub
+Properties:
+- Config: replace_existing_draft
+- Env Var: RCLONE_PROTONDRIVE_REPLACE_EXISTING_DRAFT
+- Type: bool
+- Default: false
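+
+For example, to retry an upload that previously failed and left a draft
+behind, the flag described above could be passed on the command line (a
+sketch, not needed for normal use):
+
+    rclone copy /home/source remote:backup --protondrive-replace-existing-draft=true
+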
- If you concatenate a cert with a private key then you can specify the
- merged file in both places.
+--protondrive-enable-caching
- Note: the cert must come first in the file. e.g.
+Caches the files and folders metadata to reduce API calls
+
+Notice: If you are mounting ProtonDrive as a VFS, please disable this
+feature, as the current implementation doesn't update or clear the cache
+when there are external changes.
+
+The files and folders on ProtonDrive are represented as links with
+keyrings, which can be cached to improve performance and be friendly to
+the API server.
+
+The cache is currently built for the case when rclone is the only
+instance performing operations on the mount point. The event system,
+which is the Proton API system that provides visibility of what has
+changed on the drive, is yet to be implemented, so updates from other
+clients won’t be reflected in the cache. Thus, if there are concurrent
+clients accessing the same mount point, stale data may end up being
+cached.
+
+Properties:
+
+- Config: enable_caching
+- Env Var: RCLONE_PROTONDRIVE_ENABLE_CACHING
+- Type: bool
+- Default: true
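+
+Following the notice above, a mount would typically disable this cache.
+A minimal sketch, assuming a local mount point such as /mnt/protondrive:
+
+    rclone mount remote: /mnt/protondrive --protondrive-enable-caching=false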
+
+--protondrive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PROTONDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+
+This backend uses the Proton-API-Bridge, which is based on
+go-proton-api, a fork of the official repo.
+
+There is no official API documentation available from Proton Drive. But,
+thanks to Proton open sourcing proton-go-api and the web, iOS, and
+Android client codebases, we don't need to completely reverse engineer
+the APIs by observing the web client traffic!
+
+proton-go-api provides the basic building blocks of API calls and error
+handling, such as 429 exponential back-off, but it is pretty much just a
+barebone interface to the Proton API. For example, the encryption and
+decryption of the Proton Drive file are not provided in this library.
+
+The Proton-API-Bridge attempts to bridge the gap so rclone can be built
+on top of it quickly. This codebase handles the intricate tasks before
+and after calling Proton APIs, particularly the complex encryption
+scheme, allowing developers to implement features for other software on
+top of this codebase. There are likely quite a few errors in this
+library, as there isn't official documentation available.
+
+put.io
+
+Paths are specified as remote:path
+
+put.io paths may be as deep as required, e.g.
+remote:directory/subdirectory.
+
+Configuration
+
+The initial setup for put.io involves getting a token from put.io which
+you need to do in your browser. rclone config walks you through it.
+
+Here is an example of how to make a remote called remote. First run:
+
+ rclone config
+
+This will guide you through an interactive setup process:
+
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> putio
+ Type of storage to configure.
+ Enter a string value. Press Enter for the default ("").
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / Put.io
+ \ "putio"
+ [snip]
+ Storage> putio
+ ** See help for putio backend at: https://rclone.org/putio/ **
+
+ Remote config
+ Use web browser to automatically authenticate rclone with remote?
+ * Say Y if the machine running rclone has a web browser you can use
+ * Say N if running rclone on a (remote) machine without web browser access
+ If not sure try Y. If Y failed, try N.
+ y) Yes
+ n) No
+ y/n> y
+ If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
+ Log in and authorize rclone for access
+ Waiting for code...
+ Got code
+ --------------------
+ [putio]
+ type = putio
+ token = {"access_token":"XXXXXXXX","expiry":"0001-01-01T00:00:00Z"}
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+ Current remotes:
+
+ Name Type
+ ==== ====
+ putio putio
+
+ e) Edit existing remote
+ n) New remote
+ d) Delete remote
+ r) Rename remote
+ c) Copy remote
+ s) Set configuration password
+ q) Quit config
+ e/n/d/r/c/s/q> q
+
+See the remote setup docs for how to set it up on a machine with no
+Internet browser available.
+
+Note that rclone runs a webserver on your local machine to collect the
+token as returned from put.io if using web browser to automatically
+authenticate. This only runs from the moment it opens your browser to
+the moment you get back the verification code. This is on
+http://127.0.0.1:53682/ and it may require you to unblock it
+temporarily if you are running a host firewall, or use manual mode.
+
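+If the machine running rclone has no web browser, one approach (see the
+remote setup docs above) is to run the authorization step on a machine
+that does have a browser and paste the result back into the config:
+
+    rclone authorize "putio"
+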
+You can then use it like this,
+
+List directories in top level of your put.io
+
+ rclone lsd remote:
+
+List all the files in your put.io
+
+ rclone ls remote:
+
+To copy a local directory to a put.io directory called backup
+
+ rclone copy /home/source remote:backup
+
+Restricted filename characters
+
+In addition to the default restricted characters set the following
+characters are also replaced:
+
+ Character Value Replacement
+ ----------- ------- -------------
+  \           0x5C    ＼
+
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON
+strings.
+
+Standard options
+
+Here are the Standard options specific to putio (Put.io).
+
+--putio-client-id
+
+OAuth Client Id.
+
+Leave blank normally.
+
+Properties:
+
+- Config: client_id
+- Env Var: RCLONE_PUTIO_CLIENT_ID
+- Type: string
+- Required: false
+
+--putio-client-secret
+
+OAuth Client Secret.
+
+Leave blank normally.
+
+Properties:
+
+- Config: client_secret
+- Env Var: RCLONE_PUTIO_CLIENT_SECRET
+- Type: string
+- Required: false
+
+Advanced options
+
+Here are the Advanced options specific to putio (Put.io).
+
+--putio-token
+
+OAuth Access Token as a JSON blob.
+
+Properties:
+
+- Config: token
+- Env Var: RCLONE_PUTIO_TOKEN
+- Type: string
+- Required: false
+
+--putio-auth-url
+
+Auth server URL.
+
+Leave blank to use the provider defaults.
+
+Properties:
+
+- Config: auth_url
+- Env Var: RCLONE_PUTIO_AUTH_URL
+- Type: string
+- Required: false
+
+--putio-token-url
+
+Token server url.
+
+Leave blank to use the provider defaults.
+
+Properties:
+
+- Config: token_url
+- Env Var: RCLONE_PUTIO_TOKEN_URL
+- Type: string
+- Required: false
+
+--putio-encoding
+
+The encoding for the backend.
+
+See the encoding section in the overview for more info.
+
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PUTIO_ENCODING
+- Type: Encoding
+- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+
+--putio-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PUTIO_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+
+put.io has rate limiting. When you hit a limit, rclone automatically
+retries after waiting the amount of time requested by the server.
+
+If you want to avoid ever hitting these limits, you may use the
+--tpslimit flag with a low number. Note that the imposed limits may be
+different for different operations, and may change over time.
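+
+For example, one conservative option is to cap the transactions per
+second for a transfer:
+
+    rclone copy /home/source remote:backup --tpslimit 1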
+
+Proton Drive
+
+Proton Drive is an end-to-end encrypted Swiss vault for your files that
+protects your data.
+
+This is an rclone backend for Proton Drive which supports the file
+transfer features of Proton Drive using the same client-side encryption.
+
+Due to the fact that Proton Drive doesn't publish its API documentation,
+this backend is implemented with best efforts by reading the
+open-sourced client source code and observing the Proton Drive traffic
+in the browser.
+
+NB This backend is currently in Beta. It is believed to be correct and
+all the integration tests pass. However, as the Proton Drive protocol
+has evolved over time, there may be accounts it is not compatible with.
+Please post on the rclone forum if you find an incompatibility.
+
+Paths are specified as remote:path
+
+Paths may be as deep as required, e.g. remote:directory/subdirectory.
+
+Configurations
+
+Here is an example of how to make a remote called remote. First run:
+
+ rclone config
+
+This will guide you through an interactive setup process:
+
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> remote
+ Type of storage to configure.
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / Proton Drive
+ \ "Proton Drive"
+ [snip]
+ Storage> protondrive
+ User name
+ user> you@protonmail.com
+ Password.
+ y) Yes type in my own password
+ g) Generate random password
+ n) No leave this optional password blank
+ y/g/n> y
+ Enter the password:
+ password:
+ Confirm the password:
+ password:
+ Option 2fa.
+ 2FA code (if the account requires one)
+ Enter a value. Press Enter to leave empty.
+ 2fa> 123456
+ Remote config
+ --------------------
+ [remote]
+ type = protondrive
+ user = you@protonmail.com
+ pass = *** ENCRYPTED ***
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+NOTE: The Proton Drive encryption keys need to have been already
+generated after a regular login via the browser, otherwise attempting to
+use the credentials in rclone will fail.
+
+Once configured you can then use rclone like this,
+
+List directories in top level of your Proton Drive
+
+ rclone lsd remote:
+
+List all the files in your Proton Drive
+
+ rclone ls remote:
+
+To copy a local directory to a Proton Drive directory called backup
+
+ rclone copy /home/source remote:backup
+
+Modification times and hashes
+
+Proton Drive Bridge does not support updating modification times yet.
+
+The SHA1 hash algorithm is supported.
+
+Restricted filename characters
+
+Invalid UTF-8 bytes will be replaced, and left and right spaces will be
+removed (code reference).
+
+Duplicated files
+
+Proton Drive cannot have two files with exactly the same name and path.
+If a conflict occurs, depending on the advanced config, the file might
+or might not be overwritten.
+
+Mailbox password
+
+Please set your mailbox password in the advanced config section.
+
+Caching
+
+The cache is currently built for the case when rclone is the only
+instance performing operations on the mount point. The event system,
+which is the Proton API system that provides visibility of what has
+changed on the drive, is yet to be implemented, so updates from other
+clients won’t be reflected in the cache. Thus, if there are concurrent
+clients accessing the same mount point, stale data may end up being
+cached.
+
+Standard options
+
+Here are the Standard options specific to protondrive (Proton Drive).
+
+--protondrive-username
+
+The username of your proton account
+
+Properties:
+
+- Config: username
+- Env Var: RCLONE_PROTONDRIVE_USERNAME
+- Type: string
+- Required: true
+
+--protondrive-password
+
+The password of your proton account.
+
+NB Input to this must be obscured - see rclone obscure.
+
+Properties:
+
+- Config: password
+- Env Var: RCLONE_PROTONDRIVE_PASSWORD
+- Type: string
+- Required: true
+
+--protondrive-2fa
+
+The 2FA code
+
+The value can also be provided with --protondrive-2fa=000000
+
+The 2FA code of your proton drive account if the account is set up with
+two-factor authentication
+
+Properties:
+
+- Config: 2fa
+- Env Var: RCLONE_PROTONDRIVE_2FA
+- Type: string
+- Required: false
+
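+As with the flag, the code can be supplied through the environment
+variable for a single invocation, for example with a placeholder code:
+
+    RCLONE_PROTONDRIVE_2FA=123456 rclone lsd remote:
+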
+Advanced options
+
+Here are the Advanced options specific to protondrive (Proton Drive).
+
+--protondrive-mailbox-password
+
+The mailbox password of your two-password proton account.
+
+For more information regarding the mailbox password, please check the
+following official knowledge base article:
+https://proton.me/support/the-difference-between-the-mailbox-password-and-login-password
+
+NB Input to this must be obscured - see rclone obscure.
+
+Properties:
+
+- Config: mailbox_password
+- Env Var: RCLONE_PROTONDRIVE_MAILBOX_PASSWORD
+- Type: string
+- Required: false
+
+--protondrive-client-uid
+
+Client uid key (internal use only)
+
+Properties:
+
+- Config: client_uid
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_UID
+- Type: string
+- Required: false
+
+--protondrive-client-access-token
+
+Client access token key (internal use only)
+
+Properties:
+
+- Config: client_access_token
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_ACCESS_TOKEN
+- Type: string
+- Required: false
+
+--protondrive-client-refresh-token
+
+Client refresh token key (internal use only)
+
+Properties:
+
+- Config: client_refresh_token
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_REFRESH_TOKEN
+- Type: string
+- Required: false
+
+--protondrive-client-salted-key-pass
+
+Client salted key pass key (internal use only)
+
+Properties:
+
+- Config: client_salted_key_pass
+- Env Var: RCLONE_PROTONDRIVE_CLIENT_SALTED_KEY_PASS
+- Type: string
+- Required: false
+
+--protondrive-encoding
+
+The encoding for the backend.
+
+See the encoding section in the overview for more info.
+
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_PROTONDRIVE_ENCODING
+- Type: Encoding
+- Default: Slash,LeftSpace,RightSpace,InvalidUtf8,Dot
+
+--protondrive-original-file-size
+
+Return the file size before encryption
+
+The size of the encrypted file will be different from (bigger than) the
+original file size. Unless there is a reason to return the size after
+encryption, leave this option set to true, as features like Open(),
+which need to be supplied with the original content size, will
+otherwise fail to operate properly.
+
+Properties:
+
+- Config: original_file_size
+- Env Var: RCLONE_PROTONDRIVE_ORIGINAL_FILE_SIZE
+- Type: bool
+- Default: true
+
+--protondrive-app-version
+
+The app version string
+
+The app version string indicates the client that is currently performing
+the API request. This information is required and will be sent with
+every API request.
+
+Properties:
+
+- Config: app_version
+- Env Var: RCLONE_PROTONDRIVE_APP_VERSION
+- Type: string
+- Default: "macos-drive@1.0.0-alpha.1+rclone"
+
+--protondrive-replace-existing-draft
+
+Create a new revision when a filename conflict is detected
+
+When a file upload is cancelled or fails before completion, a draft
+will be created, and any subsequent upload of the same file to the same
+location will be reported as a conflict.
+
+The value can also be set with --protondrive-replace-existing-draft=true
+
+If the option is set to true, the draft will be replaced and the upload
+operation will restart. If there are other clients uploading to the
+same file location at the same time, the behavior is currently unknown.
+This option needs to be set to true for the integration tests. If the
+option is set to false, the error "a draft exist - usually this means a
+file is being uploaded at another client, or, there was a failed upload
+attempt" will be returned, and no upload will happen.
+
+Properties:
+
+- Config: replace_existing_draft
+- Env Var: RCLONE_PROTONDRIVE_REPLACE_EXISTING_DRAFT
+- Type: bool
+- Default: false
+
+--protondrive-enable-caching
+
+Caches the files and folders metadata to reduce API calls
+
+Notice: If you are mounting ProtonDrive as a VFS, please disable this
+feature, as the current implementation doesn't update or clear the cache
+when there are external changes.
+
+The files and folders on ProtonDrive are represented as links with
+keyrings, which can be cached to improve performance and be friendly to
+the API server.
+
+The cache is currently built for the case where rclone is the only
+instance performing operations on the mount point. The event system,
+which is the Proton API system that provides visibility of what has
+changed on the drive, is yet to be implemented, so updates from other
+clients won't be reflected in the cache. Thus, if there are concurrent
+clients accessing the same mount point, stale data may be served from
+the cache.
+
+Properties:
+
+- Config: enable_caching
+- Env Var: RCLONE_PROTONDRIVE_ENABLE_CACHING
+- Type: bool
+- Default: true
+
+--protondrive-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_PROTONDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
+Limitations
+
+This backend uses the Proton-API-Bridge, which is based on
+go-proton-api, a fork of the official repo.
+
+There is no official API documentation available from Proton Drive. But,
+thanks to Proton open sourcing proton-go-api and the web, iOS, and
+Android client codebases, we don't need to completely reverse engineer
+the APIs by observing the web client traffic!
+
+proton-go-api provides the basic building blocks of API calls and error
+handling, such as 429 exponential back-off, but it is pretty much just
+a bare-bones interface to the Proton API. For example, the encryption
+and decryption of Proton Drive files are not provided in this library.
+
+The Proton-API-Bridge attempts to bridge the gap, so rclone can be
+built on top of it quickly. This codebase handles the intricate tasks
+before and after calling Proton APIs, particularly the complex
+encryption scheme, allowing developers to implement features for other
+software on top of this codebase. There are likely quite a few errors in
+this library, as there isn't official documentation available.
+
+Seafile
+
+This is a backend for the Seafile storage service:
+
+- It works with both the free community edition and the professional
+  edition.
+- Seafile versions 6.x, 7.x, 8.x and 9.x are all supported.
+- Encrypted libraries are also supported.
+- It supports 2FA enabled users.
+- Using a Library API Token is not supported.
+
+Configuration
+
+There are two distinct modes in which you can set up your remote:
+
+- You point your remote to the root of the server, meaning you don't
+  specify a library during the configuration. Paths are specified as
+  remote:library. You may put subdirectories in too, e.g.
+  remote:library/path/to/dir.
+- You point your remote to a specific library during the configuration.
+  Paths are specified as remote:path/to/dir. This is the recommended
+  mode when using encrypted libraries. (This mode is possibly slightly
+  faster than the root mode.)
+
+Configuration in root mode
+
+Here is an example of making a seafile configuration for a user with no
+two-factor authentication. First run
+
+ rclone config
+
+This will guide you through an interactive setup process. To
+authenticate you will need the URL of your server, your email (or
+username) and your password.
+
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> seafile
+ Type of storage to configure.
+ Enter a string value. Press Enter for the default ("").
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / Seafile
+ \ "seafile"
+ [snip]
+ Storage> seafile
+ ** See help for seafile backend at: https://rclone.org/seafile/ **
+
+ URL of seafile host to connect to
+ Enter a string value. Press Enter for the default ("").
+ Choose a number from below, or type in your own value
+ 1 / Connect to cloud.seafile.com
+ \ "https://cloud.seafile.com/"
+ url> http://my.seafile.server/
+ User name (usually email address)
+ Enter a string value. Press Enter for the default ("").
+ user> me@example.com
+ Password
+ y) Yes type in my own password
+ g) Generate random password
+ n) No leave this optional password blank (default)
+ y/g> y
+ Enter the password:
+ password:
+ Confirm the password:
+ password:
+ Two-factor authentication ('true' if the account has 2FA enabled)
+ Enter a boolean value (true or false). Press Enter for the default ("false").
+ 2fa> false
+ Name of the library. Leave blank to access all non-encrypted libraries.
+ Enter a string value. Press Enter for the default ("").
+ library>
+ Library password (for encrypted libraries only). Leave blank if you pass it through the command line.
+ y) Yes type in my own password
+ g) Generate random password
+ n) No leave this optional password blank (default)
+ y/g/n> n
+ Edit advanced config? (y/n)
+ y) Yes
+ n) No (default)
+ y/n> n
+ Remote config
+ Two-factor authentication is not enabled on this account.
+ --------------------
+ [seafile]
+ type = seafile
+ url = http://my.seafile.server/
+ user = me@example.com
+ pass = *** ENCRYPTED ***
+ 2fa = false
+ --------------------
+ y) Yes this is OK (default)
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+This remote is called seafile. It's pointing to the root of your seafile
+server and can now be used like this:
+
+See all libraries
+
+ rclone lsd seafile:
+
+Create a new library
+
+ rclone mkdir seafile:library
+
+List the contents of a library
+
+ rclone ls seafile:library
+
+Sync /home/local/directory to the remote library, deleting any excess
+files in the library.
+
+ rclone sync --interactive /home/local/directory seafile:library
+
+Configuration in library mode
+
+Here's an example of a configuration in library mode with a user that
+has the two-factor authentication enabled. Your 2FA code will be asked
+at the end of the configuration, and will attempt to authenticate you:
+
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> seafile
+ Type of storage to configure.
+ Enter a string value. Press Enter for the default ("").
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / Seafile
+ \ "seafile"
+ [snip]
+ Storage> seafile
+ ** See help for seafile backend at: https://rclone.org/seafile/ **
+
+ URL of seafile host to connect to
+ Enter a string value. Press Enter for the default ("").
+ Choose a number from below, or type in your own value
+ 1 / Connect to cloud.seafile.com
+ \ "https://cloud.seafile.com/"
+ url> http://my.seafile.server/
+ User name (usually email address)
+ Enter a string value. Press Enter for the default ("").
+ user> me@example.com
+ Password
+ y) Yes type in my own password
+ g) Generate random password
+ n) No leave this optional password blank (default)
+ y/g> y
+ Enter the password:
+ password:
+ Confirm the password:
+ password:
+ Two-factor authentication ('true' if the account has 2FA enabled)
+ Enter a boolean value (true or false). Press Enter for the default ("false").
+ 2fa> true
+ Name of the library. Leave blank to access all non-encrypted libraries.
+ Enter a string value. Press Enter for the default ("").
+ library> My Library
+ Library password (for encrypted libraries only). Leave blank if you pass it through the command line.
+ y) Yes type in my own password
+ g) Generate random password
+ n) No leave this optional password blank (default)
+ y/g/n> n
+ Edit advanced config? (y/n)
+ y) Yes
+ n) No (default)
+ y/n> n
+ Remote config
+ Two-factor authentication: please enter your 2FA code
+ 2fa code> 123456
+ Authenticating...
+ Success!
+ --------------------
+ [seafile]
+ type = seafile
+ url = http://my.seafile.server/
+ user = me@example.com
+ pass =
+ 2fa = true
+ library = My Library
+ --------------------
+ y) Yes this is OK (default)
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+You'll notice your password is blank in the configuration. It's because
+we only need the password to authenticate you once.
+
+You specified My Library during the configuration. The root of the
+remote is pointing at the root of the library My Library:
+
+See all files in the library:
+
+ rclone lsd seafile:
+
+Create a new directory inside the library
+
+ rclone mkdir seafile:directory
+
+List the contents of a directory
+
+ rclone ls seafile:directory
+
+Sync /home/local/directory to the remote library, deleting any excess
+files in the library.
+
+ rclone sync --interactive /home/local/directory seafile:
+
+--fast-list
+
+Seafile version 7+ supports --fast-list which allows you to use fewer
+transactions in exchange for more memory. See the rclone docs for more
+details. Please note this is not supported on seafile server version 6.x
+
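+For example, to recursively list a library using fewer transactions you
+could run:
+
+    rclone ls --fast-list seafile:library
+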
+Restricted filename characters
+
+In addition to the default restricted characters set the following
+characters are also replaced:
+
+ Character Value Replacement
+ ----------- ------- -------------
+ / 0x2F /
+ " 0x22 "
+ \ 0x5C \
+
+Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON
+strings.
+
+Seafile and rclone link
+
+Rclone supports generating share links for non-encrypted libraries only.
+They can either be for a file or a directory:
+
+ rclone link seafile:seafile-tutorial.doc
+ http://my.seafile.server/f/fdcd8a2f93f84b8b90f4/
+
+or if run on a directory you will get:
+
+ rclone link seafile:dir
+ http://my.seafile.server/d/9ea2455f6f55478bbb0d/
+
+Please note a share link is unique for each file or directory. If you
+run a link command on a file/dir that has already been shared, you will
+get the exact same link.
+
+Compatibility
+
+It has been actively developed using the seafile docker image of these
+versions:
+
+- 6.3.4 community edition
+- 7.0.5 community edition
+- 7.1.3 community edition
+- 9.0.10 community edition
+
+Versions below 6.0 are not supported. Versions between 6.0 and 6.3
+haven't been tested and might not work properly.
+
+Each new version of rclone is automatically tested against the latest
+docker image of the seafile community server.
+
+Standard options
+
+Here are the Standard options specific to seafile (seafile).
+
+--seafile-url
+
+URL of seafile host to connect to.
+
+Properties:
+
+- Config: url
+- Env Var: RCLONE_SEAFILE_URL
+- Type: string
+- Required: true
+- Examples:
+ - "https://cloud.seafile.com/"
+ - Connect to cloud.seafile.com.
+
+--seafile-user
+
+User name (usually email address).
+
+Properties:
+
+- Config: user
+- Env Var: RCLONE_SEAFILE_USER
+- Type: string
+- Required: true
+
+--seafile-pass
+
+Password.
+
+NB Input to this must be obscured - see rclone obscure.
+
+Properties:
+
+- Config: pass
+- Env Var: RCLONE_SEAFILE_PASS
+- Type: string
+- Required: false
+
+--seafile-2fa
+
+Two-factor authentication ('true' if the account has 2FA enabled).
+
+Properties:
+
+- Config: 2fa
+- Env Var: RCLONE_SEAFILE_2FA
+- Type: bool
+- Default: false
+
+--seafile-library
+
+Name of the library.
+
+Leave blank to access all non-encrypted libraries.
+
+Properties:
+
+- Config: library
+- Env Var: RCLONE_SEAFILE_LIBRARY
+- Type: string
+- Required: false
+
+--seafile-library-key
+
+Library password (for encrypted libraries only).
+
+Leave blank if you pass it through the command line.
+
+NB Input to this must be obscured - see rclone obscure.
+
+Properties:
+
+- Config: library_key
+- Env Var: RCLONE_SEAFILE_LIBRARY_KEY
+- Type: string
+- Required: false
+
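+If you prefer to pass the library password on the command line rather
+than storing it in the config, it must be obscured first. For example,
+with a placeholder password and a remote already pointing at an
+encrypted library:
+
+    rclone ls seafile: --seafile-library-key "$(rclone obscure 'library-password')"
+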
+--seafile-auth-token
+
+Authentication token.
+
+Properties:
+
+- Config: auth_token
+- Env Var: RCLONE_SEAFILE_AUTH_TOKEN
+- Type: string
+- Required: false
+
+Advanced options
+
+Here are the Advanced options specific to seafile (seafile).
+
+--seafile-create-library
+
+Should rclone create a library if it doesn't exist.
+
+Properties:
+
+- Config: create_library
+- Env Var: RCLONE_SEAFILE_CREATE_LIBRARY
+- Type: bool
+- Default: false
+
+--seafile-encoding
+
+The encoding for the backend.
+
+See the encoding section in the overview for more info.
+
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_SEAFILE_ENCODING
+- Type: Encoding
+- Default: Slash,DoubleQuote,BackSlash,Ctl,InvalidUtf8
+
+--seafile-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SEAFILE_DESCRIPTION
+- Type: string
+- Required: false
+
+SFTP
+
+SFTP is the Secure (or SSH) File Transfer Protocol.
+
+The SFTP backend can be used with a number of different providers:
+
+- Hetzner Storage Box
+- rsync.net
+
+SFTP runs over SSH v2 and is installed as standard with most modern SSH
+installations.
+
+Paths are specified as remote:path. If the path does not begin with a /
+it is relative to the home directory of the user. An empty path remote:
+refers to the user's home directory. For example, rclone lsd remote:
+would list the home directory of the user configured in the rclone
+remote config (i.e. /home/sftpuser). However, rclone lsd remote:/ would
+list the root directory of the remote machine (i.e. /).
+
+Note that some SFTP servers will need the leading / - Synology is a
+good example of this. rsync.net and Hetzner, on the other hand, require
+users to OMIT the leading /.
+
+Note that by default rclone will try to execute shell commands on the
+server, see shell access considerations.
+
+Configuration
+
+Here is an example of making an SFTP configuration. First run
+
+ rclone config
+
+This will guide you through an interactive setup process.
+
+ No remotes found, make a new one?
+ n) New remote
+ s) Set configuration password
+ q) Quit config
+ n/s/q> n
+ name> remote
+ Type of storage to configure.
+ Choose a number from below, or type in your own value
+ [snip]
+ XX / SSH/SFTP
+ \ "sftp"
+ [snip]
+ Storage> sftp
+ SSH host to connect to
+ Choose a number from below, or type in your own value
+ 1 / Connect to example.com
+ \ "example.com"
+ host> example.com
+ SSH username
+ Enter a string value. Press Enter for the default ("$USER").
+ user> sftpuser
+ SSH port number
+ Enter a signed integer. Press Enter for the default (22).
+ port>
+ SSH password, leave blank to use ssh-agent.
+ y) Yes type in my own password
+ g) Generate random password
+ n) No leave this optional password blank
+ y/g/n> n
+ Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
+ key_file>
+ Remote config
+ --------------------
+ [remote]
+ host = example.com
+ user = sftpuser
+ port =
+ pass =
+ key_file =
+ --------------------
+ y) Yes this is OK
+ e) Edit this remote
+ d) Delete this remote
+ y/e/d> y
+
+This remote is called remote and can now be used like this:
+
+See all directories in the home directory
+
+ rclone lsd remote:
+
+See all directories in the root directory
+
+ rclone lsd remote:/
+
+Make a new directory
+
+ rclone mkdir remote:path/to/directory
+
+List the contents of a directory
+
+ rclone ls remote:path/to/directory
+
+Sync /home/local/directory to the remote directory, deleting any excess
+files in the directory.
+
+ rclone sync --interactive /home/local/directory remote:directory
+
+Mount the remote path /srv/www-data/ to the local path /mnt/www-data
+
+ rclone mount remote:/srv/www-data/ /mnt/www-data
+
+SSH Authentication
+
+The SFTP remote supports three authentication methods:
+
+- Password
+- Key file, including certificate signed keys
+- ssh-agent
+
+Key files should be PEM-encoded private key files. For instance
+/home/$USER/.ssh/id_rsa. Only unencrypted OpenSSH or PEM encrypted files
+are supported.
+
+The key file can be specified in either an external file (key_file) or
+contained within the rclone config file (key_pem). If using key_pem in
+the config file, the entry should be on a single line with new line
+('\n' or '\r\n') separating lines, i.e.
+
+ key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY-----
+
+This will generate it correctly for key_pem for use in the config:
+
+ awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa
+
+If you don't specify pass, key_file, or key_pem or ask_password then
+rclone will attempt to contact an ssh-agent. You can also specify
+key_use_agent to force the usage of an ssh-agent. In this case key_file
+or key_pem can also be specified to force the usage of a specific key in
+the ssh-agent.
+
+Using an ssh-agent is the only way to load encrypted OpenSSH keys at the
+moment.
+
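+A typical way to make an encrypted key available to rclone is to load
+it into a running ssh-agent first, for example (assuming your key is at
+~/.ssh/id_rsa):
+
+    eval "$(ssh-agent -s)"
+    ssh-add ~/.ssh/id_rsa
+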
+If you set the ask_password option, rclone will prompt for a password
+when needed and no password has been configured.
+
+Certificate-signed keys
+
+With traditional key-based authentication, you configure your private
+key only, and the public key built into it will be used during the
+authentication process.
+
+If you have a certificate you may use it to sign your public key,
+creating a separate SSH user certificate that should be used instead of
+the plain public key extracted from the private key. Then you must
+provide the path to the user certificate public key file in pubkey_file.
+
+Note: This is not the traditional public key paired with your private
+key, typically saved as /home/$USER/.ssh/id_rsa.pub. Setting this path
+in pubkey_file will not work.
+
+Example:
+
+ [remote]
+ type = sftp
+ host = example.com
+ user = sftpuser
+ key_file = ~/id_rsa
+ pubkey_file = ~/id_rsa-cert.pub
+
+If you concatenate a cert with a private key then you can specify the
+merged file in both places.
+
+Note: the cert must come first in the file. e.g.
- ```
cat id_rsa-cert.pub id_rsa > merged_key
- ```
- ### Host key validation
+Host key validation
- By default rclone will not check the server's host key for validation. This
- can allow an attacker to replace a server with their own and if you use
- password authentication then this can lead to that password being exposed.
+By default rclone will not check the server's host key for validation.
+This can allow an attacker to replace a server with their own and if you
+use password authentication then this can lead to that password being
+exposed.
- Host key matching, using standard `known_hosts` files can be turned on by
- enabling the `known_hosts_file` option. This can point to the file maintained
- by `OpenSSH` or can point to a unique file.
+Host key matching, using standard known_hosts files can be turned on by
+enabling the known_hosts_file option. This can point to the file
+maintained by OpenSSH or can point to a unique file.
- e.g. using the OpenSSH `known_hosts` file:
+e.g. using the OpenSSH known_hosts file:
- ```
[remote]
type = sftp
host = example.com
@@ -44010,6 +47005,17 @@ Properties:
- Type: bool
- Default: false
+--sftp-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SFTP_DESCRIPTION
+- Type: string
+- Required: false
+
Limitations
On some SFTP servers (e.g. Synology) the paths are different for SSH and
@@ -44290,6 +47296,17 @@ Properties:
- Default:
Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot
+--smb-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SMB_DESCRIPTION
+- Type: string
+- Required: false
+
Storj
Storj is an encrypted, secure, and cost-effective object storage service
@@ -44575,6 +47592,22 @@ Properties:
- Type: string
- Required: false
+Advanced options
+
+Here are the Advanced options specific to storj (Storj Decentralized
+Cloud Storage).
+
+--storj-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_STORJ_DESCRIPTION
+- Type: string
+- Required: false
+
Usage
Paths are specified as remote:bucket (or remote: for the lsf command.)
@@ -44982,6 +48015,17 @@ Properties:
- Type: Encoding
- Default: Slash,Ctl,InvalidUtf8,Dot
+--sugarsync-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_SUGARSYNC_DESCRIPTION
+- Type: string
+- Required: false
+
Limitations
rclone about is not supported by the SugarSync backend. Backends without
@@ -45138,6 +48182,17 @@ Properties:
- Default:
Slash,LtGt,DoubleQuote,BackQuote,Del,Ctl,LeftSpace,InvalidUtf8,Dot
+--uptobox-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_UPTOBOX_DESCRIPTION
+- Type: string
+- Required: false
+
Limitations
Uptobox will delete inactive files that have not been accessed in 60
@@ -45504,6 +48559,17 @@ Properties:
- Type: SizeSuffix
- Default: 1Gi
+--union-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_UNION_DESCRIPTION
+- Type: string
+- Required: false
+
Metadata
Any metadata supported by the underlying remote is read and written.
@@ -45782,6 +48848,28 @@ Properties:
- Type: SizeSuffix
- Default: 10Mi
+--webdav-owncloud-exclude-shares
+
+Exclude ownCloud shares
+
+Properties:
+
+- Config: owncloud_exclude_shares
+- Env Var: RCLONE_WEBDAV_OWNCLOUD_EXCLUDE_SHARES
+- Type: bool
+- Default: false
+
+--webdav-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_WEBDAV_DESCRIPTION
+- Type: string
+- Required: false
+
Provider notes
See below for notes on specific providers.
@@ -46165,6 +49253,17 @@ Properties:
- Type: Encoding
- Default: Slash,Del,Ctl,InvalidUtf8,Dot
+--yandex-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_YANDEX_DESCRIPTION
+- Type: string
+- Required: false
+
Limitations
When uploading very large files (bigger than about 5 GiB) you will need
@@ -46412,6 +49511,17 @@ Properties:
- Type: Encoding
- Default: Del,Ctl,InvalidUtf8
+--zoho-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_ZOHO_DESCRIPTION
+- Type: string
+- Required: false
+
Setting up your own client_id
For Zoho we advise you to set up your own client_id. To do so you have
@@ -46966,6 +50076,17 @@ Properties:
- Type: Encoding
- Default: Slash,Dot
+--local-description
+
+Description of the remote
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_LOCAL_DESCRIPTION
+- Type: string
+- Required: false
+
Metadata
Depending on which OS is in use the local backend may return only some
@@ -46977,6 +50098,8 @@ pkg/attrs#47).
User metadata is stored as extended attributes (which may not be
supported by all file systems) under the "user.*" prefix.
+Metadata is supported on files and directories.
+
Here are the possible system metadata items for the local backend.
---------------------------------------------------------------------------------------------------
@@ -47039,6 +50162,336 @@ Options:
Changelog
+v1.66.0 - 2024-03-10
+
+See commits
+
+- Major features
+ - Rclone will now sync directory modification times if the backend
+ supports it.
+ - This can be disabled with --no-update-dir-modtime
+ - See the overview and look for the D flags in the ModTime
+ column to see which backends support it.
+ - Rclone will now sync directory metadata if the backend supports
+ it when -M/--metadata is in use.
+ - See the overview and look for the D flags in the Metadata
+ column to see which backends support it.
+ - Bisync has received many updates see below for more details or
+ bisync's changelog
+- Removed backends
+ - amazonclouddrive: Remove Amazon Drive backend code and docs
+ (Nick Craig-Wood)
+- New Features
+ - backend
+ - Add description field for all backends (Paul Stern)
+ - build
+ - Update to go1.22 and make go1.20 the minimum required
+ version (Nick Craig-Wood)
+ - Fix CVE-2024-24786 by upgrading google.golang.org/protobuf
+ (Nick Craig-Wood)
+ - check: Respect --no-unicode-normalization and --ignore-case-sync
+ for --checkfile (nielash)
+ - cmd: Much improved shell auto completion which reduces the size
+ of the completion file and works faster (Nick Craig-Wood)
+ - doc updates (albertony, ben-ba, Eli, emyarod, huajin tong, Jack
+ Provance, kapitainsky, keongalvin, Nick Craig-Wood, nielash,
+ rarspace01, rzitzer, Tera, Vincent Murphy)
+ - fs: Add more detailed logging for file includes/excludes (Kyle
+ Reynolds)
+ - lsf
+ - Add --time-format flag (nielash)
+ - Make metadata appear for directories (Nick Craig-Wood)
+ - lsjson: Make metadata appear for directories (Nick Craig-Wood)
+ - rc
+ - Add srcFs and dstFs to core/stats and core/transferred stats
+ (Nick Craig-Wood)
+ - Add operations/hashsum to the rc as rclone hashsum
+ equivalent (Nick Craig-Wood)
+ - Add config/paths to the rc as rclone config paths equivalent
+ (Nick Craig-Wood)
+ - sync
+ - Optionally report list of synced paths to file (nielash)
+ - Implement directory sync for mod times and metadata (Nick
+ Craig-Wood)
+ - Don't set directory modtimes if already set (nielash)
+ - Don't sync directory modtimes from backends which don't have
+ directories (Nick Craig-Wood)
+- Bug Fixes
+ - backend
+ - Make backends which use oauth implement the Shutdown and
+ shutdown the oauth properly (rkonfj)
+ - bisync
+ - Handle unicode and case normalization consistently (nielash)
+ - Partial uploads known issue on local/ftp/sftp has been
+ resolved (unless using --inplace) (nielash)
+ - Fixed handling of unicode normalization and case
+ insensitivity, support for --fix-case, --ignore-case-sync,
+ --no-unicode-normalization (nielash)
+ - Bisync no longer fails to find the correct listing file when
+ configs are overridden with backend-specific flags.
+ (nielash)
+ - nfsmount
+ - Fix exit after external unmount (nielash)
+ - Fix --volname being ignored (nielash)
+ - operations
+ - Fix renaming a file on macOS (nielash)
+ - Fix case-insensitive moves in operations.Move (nielash)
+ - Fix TestCaseInsensitiveMoveFileDryRun on chunker integration
+ tests (nielash)
+ - Fix TestMkdirModTime test (Nick Craig-Wood)
+ - Fix TestSetDirModTime for backends with SetDirModTime but
+ not Metadata (Nick Craig-Wood)
+ - Fix typo in log messages (nielash)
+ - serve nfs: Fix writing files via Finder on macOS (nielash)
+ - serve restic: Fix error handling (Michael Eischer)
+ - serve webdav: Fix --baseurl without leading / (Nick Craig-Wood)
+ - stats: Fix race between ResetCounters and stopAverageLoop called
+ from time.AfterFunc (Nick Craig-Wood)
+ - sync
+ - --fix-case flag to rename case insensitive dest (nielash)
+ - Use operations.DirMove instead of sync.MoveDir for
+ --fix-case (nielash)
+ - systemd: Fix detection and switch to the coreos package
+ everywhere rather than having 2 separate libraries (Anagh Kumar
+ Baranwal)
+- Mount
+ - Fix macOS not noticing errors with --daemon (Nick Craig-Wood)
+ - Notice daemon dying much quicker (Nick Craig-Wood)
+- VFS
+ - Fix unicode normalization on macOS (nielash)
+- Bisync
+ - Copies and deletes are now handled in one operation instead of
+ two (nielash)
+ - --track-renames and --backup-dir are now supported (nielash)
+ - Final listings are now generated from sync results, to avoid
+ needing to re-list (nielash)
+ - Bisync is now much more resilient to changes that happen during
+ a bisync run, and far less prone to critical errors / undetected
+ changes (nielash)
+ - Bisync is now capable of rolling a file listing back in cases of
+ uncertainty, essentially marking the file as needing to be
+ rechecked next time. (nielash)
+ - A few basic terminal colors are now supported, controllable with
+ --color (AUTO|NEVER|ALWAYS) (nielash)
+ - Initial listing snapshots of Path1 and Path2 are now generated
+ concurrently, using the same "march" infrastructure as check and
+ sync, for performance improvements and less risk of error.
+ (nielash)
+ - --resync is now much more efficient (especially for users of
+ --create-empty-src-dirs) (nielash)
+ - Google Docs (and other files of unknown size) are now supported
+ (with the same options as in sync) (nielash)
+    - Equality checks before a sync conflict rename now fall back to
+      cryptcheck (when possible) or --download, instead of --size-only,
+      when check is not available. (nielash)
+ - Bisync now fully supports comparing based on any combination of
+ size, modtime, and checksum, lifting the prior restriction on
+ backends without modtime support. (nielash)
+ - Bisync now supports a "Graceful Shutdown" mode to cleanly cancel
+ a run early without requiring --resync. (nielash)
+ - New --recover flag allows robust recovery in the event of
+ interruptions, without requiring --resync. (nielash)
+ - A new --max-lock setting allows lock files to automatically
+ renew and expire, for better automatic recovery when a run is
+ interrupted. (nielash)
+ - Bisync now supports auto-resolving sync conflicts and
+ customizing rename behavior with new --conflict-resolve,
+ --conflict-loser, and --conflict-suffix flags. (nielash)
+ - A new --resync-mode flag allows more control over which version
+ of a file gets kept during a --resync. (nielash)
+ - Bisync now supports --retries and --retries-sleep (when
+ --resilient is set.) (nielash)
+ - Clarify file operation directions in dry-run logs (Kyle
+ Reynolds)
+- Local
+ - Fix cleanRootPath on Windows after go1.21.4 stdlib update
+ (nielash)
+ - Implement setting modification time on directories (nielash)
+ - Implement modtime and metadata for directories (Nick Craig-Wood)
+ - Fix setting of btime on directories on Windows (Nick Craig-Wood)
+ - Delete backend implementation of Purge to speed up and make
+ stats (Nick Craig-Wood)
+ - Support metadata setting and mapping on server side Move (Nick
+ Craig-Wood)
+- Cache
+ - Implement setting modification time on directories (if supported
+ by wrapped remote) (nielash)
+ - Implement setting metadata on directories (Nick Craig-Wood)
+- Crypt
+ - Implement setting modification time on directories (if supported
+ by wrapped remote) (nielash)
+ - Implement setting metadata on directories (Nick Craig-Wood)
+ - Improve handling of undecryptable file names (nielash)
+ - Add missing error check spotted by linter (Nick Craig-Wood)
+- Azure Blob
+ - Implement --azureblob-delete-snapshots (Nick Craig-Wood)
+- B2
+ - Clarify exactly what --b2-download-auth-duration does in the
+ docs (Nick Craig-Wood)
+- Chunker
+ - Implement setting modification time on directories (if supported
+ by wrapped remote) (nielash)
+ - Implement setting metadata on directories (Nick Craig-Wood)
+- Combine
+ - Implement setting modification time on directories (if supported
+ by wrapped remote) (nielash)
+ - Implement setting metadata on directories (Nick Craig-Wood)
+ - Fix directory metadata error on upstream root (nielash)
+ - Fix directory move across upstreams (nielash)
+- Compress
+ - Implement setting modification time on directories (if supported
+ by wrapped remote) (nielash)
+ - Implement setting metadata on directories (Nick Craig-Wood)
+- Drive
+ - Implement setting modification time on directories (nielash)
+ - Implement modtime and metadata setting for directories (Nick
+ Craig-Wood)
+ - Support metadata setting and mapping on server side Move,Copy
+ (Nick Craig-Wood)
+- FTP
+ - Fix mkdir with rsftp which is returning the wrong code (Nick
+ Craig-Wood)
+- Hasher
+ - Implement setting modification time on directories (if supported
+ by wrapped remote) (nielash)
+ - Implement setting metadata on directories (Nick Craig-Wood)
+ - Fix error from trying to stop an already-stopped db (nielash)
+ - Look for cached hash if passed hash unexpectedly blank (nielash)
+- Imagekit
+ - Updated docs and web content (Harshit Budhraja)
+ - Updated overview - supported operations (Harshit Budhraja)
+- Mega
+ - Fix panic with go1.22 (Nick Craig-Wood)
+- Netstorage
+ - Fix Root to return correct directory when pointing to a file
+ (Nick Craig-Wood)
+- Onedrive
+ - Add metadata support (nielash)
+- Opendrive
+ - Fix moving file/folder within the same parent dir (nielash)
+- Oracle Object Storage
+ - Support backend restore command (Nikhil Ahuja)
+ - Support workload identity authentication for OKE (Anders
+ Swanson)
+- Protondrive
+ - Fix encoding of Root method (Nick Craig-Wood)
+- Quatrix
+ - Fix Content-Range header (Volodymyr)
+ - Add option to skip project folders (Oksana Zhykina)
+ - Fix Root to return correct directory when pointing to a file
+ (Nick Craig-Wood)
+- S3
+ - Add --s3-version-deleted to show delete markers in listings when
+ using versions. (Nick Craig-Wood)
+ - Add IPv6 support with option --s3-use-dual-stack (Anthony
+ Metzidis)
+ - Copy parts in parallel when doing chunked server side copy (Nick
+ Craig-Wood)
+ - GCS provider: fix server side copy of files bigger than 5G (Nick
+ Craig-Wood)
+ - Support metadata setting and mapping on server side Copy (Nick
+ Craig-Wood)
+- Seafile
+ - Fix download/upload error when FILE_SERVER_ROOT is relative
+ (DanielEgbers)
+ - Fix Root to return correct directory when pointing to a file
+ (Nick Craig-Wood)
+- SFTP
+ - Implement setting modification time on directories (nielash)
+ - Set directory modtimes update on write flag (Nick Craig-Wood)
+ - Shorten wait delay for external ssh binaries now that we are
+ using go1.20 (Nick Craig-Wood)
+- Swift
+ - Avoid unnecessary container versioning check (Joe Cai)
+- Union
+ - Implement setting modification time on directories (if supported
+ by wrapped remote) (nielash)
+ - Implement setting metadata on directories (Nick Craig-Wood)
+- WebDAV
+ - Reduce priority of chunks upload log (Gabriel Ramos)
+ - owncloud: Add config owncloud_exclude_shares which allows to
+ exclude shared files and folders when listing remote resources
+ (Thomas Müller)
+
+v1.65.2 - 2024-01-24
+
+See commits
+
+- Bug Fixes
+ - build: bump github.com/cloudflare/circl from 1.3.6 to 1.3.7
+ (dependabot)
+ - docs updates (Nick Craig-Wood, kapitainsky, nielash, Tera,
+ Harshit Budhraja)
+- VFS
+ - Fix stale data when using --vfs-cache-mode full (Nick
+ Craig-Wood)
+- Azure Blob
+ - IMPORTANT Fix data corruption bug - see #7590 (Nick Craig-Wood)
+
+v1.65.1 - 2024-01-08
+
+See commits
+
+- Bug Fixes
+ - build
+ - Bump golang.org/x/crypto to fix ssh terrapin CVE-2023-48795
+ (dependabot)
+ - Update to go1.21.5 to fix Windows path problems (Nick
+ Craig-Wood)
+ - Fix docker build on arm/v6 (Nick Craig-Wood)
+ - install.sh: fix harmless error message on install (Nick
+ Craig-Wood)
+ - accounting: fix stats to show server side transfers (Nick
+ Craig-Wood)
+ - doc fixes (albertony, ben-ba, Eli Orzitzer, emyarod, keongalvin,
+ rarspace01)
+ - nfsmount: Compile for all unix oses, add --sudo and fix
+ error/option handling (Nick Craig-Wood)
+ - operations: Fix files moved by rclone move not being counted as
+ transfers (Nick Craig-Wood)
+ - oauthutil: Avoid panic when *token and *ts.token are the same
+ (rkonfj)
+ - serve s3: Fix listing oddities (Nick Craig-Wood)
+- VFS
+ - Note that --vfs-refresh runs in the background (Nick Craig-Wood)
+- Azurefiles
+ - Fix storage base url (Oksana)
+- Crypt
+ - Fix rclone move a file over itself deleting the file (Nick
+ Craig-Wood)
+- Chunker
+ - Fix rclone move a file over itself deleting the file (Nick
+ Craig-Wood)
+- Compress
+ - Fix rclone move a file over itself deleting the file (Nick
+ Craig-Wood)
+- Dropbox
+ - Fix used space on dropbox team accounts (Nick Craig-Wood)
+- FTP
+ - Fix multi-thread copy (WeidiDeng)
+- Googlephotos
+ - Fix nil pointer exception when batch failed (Nick Craig-Wood)
+- Hasher
+ - Fix rclone move a file over itself deleting the file (Nick
+ Craig-Wood)
+ - Fix invalid memory address error when MaxAge == 0 (nielash)
+- Onedrive
+ - Fix error listing: unknown object type (Nick Craig-Wood)
+ - Fix "unauthenticated: Unauthenticated" errors when uploading
+ (Nick Craig-Wood)
+- Oracleobjectstorage
+ - Fix object storage endpoint for custom endpoints (Manoj Ghosh)
+ - Multipart copy create bucket if it doesn't exist. (Manoj Ghosh)
+- Protondrive
+ - Fix CVE-2023-45286 / GHSA-xwh9-gc39-5298 (Nick Craig-Wood)
+- S3
+ - Fix crash if no UploadId in multipart upload (Nick Craig-Wood)
+- Smb
+ - Fix shares not listed by updating go-smb2 (halms)
+- Union
+ - Fix rclone move a file over itself deleting the file (Nick
+ Craig-Wood)
+
v1.65.0 - 2023-11-26
See commits
@@ -53501,10 +56954,12 @@ Bugs and Limitations
Limitations
-Directory timestamps aren't preserved
+Directory timestamps aren't preserved on some backends
-Rclone doesn't currently preserve the timestamps of directories. This is
-because rclone only really considers objects when syncing.
+As of v1.66, rclone supports syncing directory modtimes, if the backend
+supports it. Some backends do not support it -- see overview for a
+complete list. Additionally, note that empty directories are not synced
+by default (this can be enabled with --create-empty-src-dirs.)
Rclone struggles with millions of files in a directory/bucket
@@ -53850,7 +57305,7 @@ email addresses removed from here need to be added to bin/.ignore-emails to make
- Scott McGillivray scott.mcgillivray@gmail.com
- Bjørn Erik Pedersen bjorn.erik.pedersen@gmail.com
- Lukas Loesche lukas@mesosphere.io
-- emyarod allllaboutyou@gmail.com
+- emyarod emyarod@users.noreply.github.com
- T.C. Ferguson tcf909@gmail.com
- Brandur brandur@mutelight.org
- Dario Giovannetti dev@dariogiovannetti.net
@@ -54607,6 +58062,27 @@ email addresses removed from here need to be added to bin/.ignore-emails to make
- Alen Šiljak dev@alensiljak.eu.org
- 你知道未来吗 rkonfj@gmail.com
- Abhinav Dhiman 8640877+ahnv@users.noreply.github.com
+- halms 7513146+halms@users.noreply.github.com
+- ben-ba benjamin.brauner@gmx.de
+- Eli Orzitzer e_orz@yahoo.com
+- Anthony Metzidis anthony.metzidis@gmail.com
+- emyarod afw5059@gmail.com
+- keongalvin keongalvin@gmail.com
+- rarspace01 rarspace01@users.noreply.github.com
+- Paul Stern paulstern45@gmail.com
+- Nikhil Ahuja nikhilahuja@live.com
+- Harshit Budhraja 52413945+harshit-budhraja@users.noreply.github.com
+- Tera 24725862+teraa@users.noreply.github.com
+- Kyle Reynolds kylereynoldsdev@gmail.com
+- Michael Eischer michael.eischer@gmx.de
+- Thomas Müller 1005065+DeepDiver1975@users.noreply.github.com
+- DanielEgbers 27849724+DanielEgbers@users.noreply.github.com
+- Jack Provance 49460795+njprov@users.noreply.github.com
+- Gabriel Ramos 109390599+gabrielramos02@users.noreply.github.com
+- Dan McArdle d@nmcardle.com
+- Joe Cai joe.cai@bigcommerce.com
+- Anders Swanson anders.swanson@oracle.com
+- huajin tong 137764712+thirdkeyword@users.noreply.github.com
Contact the rclone project
diff --git a/Makefile b/Makefile
index 13b92fb90..3503a91fb 100644
--- a/Makefile
+++ b/Makefile
@@ -103,7 +103,7 @@ check: rclone
# Get the build dependencies
build_dep:
- go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
+ go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
# Get the release dependencies we only install on linux
release_dep_linux:
diff --git a/README.md b/README.md
index fc8c8fe57..ddd94d53a 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,7 @@
[](https://rclone.org/#gh-light-mode-only)
[](https://rclone.org/#gh-dark-mode-only)
+[](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-light-mode-only)
+[](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-dark-mode-only)
[Website](https://rclone.org) |
[Documentation](https://rclone.org/docs/) |
@@ -23,7 +25,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
- * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
@@ -46,6 +47,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
* HTTP [:page_facing_up:](https://rclone.org/http/)
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
+ * ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
@@ -120,6 +122,7 @@ These backends adapt or modify other storage providers
* Partial syncs supported on a whole file basis
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
+ * [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
* Can sync to and from network, e.g. two different cloud accounts
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
diff --git a/RELEASE.md b/RELEASE.md
index 09bc34a4d..71e5b7918 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -124,32 +124,21 @@ Cherry pick any changes back to master and the stable branch if it is active.
## Making a manual build of docker
-The rclone docker image should autobuild on via GitHub actions. If it doesn't
-or needs to be updated then rebuild like this.
-
-See: https://github.com/ilteoood/docker_buildx/issues/19
-See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
+To do a basic build of rclone's docker image to debug builds locally:
+
+```
+docker buildx build --load -t rclone/rclone:testing --progress=plain .
+docker run --rm rclone/rclone:testing version
+```
+
+To test the multiplatform build:
+
+```
+docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
+```
+
+To make a full build, set the tags correctly and add `--push`:
```
-git co v1.54.1
-docker pull golang
-export DOCKER_CLI_EXPERIMENTAL=enabled
-docker buildx create --name actions_builder --use
-docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
-docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
-SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
-echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
-docker buildx stop actions_builder
-```
-
-### Old build for linux/amd64 only
-
-```
-docker pull golang
-docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
-docker push rclone/rclone:1.52.0
-docker push rclone/rclone:1.52
-docker push rclone/rclone:1
-docker push rclone/rclone:latest
```
diff --git a/VERSION b/VERSION
index 0cff9236f..f5cf8b802 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v1.66.0
+v1.67.0
diff --git a/backend/alias/alias_internal_test.go b/backend/alias/alias_internal_test.go
index 4e3c842af..fa0956b6d 100644
--- a/backend/alias/alias_internal_test.go
+++ b/backend/alias/alias_internal_test.go
@@ -81,10 +81,12 @@ func TestNewFS(t *testing.T) {
for i, gotEntry := range gotEntries {
what := fmt.Sprintf("%s, entry=%d", what, i)
wantEntry := test.entries[i]
+ _, isDir := gotEntry.(fs.Directory)
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
- require.Equal(t, wantEntry.size, gotEntry.Size(), what)
- _, isDir := gotEntry.(fs.Directory)
+ if !isDir {
+ require.Equal(t, wantEntry.size, gotEntry.Size(), what)
+ }
require.Equal(t, wantEntry.isDir, isDir, what)
}
}
diff --git a/backend/all/all.go b/backend/all/all.go
index 8afb991bb..e90877eda 100644
--- a/backend/all/all.go
+++ b/backend/all/all.go
@@ -4,7 +4,6 @@ package all
import (
// Active file systems
_ "github.com/rclone/rclone/backend/alias"
- _ "github.com/rclone/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/azurefiles"
_ "github.com/rclone/rclone/backend/b2"
diff --git a/backend/amazonclouddrive/amazonclouddrive.go b/backend/amazonclouddrive/amazonclouddrive.go
deleted file mode 100644
index c5a123915..000000000
--- a/backend/amazonclouddrive/amazonclouddrive.go
+++ /dev/null
@@ -1,1376 +0,0 @@
-// Package amazonclouddrive provides an interface to the Amazon Cloud
-// Drive object storage system.
-package amazonclouddrive
-
-/*
-FIXME make searching for directory in id and file in id more efficient
-- use the name: search parameter - remember the escaping rules
-- use Folder GetNode and GetFile
-
-FIXME make the default for no files and no dirs be (FILE & FOLDER) so
-we ignore assets completely!
-*/
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "net/http"
- "path"
- "strings"
- "time"
-
- acd "github.com/ncw/go-acd"
- "github.com/rclone/rclone/fs"
- "github.com/rclone/rclone/fs/config"
- "github.com/rclone/rclone/fs/config/configmap"
- "github.com/rclone/rclone/fs/config/configstruct"
- "github.com/rclone/rclone/fs/fserrors"
- "github.com/rclone/rclone/fs/fshttp"
- "github.com/rclone/rclone/fs/hash"
- "github.com/rclone/rclone/lib/dircache"
- "github.com/rclone/rclone/lib/encoder"
- "github.com/rclone/rclone/lib/oauthutil"
- "github.com/rclone/rclone/lib/pacer"
- "golang.org/x/oauth2"
-)
-
-const (
- folderKind = "FOLDER"
- fileKind = "FILE"
- statusAvailable = "AVAILABLE"
- timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
- minSleep = 20 * time.Millisecond
- warnFileSize = 50000 << 20 // Display warning for files larger than this size
- defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
-)
-
-// Globals
-var (
- // Description of how to auth for this app
- acdConfig = &oauth2.Config{
- Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
- Endpoint: oauth2.Endpoint{
- AuthURL: "https://www.amazon.com/ap/oa",
- TokenURL: "https://api.amazon.com/auth/o2/token",
- },
- ClientID: "",
- ClientSecret: "",
- RedirectURL: oauthutil.RedirectURL,
- }
-)
-
-// Register with Fs
-func init() {
- fs.Register(&fs.RegInfo{
- Name: "amazon cloud drive",
- Prefix: "acd",
- Description: "Amazon Drive",
- NewFs: NewFs,
- Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
- return oauthutil.ConfigOut("", &oauthutil.Options{
- OAuth2Config: acdConfig,
- })
- },
- Options: append(oauthutil.SharedOptions, []fs.Option{{
- Name: "checkpoint",
- Help: "Checkpoint for internal polling (debug).",
- Hide: fs.OptionHideBoth,
- Advanced: true,
- }, {
- Name: "upload_wait_per_gb",
- Help: `Additional time per GiB to wait after a failed complete upload to see if it appears.
-
-Sometimes Amazon Drive gives an error when a file has been fully
-uploaded but the file appears anyway after a little while. This
-happens sometimes for files over 1 GiB in size and nearly every time for
-files bigger than 10 GiB. This parameter controls the time rclone waits
-for the file to appear.
-
-The default value for this parameter is 3 minutes per GiB, so by
-default it will wait 3 minutes for every GiB uploaded to see if the
-file appears.
-
-You can disable this feature by setting it to 0. This may cause
-conflict errors as rclone retries the failed upload but the file will
-most likely appear correctly eventually.
-
-These values were determined empirically by observing lots of uploads
-of big files for a range of file sizes.
-
-Upload with the "-v" flag to see more info about what rclone is doing
-in this situation.`,
- Default: fs.Duration(180 * time.Second),
- Advanced: true,
- }, {
- Name: "templink_threshold",
- Help: `Files >= this size will be downloaded via their tempLink.
-
-Files this size or more will be downloaded via their "tempLink". This
-is to work around a problem with Amazon Drive which blocks downloads
-of files bigger than about 10 GiB. The default for this is 9 GiB which
-shouldn't need to be changed.
-
-To download files above this threshold, rclone requests a "tempLink"
-which downloads the file through a temporary URL directly from the
-underlying S3 storage.`,
- Default: defaultTempLinkThreshold,
- Advanced: true,
- }, {
- Name: config.ConfigEncoding,
- Help: config.ConfigEncodingHelp,
- Advanced: true,
- // Encode invalid UTF-8 bytes as json doesn't handle them properly.
- Default: (encoder.Base |
- encoder.EncodeInvalidUtf8),
- }}...),
- })
-}
-
-// Options defines the configuration for this backend
-type Options struct {
- Checkpoint string `config:"checkpoint"`
- UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
- TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
- Enc encoder.MultiEncoder `config:"encoding"`
-}
-
-// Fs represents a remote acd server
-type Fs struct {
- name string // name of this remote
- features *fs.Features // optional features
- opt Options // options for this Fs
- ci *fs.ConfigInfo // global config
- c *acd.Client // the connection to the acd server
- noAuthClient *http.Client // unauthenticated http client
- root string // the path we are working on
- dirCache *dircache.DirCache // Map of directory path to directory id
- pacer *fs.Pacer // pacer for API calls
- trueRootID string // ID of true root directory
- tokenRenewer *oauthutil.Renew // renew the token on expiry
-}
-
-// Object describes an acd object
-//
-// Will definitely have info but maybe not meta
-type Object struct {
- fs *Fs // what this object is part of
- remote string // The remote path
- info *acd.Node // Info from the acd object if known
-}
-
-// ------------------------------------------------------------
-
-// Name of the remote (as passed into NewFs)
-func (f *Fs) Name() string {
- return f.name
-}
-
-// Root of the remote (as passed into NewFs)
-func (f *Fs) Root() string {
- return f.root
-}
-
-// String converts this Fs to a string
-func (f *Fs) String() string {
- return fmt.Sprintf("amazon drive root '%s'", f.root)
-}
-
-// Features returns the optional features of this Fs
-func (f *Fs) Features() *fs.Features {
- return f.features
-}
-
-// parsePath parses an acd 'url'
-func parsePath(path string) (root string) {
- root = strings.Trim(path, "/")
- return
-}
-
-// retryErrorCodes is a slice of error codes that we will retry
-var retryErrorCodes = []int{
- 400, // Bad request (seen in "Next token is expired")
- 401, // Unauthorized (seen in "Token has expired")
- 408, // Request Timeout
- 429, // Rate exceeded.
- 500, // Get occasional 500 Internal Server Error
- 502, // Bad Gateway when doing big listings
- 503, // Service Unavailable
- 504, // Gateway Time-out
-}
-
-// shouldRetry returns a boolean as to whether this resp and err
-// deserve to be retried. It returns the err as a convenience
-func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
- if fserrors.ContextError(ctx, &err) {
- return false, err
- }
- if resp != nil {
- if resp.StatusCode == 401 {
- f.tokenRenewer.Invalidate()
- fs.Debugf(f, "401 error received - invalidating token")
- return true, err
- }
- // Work around receiving this error sporadically on authentication
- //
- // HTTP code 403: "403 Forbidden", response body: {"message":"Authorization header requires 'Credential' parameter. Authorization header requires 'Signature' parameter. Authorization header requires 'SignedHeaders' parameter. Authorization header requires existence of either a 'X-Amz-Date' or a 'Date' header. Authorization=Bearer"}
- if resp.StatusCode == 403 && strings.Contains(err.Error(), "Authorization header requires") {
- fs.Debugf(f, "403 \"Authorization header requires...\" error received - retry")
- return true, err
- }
- }
- return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
-}
-
-// If query parameters contain X-Amz-Algorithm remove Authorization header
-//
-// This happens when ACD redirects to S3 for the download. The oauth
-// transport puts an Authorization header in which we need to remove
-// otherwise we get this message from AWS
-//
-// Only one auth mechanism allowed; only the X-Amz-Algorithm query
-// parameter, Signature query string parameter or the Authorization
-// header should be specified
-func filterRequest(req *http.Request) {
- if req.URL.Query().Get("X-Amz-Algorithm") != "" {
- fs.Debugf(nil, "Removing Authorization: header after redirect to S3")
- req.Header.Del("Authorization")
- }
-}
-
-// NewFs constructs an Fs from the path, container:path
-func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
- // Parse config into Options struct
- opt := new(Options)
- err := configstruct.Set(m, opt)
- if err != nil {
- return nil, err
- }
- root = parsePath(root)
- baseClient := fshttp.NewClient(ctx)
- if do, ok := baseClient.Transport.(interface {
- SetRequestFilter(f func(req *http.Request))
- }); ok {
- do.SetRequestFilter(filterRequest)
- } else {
- fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
- }
- oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, acdConfig, baseClient)
- if err != nil {
- return nil, fmt.Errorf("failed to configure Amazon Drive: %w", err)
- }
-
- c := acd.NewClient(oAuthClient)
- ci := fs.GetConfig(ctx)
- f := &Fs{
- name: name,
- root: root,
- opt: *opt,
- ci: ci,
- c: c,
- pacer: fs.NewPacer(ctx, pacer.NewAmazonCloudDrive(pacer.MinSleep(minSleep))),
- noAuthClient: fshttp.NewClient(ctx),
- }
- f.features = (&fs.Features{
- CaseInsensitive: true,
- ReadMimeType: true,
- CanHaveEmptyDirectories: true,
- }).Fill(ctx, f)
-
- // Renew the token in the background
- f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
- _, err := f.getRootInfo(ctx)
- return err
- })
-
- // Update endpoints
- var resp *http.Response
- err = f.pacer.Call(func() (bool, error) {
- _, resp, err = f.c.Account.GetEndpoints()
- return f.shouldRetry(ctx, resp, err)
- })
- if err != nil {
- return nil, fmt.Errorf("failed to get endpoints: %w", err)
- }
-
- // Get rootID
- rootInfo, err := f.getRootInfo(ctx)
- if err != nil || rootInfo.Id == nil {
- return nil, fmt.Errorf("failed to get root: %w", err)
- }
- f.trueRootID = *rootInfo.Id
-
- f.dirCache = dircache.New(root, f.trueRootID, f)
-
- // Find the current root
- err = f.dirCache.FindRoot(ctx, false)
- if err != nil {
- // Assume it is a file
- newRoot, remote := dircache.SplitPath(root)
- tempF := *f
- tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
- tempF.root = newRoot
- // Make new Fs which is the parent
- err = tempF.dirCache.FindRoot(ctx, false)
- if err != nil {
- // No root so return old f
- return f, nil
- }
- _, err := tempF.newObjectWithInfo(ctx, remote, nil)
- if err != nil {
- if err == fs.ErrorObjectNotFound {
- // File doesn't exist so return old f
- return f, nil
- }
- return nil, err
- }
- // XXX: update the old f here instead of returning tempF, since
- // `features` were already filled with functions having *f as a receiver.
- // See https://github.com/rclone/rclone/issues/2182
- f.dirCache = tempF.dirCache
- f.root = tempF.root
- // return an error with an fs which points to the parent
- return f, fs.ErrorIsFile
- }
- return f, nil
-}
-
-// getRootInfo gets the root folder info
-func (f *Fs) getRootInfo(ctx context.Context) (rootInfo *acd.Folder, err error) {
- var resp *http.Response
- err = f.pacer.Call(func() (bool, error) {
- rootInfo, resp, err = f.c.Nodes.GetRoot()
- return f.shouldRetry(ctx, resp, err)
- })
- return rootInfo, err
-}
-
-// Return an Object from a path
-//
-// If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
- o := &Object{
- fs: f,
- remote: remote,
- }
- if info != nil {
- // Set info but not meta
- o.info = info
- } else {
- err := o.readMetaData(ctx) // reads info and meta, returning an error
- if err != nil {
- return nil, err
- }
- }
- return o, nil
-}
-
-// NewObject finds the Object at remote. If it can't be found
-// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
- return f.newObjectWithInfo(ctx, remote, nil)
-}
-
-// FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
- //fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
- folder := acd.FolderFromId(pathID, f.c.Nodes)
- var resp *http.Response
- var subFolder *acd.Folder
- err = f.pacer.Call(func() (bool, error) {
- subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
- return f.shouldRetry(ctx, resp, err)
- })
- if err != nil {
- if err == acd.ErrorNodeNotFound {
- //fs.Debugf(f, "...Not found")
- return "", false, nil
- }
- //fs.Debugf(f, "...Error %v", err)
- return "", false, err
- }
- if subFolder.Status != nil && *subFolder.Status != statusAvailable {
- fs.Debugf(f, "Ignoring folder %q in state %q", leaf, *subFolder.Status)
- time.Sleep(1 * time.Second) // FIXME wait for problem to go away!
- return "", false, nil
- }
- //fs.Debugf(f, "...Found(%q, %v)", *subFolder.Id, leaf)
- return *subFolder.Id, true, nil
-}
-
-// CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
- //fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
- folder := acd.FolderFromId(pathID, f.c.Nodes)
- var resp *http.Response
- var info *acd.Folder
- err = f.pacer.Call(func() (bool, error) {
- info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
- return f.shouldRetry(ctx, resp, err)
- })
- if err != nil {
- //fmt.Printf("...Error %v\n", err)
- return "", err
- }
- //fmt.Printf("...Id %q\n", *info.Id)
- return *info.Id, nil
-}
-
-// listAll lists the objects into the function supplied; if
-// directoriesOnly is set it only sends directories.
-//
-// listAllFn is the user function used by listAll to process each
-// File item found. It should return true to finish processing.
-type listAllFn func(*acd.Node) bool
-
-// Lists the required directory, calling the user function on each item found
-//
-// If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(ctx context.Context, dirID string, title string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
- query := "parents:" + dirID
- if directoriesOnly {
- query += " AND kind:" + folderKind
- } else if filesOnly {
- query += " AND kind:" + fileKind
- //} else {
- // FIXME none of these work
- //query += " AND kind:(" + fileKind + " OR " + folderKind + ")"
- //query += " AND (kind:" + fileKind + " OR kind:" + folderKind + ")"
- }
- opts := acd.NodeListOptions{
- Filters: query,
- }
- var nodes []*acd.Node
- var out []*acd.Node
- //var resp *http.Response
- for {
- var resp *http.Response
- err = f.pacer.CallNoRetry(func() (bool, error) {
- nodes, resp, err = f.c.Nodes.GetNodes(&opts)
- return f.shouldRetry(ctx, resp, err)
- })
- if err != nil {
- return false, err
- }
- if nodes == nil {
- break
- }
- for _, node := range nodes {
- if node.Name != nil && node.Id != nil && node.Kind != nil && node.Status != nil {
- // Ignore nodes if not AVAILABLE
- if *node.Status != statusAvailable {
- continue
- }
- // Ignore bogus nodes Amazon Drive sometimes reports
- hasValidParent := false
- for _, parent := range node.Parents {
- if parent == dirID {
- hasValidParent = true
- break
- }
- }
- if !hasValidParent {
- continue
- }
- *node.Name = f.opt.Enc.ToStandardName(*node.Name)
- // Store the nodes up in case we have to retry the listing
- out = append(out, node)
- }
- }
- }
- // Send the nodes now
- for _, node := range out {
- if fn(node) {
- found = true
- break
- }
- }
- return
-}
-
-// List the objects and directories in dir into entries. The
-// entries can be returned in any order but should be for a
-// complete directory.
-//
-// dir should be "" to list the root, and should not have
-// trailing slashes.
-//
-// This should return ErrDirNotFound if the directory isn't
-// found.
-func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
- directoryID, err := f.dirCache.FindDir(ctx, dir, false)
- if err != nil {
- return nil, err
- }
- maxTries := f.ci.LowLevelRetries
- var iErr error
- for tries := 1; tries <= maxTries; tries++ {
- entries = nil
- _, err = f.listAll(ctx, directoryID, "", false, false, func(node *acd.Node) bool {
- remote := path.Join(dir, *node.Name)
- switch *node.Kind {
- case folderKind:
- // cache the directory ID for later lookups
- f.dirCache.Put(remote, *node.Id)
- when, _ := time.Parse(timeFormat, *node.ModifiedDate) // FIXME
- d := fs.NewDir(remote, when).SetID(*node.Id)
- entries = append(entries, d)
- case fileKind:
- o, err := f.newObjectWithInfo(ctx, remote, node)
- if err != nil {
- iErr = err
- return true
- }
- entries = append(entries, o)
- default:
- // ignore ASSET, etc.
- }
- return false
- })
- if iErr != nil {
- return nil, iErr
- }
- if fserrors.IsRetryError(err) {
- fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries)
- continue
- }
- if err != nil {
- return nil, err
- }
- break
- }
- return entries, nil
-}
-
-// checkUpload checks to see if an error occurred after the file was
-// completely uploaded.
-//
-// If it was then it waits for a while to see if the file really
-// exists and is the right size and returns an updated info.
-//
-// If the file wasn't found or was the wrong size then it returns the
-// original error.
-//
-// This is a workaround for Amazon sometimes returning
-//
-// - 408 REQUEST_TIMEOUT
-// - 504 GATEWAY_TIMEOUT
-// - 500 Internal server error
-//
-// At the end of large uploads. The speculation is that the timeout
-// is waiting for the sha1 hashing to complete and the file may well
-// be properly uploaded.
-func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
- // Return if no error - all is well
- if inErr == nil {
- return false, inInfo, inErr
- }
- // If not one of the errors we can fix return
- // if resp == nil || resp.StatusCode != 408 && resp.StatusCode != 500 && resp.StatusCode != 504 {
- // return false, inInfo, inErr
- // }
-
- // The HTTP status
- httpStatus := "HTTP status UNKNOWN"
- if resp != nil {
- httpStatus = resp.Status
- }
-
- // check to see if we read to the end
- buf := make([]byte, 1)
- n, err := in.Read(buf)
- if !(n == 0 && err == io.EOF) {
- fs.Debugf(src, "Upload error detected but didn't finish upload: %v (%q)", inErr, httpStatus)
- return false, inInfo, inErr
- }
-
- // Don't wait for uploads - assume they will appear later
- if f.opt.UploadWaitPerGB <= 0 {
- fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
- return false, inInfo, inErr
- }
-
- // Time we should wait for the upload
- uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
- timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))
-
- const sleepTime = 5 * time.Second // sleep between tries
- retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of retries, rounded up
-
- fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
- remote := src.Remote()
- for i := 1; i <= retries; i++ {
- o, err := f.NewObject(ctx, remote)
- if err == fs.ErrorObjectNotFound {
- fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
- } else if err != nil {
- fs.Debugf(src, "Object returned error - waiting (%d/%d): %v", i, retries, err)
- } else {
- if src.Size() == o.Size() {
- fs.Debugf(src, "Object found with correct size %d after waiting (%d/%d) - %v - returning with no error", src.Size(), i, retries, sleepTime*time.Duration(i-1))
- info = &acd.File{
- Node: o.(*Object).info,
- }
- return true, info, nil
- }
- fs.Debugf(src, "Object found but wrong size %d vs %d - waiting (%d/%d)", src.Size(), o.Size(), i, retries)
- }
- time.Sleep(sleepTime)
- }
- fs.Debugf(src, "Giving up waiting for object - returning original error: %v (%q)", inErr, httpStatus)
- return false, inInfo, inErr
-}
-
-// Put the object into the container
-//
-// Copy the reader in to the new object which is returned.
-//
-// The new object may have been created if an error is returned
-func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
- remote := src.Remote()
- size := src.Size()
- // Temporary Object under construction
- o := &Object{
- fs: f,
- remote: remote,
- }
- // Check if object already exists
- err := o.readMetaData(ctx)
- switch err {
- case nil:
- return o, o.Update(ctx, in, src, options...)
- case fs.ErrorObjectNotFound:
- // Not found so create it
- default:
- return nil, err
- }
- // If not create it
- leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
- if err != nil {
- return nil, err
- }
- if size > warnFileSize {
- fs.Logf(f, "Warning: file %q may fail because it is too big. Use --max-size=%dM to skip large files.", remote, warnFileSize>>20)
- }
- folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
- var info *acd.File
- var resp *http.Response
- err = f.pacer.CallNoRetry(func() (bool, error) {
- start := time.Now()
- f.tokenRenewer.Start()
- info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
- f.tokenRenewer.Stop()
- var ok bool
- ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
- if ok {
- return false, nil
- }
- return f.shouldRetry(ctx, resp, err)
- })
- if err != nil {
- return nil, err
- }
- o.info = info.Node
- return o, nil
-}
-
-// Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(ctx context.Context, dir string) error {
- _, err := f.dirCache.FindDir(ctx, dir, true)
- return err
-}
-
-// Move src to this remote using server-side move operations.
-//
-// This is stored with the remote path given.
-//
-// It returns the destination Object and a possible error.
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
- // go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
- srcObj, ok := src.(*Object)
- if !ok {
- fs.Debugf(src, "Can't move - not same remote type")
- return nil, fs.ErrorCantMove
- }
-
- // create the destination directory if necessary
- srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
- if err != nil {
- return nil, err
- }
- dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
- if err != nil {
- return nil, err
- }
- err = f.moveNode(ctx, srcObj.remote, dstLeaf, dstDirectoryID, srcObj.info, srcLeaf, srcDirectoryID, false)
- if err != nil {
- return nil, err
- }
- // Wait for directory caching so we can no longer see the old
- // object and see the new object
- time.Sleep(200 * time.Millisecond) // enough time 90% of the time
- var (
- dstObj fs.Object
- srcErr, dstErr error
- )
- for i := 1; i <= f.ci.LowLevelRetries; i++ {
- _, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
- if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
- // exit if error on source
- return nil, srcErr
- }
- dstObj, dstErr = f.NewObject(ctx, remote)
- if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
- // exit if error on dst
- return nil, dstErr
- }
- if srcErr == fs.ErrorObjectNotFound && dstErr == nil {
- // finished if src not found and dst found
- break
- }
- fs.Debugf(src, "Wait for directory listing to update after move %d/%d", i, f.ci.LowLevelRetries)
- time.Sleep(1 * time.Second)
- }
- return dstObj, dstErr
-}
-
-// DirCacheFlush resets the directory cache - used in testing as an
-// optional interface
-func (f *Fs) DirCacheFlush() {
- f.dirCache.ResetRoot()
-}
-
-// DirMove moves src, srcRemote to this remote at dstRemote
-// using server-side move operations.
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantDirMove
-//
-// If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
- srcFs, ok := src.(*Fs)
- if !ok {
- fs.Debugf(src, "DirMove error: not same remote type")
- return fs.ErrorCantDirMove
- }
- srcPath := path.Join(srcFs.root, srcRemote)
- dstPath := path.Join(f.root, dstRemote)
-
- // Refuse to move to or from the root
- if srcPath == "" || dstPath == "" {
- fs.Debugf(src, "DirMove error: Can't move root")
- return errors.New("can't move root directory")
- }
-
- // Find ID of dst parent, creating subdirs if necessary
- dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, dstRemote, true)
- if err != nil {
- return err
- }
-
- // Check destination does not exist
- _, err = f.dirCache.FindDir(ctx, dstRemote, false)
- if err == fs.ErrorDirNotFound {
- // OK
- } else if err != nil {
- return err
- } else {
- return fs.ErrorDirExists
- }
-
- // Find ID of src parent
- _, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcRemote, false)
- if err != nil {
- return err
- }
- srcLeaf, _ := dircache.SplitPath(srcPath)
-
- // Find ID of src
- srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
- if err != nil {
- return err
- }
-
- // FIXME make a proper node.UpdateMetadata command
- srcInfo := acd.NodeFromId(srcID, f.c.Nodes)
- var jsonStr string
- err = srcFs.pacer.Call(func() (bool, error) {
- jsonStr, err = srcInfo.GetMetadata()
- return srcFs.shouldRetry(ctx, nil, err)
- })
- if err != nil {
- fs.Debugf(src, "DirMove error: error reading src metadata: %v", err)
- return err
- }
- err = json.Unmarshal([]byte(jsonStr), &srcInfo)
- if err != nil {
- fs.Debugf(src, "DirMove error: error reading unpacking src metadata: %v", err)
- return err
- }
-
- err = f.moveNode(ctx, srcPath, dstLeaf, dstDirectoryID, srcInfo, srcLeaf, srcDirectoryID, true)
- if err != nil {
- return err
- }
-
- srcFs.dirCache.FlushDir(srcRemote)
- return nil
-}
-
-// purgeCheck removes the root directory; if check is set then it
-// refuses to do so if it has anything in it
-func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
- root := path.Join(f.root, dir)
- if root == "" {
- return errors.New("can't purge root directory")
- }
- dc := f.dirCache
- rootID, err := dc.FindDir(ctx, dir, false)
- if err != nil {
- return err
- }
-
- if check {
- // check directory is empty
- empty := true
- _, err = f.listAll(ctx, rootID, "", false, false, func(node *acd.Node) bool {
- switch *node.Kind {
- case folderKind:
- empty = false
- return true
- case fileKind:
- empty = false
- return true
- default:
- fs.Debugf("Found ASSET %s", *node.Id)
- }
- return false
- })
- if err != nil {
- return err
- }
- if !empty {
- return errors.New("directory not empty")
- }
- }
-
- node := acd.NodeFromId(rootID, f.c.Nodes)
- var resp *http.Response
- err = f.pacer.Call(func() (bool, error) {
- resp, err = node.Trash()
- return f.shouldRetry(ctx, resp, err)
- })
- if err != nil {
- return err
- }
-
- f.dirCache.FlushDir(dir)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Rmdir deletes the root folder
-//
-// Returns an error if it isn't empty
-func (f *Fs) Rmdir(ctx context.Context, dir string) error {
- return f.purgeCheck(ctx, dir, true)
-}
-
-// Precision return the precision of this Fs
-func (f *Fs) Precision() time.Duration {
- return fs.ModTimeNotSupported
-}
-
-// Hashes returns the supported hash sets.
-func (f *Fs) Hashes() hash.Set {
- return hash.Set(hash.MD5)
-}
-
-// Copy src to this remote using server-side copy operations.
-//
-// This is stored with the remote path given
-//
-// It returns the destination Object and a possible error
-//
-// Will only be called if src.Fs().Name() == f.Name()
-//
-// If it isn't possible then return fs.ErrorCantCopy
-//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-// srcObj, ok := src.(*Object)
-// if !ok {
-// fs.Debugf(src, "Can't copy - not same remote type")
-// return nil, fs.ErrorCantCopy
-// }
-// srcFs := srcObj.fs
-// _, err := f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
-// if err != nil {
-// return nil, err
-// }
-// return f.NewObject(ctx, remote), nil
-//}
-
-// Purge deletes all the files and the container
-//
-// Optional interface: Only implement this if you have a way of
-// deleting all the files quicker than just running Remove() on the
-// result of List()
-func (f *Fs) Purge(ctx context.Context, dir string) error {
- return f.purgeCheck(ctx, dir, false)
-}
-
-// ------------------------------------------------------------
-
-// Fs returns the parent Fs
-func (o *Object) Fs() fs.Info {
- return o.fs
-}
-
-// Return a string version
-func (o *Object) String() string {
- if o == nil {
- return ""
- }
- return o.remote
-}
-
-// Remote returns the remote path
-func (o *Object) Remote() string {
- return o.remote
-}
-
-// Hash returns the Md5sum of an object returning a lowercase hex string
-func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
- if t != hash.MD5 {
- return "", hash.ErrUnsupported
- }
- if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
- return *o.info.ContentProperties.Md5, nil
- }
- return "", nil
-}
-
-// Size returns the size of an object in bytes
-func (o *Object) Size() int64 {
- if o.info.ContentProperties != nil && o.info.ContentProperties.Size != nil {
- return int64(*o.info.ContentProperties.Size)
- }
- return 0 // Object is likely PENDING
-}
-
-// readMetaData gets the metadata if it hasn't already been fetched
-//
-// it also sets the info
-//
-// If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (o *Object) readMetaData(ctx context.Context) (err error) {
- if o.info != nil {
- return nil
- }
- leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, o.remote, false)
- if err != nil {
- if err == fs.ErrorDirNotFound {
- return fs.ErrorObjectNotFound
- }
- return err
- }
- folder := acd.FolderFromId(directoryID, o.fs.c.Nodes)
- var resp *http.Response
- var info *acd.File
- err = o.fs.pacer.Call(func() (bool, error) {
- info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
- return o.fs.shouldRetry(ctx, resp, err)
- })
- if err != nil {
- if err == acd.ErrorNodeNotFound {
- return fs.ErrorObjectNotFound
- }
- return err
- }
- o.info = info.Node
- return nil
-}
-
-// ModTime returns the modification time of the object
-//
-// It attempts to read the object's mtime and if that isn't present the
-// LastModified returned in the http headers
-func (o *Object) ModTime(ctx context.Context) time.Time {
- err := o.readMetaData(ctx)
- if err != nil {
- fs.Debugf(o, "Failed to read metadata: %v", err)
- return time.Now()
- }
- modTime, err := time.Parse(timeFormat, *o.info.ModifiedDate)
- if err != nil {
- fs.Debugf(o, "Failed to read mtime from object: %v", err)
- return time.Now()
- }
- return modTime
-}
-
-// SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
- // FIXME not implemented
- return fs.ErrorCantSetModTime
-}
-
-// Storable returns a boolean showing whether this object is storable
-func (o *Object) Storable() bool {
- return true
-}
-
-// Open an object for read
-func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
- bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
- if bigObject {
- fs.Debugf(o, "Downloading large object via tempLink")
- }
- file := acd.File{Node: o.info}
- var resp *http.Response
- headers := fs.OpenOptionHeaders(options)
- err = o.fs.pacer.Call(func() (bool, error) {
- if !bigObject {
- in, resp, err = file.OpenHeaders(headers)
- } else {
- in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
- }
- return o.fs.shouldRetry(ctx, resp, err)
- })
- return in, err
-}
-
-// Update the object with the contents of the io.Reader, modTime and size
-//
-// The new object may have been created if an error is returned
-func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
- file := acd.File{Node: o.info}
- var info *acd.File
- var resp *http.Response
- var err error
- err = o.fs.pacer.CallNoRetry(func() (bool, error) {
- start := time.Now()
- o.fs.tokenRenewer.Start()
- info, resp, err = file.Overwrite(in)
- o.fs.tokenRenewer.Stop()
- var ok bool
- ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
- if ok {
- return false, nil
- }
- return o.fs.shouldRetry(ctx, resp, err)
- })
- if err != nil {
- return err
- }
- o.info = info.Node
- return nil
-}
-
-// Remove a node
-func (f *Fs) removeNode(ctx context.Context, info *acd.Node) error {
- var resp *http.Response
- var err error
- err = f.pacer.Call(func() (bool, error) {
- resp, err = info.Trash()
- return f.shouldRetry(ctx, resp, err)
- })
- return err
-}
-
-// Remove an object
-func (o *Object) Remove(ctx context.Context) error {
- return o.fs.removeNode(ctx, o.info)
-}
-
-// Restore a node
-func (f *Fs) restoreNode(ctx context.Context, info *acd.Node) (newInfo *acd.Node, err error) {
- var resp *http.Response
- err = f.pacer.Call(func() (bool, error) {
- newInfo, resp, err = info.Restore()
- return f.shouldRetry(ctx, resp, err)
- })
- return newInfo, err
-}
-
-// Changes name of given node
-func (f *Fs) renameNode(ctx context.Context, info *acd.Node, newName string) (newInfo *acd.Node, err error) {
- var resp *http.Response
- err = f.pacer.Call(func() (bool, error) {
- newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
- return f.shouldRetry(ctx, resp, err)
- })
- return newInfo, err
-}
-
-// Replaces one parent with another, effectively moving the file. Leaves other
-// parents untouched. ReplaceParent cannot be used when the file is trashed.
-func (f *Fs) replaceParent(ctx context.Context, info *acd.Node, oldParentID string, newParentID string) error {
- return f.pacer.Call(func() (bool, error) {
- resp, err := info.ReplaceParent(oldParentID, newParentID)
- return f.shouldRetry(ctx, resp, err)
- })
-}
-
-// Adds one additional parent to object.
-func (f *Fs) addParent(ctx context.Context, info *acd.Node, newParentID string) error {
- return f.pacer.Call(func() (bool, error) {
- resp, err := info.AddParent(newParentID)
- return f.shouldRetry(ctx, resp, err)
- })
-}
-
-// Remove given parent from object, leaving the other possible
-// parents untouched. Object can end up having no parents.
-func (f *Fs) removeParent(ctx context.Context, info *acd.Node, parentID string) error {
- return f.pacer.Call(func() (bool, error) {
- resp, err := info.RemoveParent(parentID)
- return f.shouldRetry(ctx, resp, err)
- })
-}
-
-// moveNode moves the node given from the srcLeaf,srcDirectoryID to
-// the dstLeaf,dstDirectoryID
-func (f *Fs) moveNode(ctx context.Context, name, dstLeaf, dstDirectoryID string, srcInfo *acd.Node, srcLeaf, srcDirectoryID string, useDirErrorMsgs bool) (err error) {
- // fs.Debugf(name, "moveNode dst(%q,%s) <- src(%q,%s)", dstLeaf, dstDirectoryID, srcLeaf, srcDirectoryID)
- cantMove := fs.ErrorCantMove
- if useDirErrorMsgs {
- cantMove = fs.ErrorCantDirMove
- }
-
- if len(srcInfo.Parents) > 1 && srcLeaf != dstLeaf {
- fs.Debugf(name, "Move error: object is attached to multiple parents and should be renamed. This would change the name of the node in all parents.")
- return cantMove
- }
-
- if srcLeaf != dstLeaf {
- // fs.Debugf(name, "renaming")
- _, err = f.renameNode(ctx, srcInfo, dstLeaf)
- if err != nil {
- fs.Debugf(name, "Move: quick path rename failed: %v", err)
- goto OnConflict
- }
- }
- if srcDirectoryID != dstDirectoryID {
- // fs.Debugf(name, "trying parent replace: %s -> %s", oldParentID, newParentID)
- err = f.replaceParent(ctx, srcInfo, srcDirectoryID, dstDirectoryID)
- if err != nil {
- fs.Debugf(name, "Move: quick path parent replace failed: %v", err)
- return err
- }
- }
-
- return nil
-
-OnConflict:
- fs.Debugf(name, "Could not directly rename file, presumably because there was a file with the same name already. Instead, the file will now be trashed where such operations do not cause errors. It will be restored to the correct parent after. If any of the subsequent calls fails, the rename/move will be in an invalid state.")
-
- // fs.Debugf(name, "Trashing file")
- err = f.removeNode(ctx, srcInfo)
- if err != nil {
- fs.Debugf(name, "Move: remove node failed: %v", err)
- return err
- }
- // fs.Debugf(name, "Renaming file")
- _, err = f.renameNode(ctx, srcInfo, dstLeaf)
- if err != nil {
- fs.Debugf(name, "Move: rename node failed: %v", err)
- return err
- }
- // note: replacing parent is forbidden by API, modifying them individually is
- // okay though
- // fs.Debugf(name, "Adding target parent")
- err = f.addParent(ctx, srcInfo, dstDirectoryID)
- if err != nil {
- fs.Debugf(name, "Move: addParent failed: %v", err)
- return err
- }
- // fs.Debugf(name, "removing original parent")
- err = f.removeParent(ctx, srcInfo, srcDirectoryID)
- if err != nil {
- fs.Debugf(name, "Move: removeParent failed: %v", err)
- return err
- }
- // fs.Debugf(name, "Restoring")
- _, err = f.restoreNode(ctx, srcInfo)
- if err != nil {
- fs.Debugf(name, "Move: restoreNode node failed: %v", err)
- return err
- }
- return nil
-}
-
-// MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType(ctx context.Context) string {
- if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
- return *o.info.ContentProperties.ContentType
- }
- return ""
-}
-
-// ChangeNotify calls the passed function with a path that has had changes.
-// If the implementation uses polling, it should adhere to the given interval.
-//
-// Automatically restarts itself in case of unexpected behaviour of the remote.
-//
-// Close the returned channel to stop being notified.
-func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
- checkpoint := f.opt.Checkpoint
-
- go func() {
- var ticker *time.Ticker
- var tickerC <-chan time.Time
- for {
- select {
- case pollInterval, ok := <-pollIntervalChan:
- if !ok {
- if ticker != nil {
- ticker.Stop()
- }
- return
- }
- if pollInterval == 0 {
- if ticker != nil {
- ticker.Stop()
- ticker, tickerC = nil, nil
- }
- } else {
- ticker = time.NewTicker(pollInterval)
- tickerC = ticker.C
- }
- case <-tickerC:
- checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
- if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
- fs.Debugf(f, "Unable to save checkpoint: %v", err)
- }
- }
- }
- }()
-}
-
-func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
- var err error
- var resp *http.Response
- var reachedEnd bool
- var csCount int
- var nodeCount int
-
- fs.Debugf(f, "Checking for changes on remote (Checkpoint %q)", checkpoint)
- err = f.pacer.CallNoRetry(func() (bool, error) {
- resp, err = f.c.Changes.GetChangesFunc(&acd.ChangesOptions{
- Checkpoint: checkpoint,
- IncludePurged: true,
- }, func(changeSet *acd.ChangeSet, err error) error {
- if err != nil {
- return err
- }
-
- type entryType struct {
- path string
- entryType fs.EntryType
- }
- var pathsToClear []entryType
- csCount++
- nodeCount += len(changeSet.Nodes)
- if changeSet.End {
- reachedEnd = true
- }
- if changeSet.Checkpoint != "" {
- checkpoint = changeSet.Checkpoint
- }
- for _, node := range changeSet.Nodes {
- if path, ok := f.dirCache.GetInv(*node.Id); ok {
- if node.IsFile() {
- pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
- } else {
- pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
- }
- continue
- }
-
- if node.IsFile() {
- // translate the parent dir of this object
- if len(node.Parents) > 0 {
- if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
- // and append the drive file name to compute the full file name
- name := f.opt.Enc.ToStandardName(*node.Name)
- if len(path) > 0 {
- path = path + "/" + name
- } else {
- path = name
- }
- // this will now clear the actual file too
- pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
- }
- } else { // a true root object that is changed
- pathsToClear = append(pathsToClear, entryType{path: *node.Name, entryType: fs.EntryObject})
- }
- }
- }
-
- visitedPaths := make(map[string]bool)
- for _, entry := range pathsToClear {
- if _, ok := visitedPaths[entry.path]; ok {
- continue
- }
- visitedPaths[entry.path] = true
- notifyFunc(entry.path, entry.entryType)
- }
-
- return nil
- })
- return false, err
- })
- fs.Debugf(f, "Got %d ChangeSets with %d Nodes", csCount, nodeCount)
-
- if err != nil && err != io.ErrUnexpectedEOF {
- fs.Debugf(f, "Failed to get Changes: %v", err)
- return checkpoint
- }
-
- if reachedEnd {
- reachedEnd = false
- fs.Debugf(f, "All changes were processed. Waiting for more.")
- } else if checkpoint == "" {
- fs.Debugf(f, "Did not get any checkpoint, something went wrong! %+v", resp)
- }
- return checkpoint
-}
-
-// Shutdown shutdown the fs
-func (f *Fs) Shutdown(ctx context.Context) error {
- f.tokenRenewer.Shutdown()
- return nil
-}
-
-// ID returns the ID of the Object if known, or "" if not
-func (o *Object) ID() string {
- if o.info.Id == nil {
- return ""
- }
- return *o.info.Id
-}
-
-// Check the interfaces are satisfied
-var (
- _ fs.Fs = (*Fs)(nil)
- _ fs.Purger = (*Fs)(nil)
- // _ fs.Copier = (*Fs)(nil)
- _ fs.Mover = (*Fs)(nil)
- _ fs.DirMover = (*Fs)(nil)
- _ fs.DirCacheFlusher = (*Fs)(nil)
- _ fs.ChangeNotifier = (*Fs)(nil)
- _ fs.Shutdowner = (*Fs)(nil)
- _ fs.Object = (*Object)(nil)
- _ fs.MimeTyper = &Object{}
- _ fs.IDer = &Object{}
-)
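
Editor's note: for reference, the upload-wait arithmetic in the removed checkUpload above reduces to a per-byte wait derived from the backend's upload_wait_per_gb option, rounded up to a whole number of 5-second polls. A minimal, self-contained sketch of that arithmetic (the option value and the 5 GiB size are made-up example inputs, not values from this patch):

package main

import (
	"fmt"
	"time"
)

func main() {
	uploadWaitPerGB := 3 * time.Minute // example value for the upload_wait_per_gb option
	size := int64(5 << 30)             // pretend we just uploaded 5 GiB

	// Wait time scales linearly with the uploaded size.
	uploadWaitPerByte := float64(uploadWaitPerGB) / 1024 / 1024 / 1024
	timeToWait := time.Duration(uploadWaitPerByte * float64(size))

	const sleepTime = 5 * time.Second
	retries := int((timeToWait + sleepTime - 1) / sleepTime) // number of polls, rounded up

	fmt.Println(timeToWait, retries) // prints: 15m0s 180
}
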
diff --git a/backend/amazonclouddrive/amazonclouddrive_test.go b/backend/amazonclouddrive/amazonclouddrive_test.go
deleted file mode 100644
index 821a6f1ed..000000000
--- a/backend/amazonclouddrive/amazonclouddrive_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Test AmazonCloudDrive filesystem interface
-
-//go:build acd
-// +build acd
-
-package amazonclouddrive_test
-
-import (
- "testing"
-
- "github.com/rclone/rclone/backend/amazonclouddrive"
- "github.com/rclone/rclone/fs"
- "github.com/rclone/rclone/fstest/fstests"
-)
-
-// TestIntegration runs integration tests against the remote
-func TestIntegration(t *testing.T) {
- fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
- fstests.RemoteName = "TestAmazonCloudDrive:"
- fstests.Run(t)
-}
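
Editor's note: the deleted test above was gated behind the acd build tag, so it never ran by default; before this patch it could be exercised with something like the following (it also needed a remote named TestAmazonCloudDrive: configured, per fstests.RemoteName):

go test -tags acd ./backend/amazonclouddrive/...
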
diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go
index 89e308aed..36bd50e30 100644
--- a/backend/azureblob/azureblob.go
+++ b/backend/azureblob/azureblob.go
@@ -8,6 +8,7 @@ import (
"context"
"crypto/md5"
"encoding/base64"
+ "encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
@@ -401,6 +402,24 @@ rclone does if you know the container exists already.
Help: `If set, do not do HEAD before GET when getting objects.`,
Default: false,
Advanced: true,
+ }, {
+ Name: "delete_snapshots",
+ Help: `Set to specify how to deal with snapshots on blob deletion.`,
+ Examples: []fs.OptionExample{
+ {
+ Value: "",
+ Help: "By default, the delete operation fails if a blob has snapshots",
+ }, {
+ Value: string(blob.DeleteSnapshotsOptionTypeInclude),
+ Help: "Specify 'include' to remove the root blob and all its snapshots",
+ }, {
+ Value: string(blob.DeleteSnapshotsOptionTypeOnly),
+ Help: "Specify 'only' to remove only the snapshots but keep the root blob.",
+ },
+ },
+ Default: "",
+ Exclusive: true,
+ Advanced: true,
}},
})
}
@@ -437,6 +456,7 @@ type Options struct {
DirectoryMarkers bool `config:"directory_markers"`
NoCheckContainer bool `config:"no_check_container"`
NoHeadObject bool `config:"no_head_object"`
+ DeleteSnapshots string `config:"delete_snapshots"`
}
// Fs represents a remote azure server
@@ -1966,34 +1986,21 @@ func (rs *readSeekCloser) Close() error {
return nil
}
-// increment the array as LSB binary
-func increment(xs *[8]byte) {
- for i, digit := range xs {
- newDigit := digit + 1
- xs[i] = newDigit
- if newDigit >= digit {
- // exit if no carry
- break
- }
- }
-}
-
// record chunk number and id for Close
type azBlock struct {
- chunkNumber int
+ chunkNumber uint64
id string
}
// Implements the fs.ChunkWriter interface
type azChunkWriter struct {
- chunkSize int64
- size int64
- f *Fs
- ui uploadInfo
- blocksMu sync.Mutex // protects the below
- blocks []azBlock // list of blocks for finalize
- binaryBlockID [8]byte // block counter as LSB first 8 bytes
- o *Object
+ chunkSize int64
+ size int64
+ f *Fs
+ ui uploadInfo
+ blocksMu sync.Mutex // protects the below
+ blocks []azBlock // list of blocks for finalize
+ o *Object
}
// OpenChunkWriter returns the chunk size and a ChunkWriter
@@ -2081,13 +2088,14 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
transactionalMD5 := md5sum[:]
// increment the blockID and save the blocks for finalize
- increment(&w.binaryBlockID)
- blockID := base64.StdEncoding.EncodeToString(w.binaryBlockID[:])
+ var binaryBlockID [8]byte // block counter as LSB first 8 bytes
+ binary.LittleEndian.PutUint64(binaryBlockID[:], uint64(chunkNumber))
+ blockID := base64.StdEncoding.EncodeToString(binaryBlockID[:])
// Save the blockID for the commit
w.blocksMu.Lock()
w.blocks = append(w.blocks, azBlock{
- chunkNumber: chunkNumber,
+ chunkNumber: uint64(chunkNumber),
id: blockID,
})
w.blocksMu.Unlock()
@@ -2152,9 +2160,20 @@ func (w *azChunkWriter) Close(ctx context.Context) (err error) {
return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
})
- // Create a list of block IDs
+ // Create and check a list of block IDs
blockIDs := make([]string, len(w.blocks))
for i := range w.blocks {
+ if w.blocks[i].chunkNumber != uint64(i) {
+ return fmt.Errorf("internal error: expecting chunkNumber %d but got %d", i, w.blocks[i].chunkNumber)
+ }
+ chunkBytes, err := base64.StdEncoding.DecodeString(w.blocks[i].id)
+ if err != nil {
+ return fmt.Errorf("internal error: bad block ID: %w", err)
+ }
+ chunkNumber := binary.LittleEndian.Uint64(chunkBytes)
+ if w.blocks[i].chunkNumber != chunkNumber {
+ return fmt.Errorf("internal error: expecting decoded chunkNumber %d but got %d", w.blocks[i].chunkNumber, chunkNumber)
+ }
blockIDs[i] = w.blocks[i].id
}
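
Editor's note: a quick illustration of the reworked block-ID scheme above. WriteChunk now derives the ID deterministically from the chunk number (8 little-endian bytes, base64-encoded) and Close decodes it again as a sanity check. A minimal round-trip sketch, not part of the patch:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

func main() {
	chunkNumber := 5

	// WriteChunk: derive the block ID directly from the chunk number.
	var binaryBlockID [8]byte
	binary.LittleEndian.PutUint64(binaryBlockID[:], uint64(chunkNumber))
	blockID := base64.StdEncoding.EncodeToString(binaryBlockID[:])

	// Close: decode the ID again and check it matches the chunk number.
	chunkBytes, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil {
		panic(err)
	}
	decoded := binary.LittleEndian.Uint64(chunkBytes)
	fmt.Println(blockID, decoded == uint64(chunkNumber)) // prints: BQAAAAAAAAA= true
}
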
@@ -2356,9 +2375,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
blb := o.getBlobSVC()
- //only := blob.DeleteSnapshotsOptionTypeOnly
- opt := blob.DeleteOptions{
- //DeleteSnapshots: &only,
+ opt := blob.DeleteOptions{}
+ if o.fs.opt.DeleteSnapshots != "" {
+ action := blob.DeleteSnapshotsOptionType(o.fs.opt.DeleteSnapshots)
+ opt.DeleteSnapshots = &action
}
return o.fs.pacer.Call(func() (bool, error) {
_, err := blb.Delete(ctx, &opt)
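
Editor's note: with the new delete_snapshots option, an azureblob remote can opt in to removing snapshots when a blob is deleted, as used in Remove above. A hedged config sketch (remote and account names are placeholders; the accepted values are the empty default, include and only, as listed in the option's examples):

[azblob]
type = azureblob
account = myaccount
# auth settings as usual
delete_snapshots = include

The command-line override would presumably follow rclone's usual backend flag naming, i.e. --azureblob-delete-snapshots include.
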
diff --git a/backend/azureblob/azureblob_internal_test.go b/backend/azureblob/azureblob_internal_test.go
index 1871fa265..2479c3624 100644
--- a/backend/azureblob/azureblob_internal_test.go
+++ b/backend/azureblob/azureblob_internal_test.go
@@ -17,21 +17,3 @@ func (f *Fs) InternalTest(t *testing.T) {
enabled = f.Features().GetTier
assert.True(t, enabled)
}
-
-func TestIncrement(t *testing.T) {
- for _, test := range []struct {
- in [8]byte
- want [8]byte
- }{
- {[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
- {[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
- {[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
- {[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
- {[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
- {[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
- {[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
- } {
- increment(&test.in)
- assert.Equal(t, test.want, test.in)
- }
-}
diff --git a/backend/b2/b2.go b/backend/b2/b2.go
index 778d856f1..50413cc03 100644
--- a/backend/b2/b2.go
+++ b/backend/b2/b2.go
@@ -193,9 +193,12 @@ Example:
Advanced: true,
}, {
Name: "download_auth_duration",
- Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
+ Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
+
+This is used in combination with "rclone link" for making files
+accessible to the public and sets the duration before the download
+authorization token will expire.
-The duration before the download authorization token will expire.
The minimum value is 1 second. The maximum value is one week.`,
Default: fs.Duration(7 * 24 * time.Hour),
Advanced: true,
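
Editor's note: the reworded help above ties download_auth_duration to "rclone link". A hedged usage sketch (remote, account, bucket and file names are placeholders): set the duration on the b2 remote, then ask for a public link whose embedded download authorization token expires after that duration.

# rclone.conf
[myb2]
type = b2
account = placeholder-key-id
key = placeholder-application-key
download_auth_duration = 1d

# generate a public link; its authorization token expires after one day
rclone link myb2:bucket/path/file.txt
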
diff --git a/backend/cache/cache_internal_test.go b/backend/cache/cache_internal_test.go
index d7ee0fba2..5c7d19c6d 100644
--- a/backend/cache/cache_internal_test.go
+++ b/backend/cache/cache_internal_test.go
@@ -30,6 +30,7 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/object"
+ "github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/testy"
"github.com/rclone/rclone/lib/random"
@@ -935,8 +936,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
if purge {
- _ = f.Features().Purge(context.Background(), "")
- require.NoError(t, err)
+ _ = operations.Purge(context.Background(), f, "")
}
err = f.Mkdir(context.Background(), "")
require.NoError(t, err)
@@ -949,7 +949,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
}
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
- err := f.Features().Purge(context.Background(), "")
+ err := operations.Purge(context.Background(), f, "")
require.NoError(t, err)
cfs, err := r.getCacheFs(f)
require.NoError(t, err)
diff --git a/backend/cache/cache_test.go b/backend/cache/cache_test.go
index 594149596..faf33e5d7 100644
--- a/backend/cache/cache_test.go
+++ b/backend/cache/cache_test.go
@@ -16,10 +16,11 @@ import (
// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
- RemoteName: "TestCache:",
- NilObject: (*cache.Object)(nil),
- UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
- UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
- SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
+ RemoteName: "TestCache:",
+ NilObject: (*cache.Object)(nil),
+ UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
+ UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
+ UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
+ SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
})
}
diff --git a/backend/chunker/chunker.go b/backend/chunker/chunker.go
index baf4656ed..42f8aa16f 100644
--- a/backend/chunker/chunker.go
+++ b/backend/chunker/chunker.go
@@ -338,13 +338,18 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// Note 2: features.Fill() points features.PutStream to our PutStream,
// but features.Mask() will nullify it if wrappedFs does not have it.
f.features = (&fs.Features{
- CaseInsensitive: true,
- DuplicateFiles: true,
- ReadMimeType: false, // Object.MimeType not supported
- WriteMimeType: true,
- BucketBased: true,
- CanHaveEmptyDirectories: true,
- ServerSideAcrossConfigs: true,
+ CaseInsensitive: true,
+ DuplicateFiles: true,
+ ReadMimeType: false, // Object.MimeType not supported
+ WriteMimeType: true,
+ BucketBased: true,
+ CanHaveEmptyDirectories: true,
+ ServerSideAcrossConfigs: true,
+ ReadDirMetadata: true,
+ WriteDirMetadata: true,
+ WriteDirSetModTime: true,
+ UserDirMetadata: true,
+ DirModTimeUpdatesOnWrite: true,
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
f.features.Disable("ListR") // Recursive listing may cause chunker skip files
@@ -821,8 +826,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
}
case fs.Directory:
isSubdir[entry.Remote()] = true
- wrapDir := fs.NewDirCopy(ctx, entry)
- wrapDir.SetRemote(entry.Remote())
+ wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
tempEntries = append(tempEntries, wrapDir)
default:
if f.opt.FailHard {
@@ -1571,6 +1575,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.base.Mkdir(ctx, dir)
}
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+ if do := f.base.Features().MkdirMetadata; do != nil {
+ return do(ctx, dir, metadata)
+ }
+ return nil, fs.ErrorNotImplemented
+}
+
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@@ -1888,6 +1900,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.base, srcRemote, dstRemote)
}
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+ if do := f.base.Features().DirSetModTime; do != nil {
+ return do(ctx, dir, modTime)
+ }
+ return fs.ErrorNotImplemented
+}
+
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@@ -2548,6 +2568,8 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
+ _ fs.DirSetModTimer = (*Fs)(nil)
+ _ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
diff --git a/backend/combine/combine.go b/backend/combine/combine.go
index 74384fb48..c00f2314e 100644
--- a/backend/combine/combine.go
+++ b/backend/combine/combine.go
@@ -222,18 +222,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
}
// check features
var features = (&fs.Features{
- CaseInsensitive: true,
- DuplicateFiles: false,
- ReadMimeType: true,
- WriteMimeType: true,
- CanHaveEmptyDirectories: true,
- BucketBased: true,
- SetTier: true,
- GetTier: true,
- ReadMetadata: true,
- WriteMetadata: true,
- UserMetadata: true,
- PartialUploads: true,
+ CaseInsensitive: true,
+ DuplicateFiles: false,
+ ReadMimeType: true,
+ WriteMimeType: true,
+ CanHaveEmptyDirectories: true,
+ BucketBased: true,
+ SetTier: true,
+ GetTier: true,
+ ReadMetadata: true,
+ WriteMetadata: true,
+ UserMetadata: true,
+ ReadDirMetadata: true,
+ WriteDirMetadata: true,
+ WriteDirSetModTime: true,
+ UserDirMetadata: true,
+ DirModTimeUpdatesOnWrite: true,
+ PartialUploads: true,
}).Fill(ctx, f)
canMove := true
for _, u := range f.upstreams {
@@ -440,6 +445,32 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return u.f.Mkdir(ctx, uRemote)
}
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+ u, uRemote, err := f.findUpstream(dir)
+ if err != nil {
+ return nil, err
+ }
+ do := u.f.Features().MkdirMetadata
+ if do == nil {
+ return nil, fs.ErrorNotImplemented
+ }
+ newDir, err := do(ctx, uRemote, metadata)
+ if err != nil {
+ return nil, err
+ }
+ entries := fs.DirEntries{newDir}
+ entries, err = u.wrapEntries(ctx, entries)
+ if err != nil {
+ return nil, err
+ }
+ newDir, ok := entries[0].(fs.Directory)
+ if !ok {
+ return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
+ }
+ return newDir, nil
+}
+
// purge the upstream or fallback to a slow way
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
if do := u.f.Features().Purge; do != nil {
@@ -755,12 +786,11 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
case fs.Object:
entries[i] = u.newObject(x)
case fs.Directory:
- newDir := fs.NewDirCopy(ctx, x)
- newPath, err := u.pathAdjustment.do(newDir.Remote())
+ newPath, err := u.pathAdjustment.do(x.Remote())
if err != nil {
return nil, err
}
- newDir.SetRemote(newPath)
+ newDir := fs.NewDirWrapper(newPath, x)
entries[i] = newDir
default:
return nil, fmt.Errorf("unknown entry type %T", entry)
@@ -783,7 +813,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.root == "" && dir == "" {
entries = make(fs.DirEntries, 0, len(f.upstreams))
for combineDir := range f.upstreams {
- d := fs.NewDir(combineDir, f.when)
+ d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
entries = append(entries, d)
}
return entries, nil
@@ -965,6 +995,22 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return do(ctx, uDirs)
}
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+ u, uDir, err := f.findUpstream(dir)
+ if err != nil {
+ return err
+ }
+ if uDir == "" {
+ fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
+ return nil
+ }
+ if do := u.f.Features().DirSetModTime; do != nil {
+ return do(ctx, uDir, modTime)
+ }
+ return fs.ErrorNotImplemented
+}
+
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@@ -1099,6 +1145,8 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.PutUncheckeder = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
+ _ fs.DirSetModTimer = (*Fs)(nil)
+ _ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.OpenWriterAter = (*Fs)(nil)
_ fs.FullObject = (*Object)(nil)
diff --git a/backend/compress/compress.go b/backend/compress/compress.go
index b477512e7..5fb52c013 100644
--- a/backend/compress/compress.go
+++ b/backend/compress/compress.go
@@ -183,18 +183,23 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
- CaseInsensitive: true,
- DuplicateFiles: false,
- ReadMimeType: false,
- WriteMimeType: false,
- GetTier: true,
- SetTier: true,
- BucketBased: true,
- CanHaveEmptyDirectories: true,
- ReadMetadata: true,
- WriteMetadata: true,
- UserMetadata: true,
- PartialUploads: true,
+ CaseInsensitive: true,
+ DuplicateFiles: false,
+ ReadMimeType: false,
+ WriteMimeType: false,
+ GetTier: true,
+ SetTier: true,
+ BucketBased: true,
+ CanHaveEmptyDirectories: true,
+ ReadMetadata: true,
+ WriteMetadata: true,
+ UserMetadata: true,
+ ReadDirMetadata: true,
+ WriteDirMetadata: true,
+ WriteDirSetModTime: true,
+ UserDirMetadata: true,
+ DirModTimeUpdatesOnWrite: true,
+ PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
// We support reading MIME types no matter the wrapped fs
f.features.ReadMimeType = true
@@ -784,6 +789,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, dir)
}
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+ if do := f.Fs.Features().MkdirMetadata; do != nil {
+ return do(ctx, dir, metadata)
+ }
+ return nil, fs.ErrorNotImplemented
+}
+
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@@ -927,6 +940,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
return do(ctx, srcFs.Fs, srcRemote, dstRemote)
}
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+ if do := f.Fs.Features().DirSetModTime; do != nil {
+ return do(ctx, dir, modTime)
+ }
+ return fs.ErrorNotImplemented
+}
+
// CleanUp the trash in the Fs
//
// Implement this if you have a way of emptying the trash or
@@ -1497,6 +1518,8 @@ var (
_ fs.Copier = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
+ _ fs.DirSetModTimer = (*Fs)(nil)
+ _ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.CleanUpper = (*Fs)(nil)
_ fs.UnWrapper = (*Fs)(nil)
diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go
index 54b1335dc..23b684ab8 100644
--- a/backend/crypt/crypt.go
+++ b/backend/crypt/crypt.go
@@ -130,6 +130,16 @@ trying to recover an encrypted file with errors and it is desired to
recover as much of the file as possible.`,
Default: false,
Advanced: true,
+ }, {
+ Name: "strict_names",
+ Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
+
+(By default, rclone will just log a NOTICE and continue as normal.)
+This can happen if encrypted and unencrypted files are stored in the same
+directory (which is not recommended.) It may also indicate a more serious
+problem that should be investigated.`,
+ Default: false,
+ Advanced: true,
}, {
Name: "filename_encoding",
Help: `How to encode the encrypted filename to text string.
@@ -263,19 +273,24 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
// the features here are ones we could support, and they are
// ANDed with the ones from wrappedFs
f.features = (&fs.Features{
- CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
- DuplicateFiles: true,
- ReadMimeType: false, // MimeTypes not supported with crypt
- WriteMimeType: false,
- BucketBased: true,
- CanHaveEmptyDirectories: true,
- SetTier: true,
- GetTier: true,
- ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
- ReadMetadata: true,
- WriteMetadata: true,
- UserMetadata: true,
- PartialUploads: true,
+ CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
+ DuplicateFiles: true,
+ ReadMimeType: false, // MimeTypes not supported with crypt
+ WriteMimeType: false,
+ BucketBased: true,
+ CanHaveEmptyDirectories: true,
+ SetTier: true,
+ GetTier: true,
+ ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
+ ReadMetadata: true,
+ WriteMetadata: true,
+ UserMetadata: true,
+ ReadDirMetadata: true,
+ WriteDirMetadata: true,
+ WriteDirSetModTime: true,
+ UserDirMetadata: true,
+ DirModTimeUpdatesOnWrite: true,
+ PartialUploads: true,
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
return f, err
@@ -294,6 +309,7 @@ type Options struct {
PassBadBlocks bool `config:"pass_bad_blocks"`
FilenameEncoding string `config:"filename_encoding"`
Suffix string `config:"suffix"`
+ StrictNames bool `config:"strict_names"`
}
// Fs represents a wrapped fs.Fs
@@ -328,45 +344,64 @@ func (f *Fs) String() string {
}
// Encrypt an object file name to entries.
-func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
+func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
remote := obj.Remote()
decryptedRemote, err := f.cipher.DecryptFileName(remote)
if err != nil {
- fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
- return
+ if f.opt.StrictNames {
+ return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
+ }
+ fs.Logf(remote, "Skipping undecryptable file name: %v", err)
+ return nil
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newObject(obj))
+ return nil
}
// Encrypt a directory file name to entries.
-func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
+func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
- fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
- return
+ if f.opt.StrictNames {
+ return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
+ }
+ fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
+ return nil
}
if f.opt.ShowMapping {
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
}
*entries = append(*entries, f.newDir(ctx, dir))
+ return nil
}
// Encrypt some directory entries. This alters entries returning it as newEntries.
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
newEntries = entries[:0] // in place filter
+ errors := 0
+ var firsterr error
for _, entry := range entries {
switch x := entry.(type) {
case fs.Object:
- f.add(&newEntries, x)
+ err = f.add(&newEntries, x)
case fs.Directory:
- f.addDir(ctx, &newEntries, x)
+ err = f.addDir(ctx, &newEntries, x)
default:
return nil, fmt.Errorf("unknown object type %T", entry)
}
+ if err != nil {
+ errors++
+ if firsterr == nil {
+ firsterr = err
+ }
+ }
+ }
+ if firsterr != nil {
+ return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
}
return newEntries, nil
}
@@ -520,6 +555,37 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
}
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+ do := f.Fs.Features().MkdirMetadata
+ if do == nil {
+ return nil, fs.ErrorNotImplemented
+ }
+ newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
+ if err != nil {
+ return nil, err
+ }
+ var entries = make(fs.DirEntries, 0, 1)
+ err = f.addDir(ctx, &entries, newDir)
+ if err != nil {
+ return nil, err
+ }
+ newDir, ok := entries[0].(fs.Directory)
+ if !ok {
+ return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
+ }
+ return newDir, nil
+}
+
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+ do := f.Fs.Features().DirSetModTime
+ if do == nil {
+ return fs.ErrorNotImplemented
+ }
+ return do(ctx, f.cipher.EncryptDirName(dir), modTime)
+}
+
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
@@ -761,7 +827,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
}
out := make([]fs.Directory, len(dirs))
for i, dir := range dirs {
- out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
+ out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
}
return do(ctx, out)
}
@@ -997,14 +1063,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// newDir returns a dir with the Name decrypted
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
- newDir := fs.NewDirCopy(ctx, dir)
remote := dir.Remote()
decryptedRemote, err := f.cipher.DecryptDirName(remote)
if err != nil {
fs.Debugf(remote, "Undecryptable dir name: %v", err)
} else {
- newDir.SetRemote(decryptedRemote)
+ remote = decryptedRemote
}
+ newDir := fs.NewDirWrapper(remote, dir)
return newDir
}
@@ -1207,6 +1273,8 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
+ _ fs.DirSetModTimer = (*Fs)(nil)
+ _ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
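The crypt listing change above stops silently skipping undecryptable names when the new strict_names option is set: failures are counted, the first error is kept, and one combined error is returned after the whole listing pass. A minimal, self-contained Go sketch of that aggregation pattern (helper names here are illustrative, not rclone APIs):

    package main

    import (
        "errors"
        "fmt"
    )

    // decodeAll decodes every name, remembering the first failure and how many
    // names failed, so one bad entry does not hide the rest of the listing.
    func decodeAll(names []string, decode func(string) (string, error)) ([]string, error) {
        var out []string
        var firstErr error
        failed := 0
        for _, name := range names {
            plain, err := decode(name)
            if err != nil {
                failed++
                if firstErr == nil {
                    firstErr = err
                }
                continue
            }
            out = append(out, plain)
        }
        if firstErr != nil {
            return nil, fmt.Errorf("there were %d undecryptable name errors, first error: %w", failed, firstErr)
        }
        return out, nil
    }

    func main() {
        decoded, err := decodeAll([]string{"good", "bad"}, func(s string) (string, error) {
            if s == "bad" {
                return "", errors.New("not a valid encrypted name")
            }
            return s + ".txt", nil
        })
        fmt.Println(decoded, err)
    }
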
diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index 88e2b7253..a62d47ca9 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -287,7 +287,10 @@ func init() {
},
MetadataInfo: &fs.MetadataInfo{
System: systemMetadataInfo,
- Help: `User metadata is stored in the properties field of the drive object.`,
+ Help: `User metadata is stored in the properties field of the drive object.
+
+Metadata is supported on files and directories.
+`,
},
Options: append(driveOAuthOptions(), []fs.Option{{
Name: "scope",
@@ -870,6 +873,11 @@ type Object struct {
v2Download bool // generate v2 download link ondemand
}
+// Directory describes a drive directory
+type Directory struct {
+ baseObject
+}
+
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
@@ -1374,15 +1382,20 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
}
f.isTeamDrive = opt.TeamDriveID != ""
f.features = (&fs.Features{
- DuplicateFiles: true,
- ReadMimeType: true,
- WriteMimeType: true,
- CanHaveEmptyDirectories: true,
- ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
- FilterAware: true,
- ReadMetadata: true,
- WriteMetadata: true,
- UserMetadata: true,
+ DuplicateFiles: true,
+ ReadMimeType: true,
+ WriteMimeType: true,
+ CanHaveEmptyDirectories: true,
+ ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
+ FilterAware: true,
+ ReadMetadata: true,
+ WriteMetadata: true,
+ UserMetadata: true,
+ ReadDirMetadata: true,
+ WriteDirMetadata: true,
+ WriteDirSetModTime: true,
+ UserDirMetadata: true,
+ DirModTimeUpdatesOnWrite: false, // FIXME need to check!
}).Fill(ctx, f)
// Create a new authorized Drive client.
@@ -1729,11 +1742,9 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
return pathIDOut, found, err
}
-// CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+// createDir makes a directory with pathID as parent and name leaf with optional metadata
+func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Metadata) (info *drive.File, err error) {
leaf = f.opt.Enc.FromStandardName(leaf)
- // fmt.Println("Making", path)
- // Define the metadata for the directory we are going to create.
pathID = actualID(pathID)
createInfo := &drive.File{
Name: leaf,
@@ -1741,14 +1752,63 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
MimeType: driveFolderType,
Parents: []string{pathID},
}
- var info *drive.File
+ var updateMetadata updateMetadataFn
+ if len(metadata) > 0 {
+ updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
+ if err != nil {
+ return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
+ }
+ }
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Create(createInfo).
- Fields("id").
+ Fields(f.getFileFields(ctx)).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
})
+ if err != nil {
+ return nil, err
+ }
+ if updateMetadata != nil {
+ err = updateMetadata(ctx, info)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return info, nil
+}
+
+// updateDir updates an existing directory with the metadata passed in
+func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata) (info *drive.File, err error) {
+ if len(metadata) == 0 {
+ return f.getFile(ctx, dirID, f.getFileFields(ctx))
+ }
+ dirID = actualID(dirID)
+ updateInfo := &drive.File{}
+ updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
+ if err != nil {
+ return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
+ }
+ err = f.pacer.Call(func() (bool, error) {
+ info, err = f.svc.Files.Update(dirID, updateInfo).
+ Fields(f.getFileFields(ctx)).
+ SupportsAllDrives(true).
+ Context(ctx).Do()
+ return f.shouldRetry(ctx, err)
+ })
+ if err != nil {
+ return nil, err
+ }
+ err = updateMetadata(ctx, info)
+ if err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// CreateDir makes a directory with pathID as parent and name leaf
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
+ info, err := f.createDir(ctx, pathID, leaf, nil)
if err != nil {
return "", err
}
@@ -2161,7 +2221,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
// Send the entry to the caller, queueing any directories as new jobs
cb := func(entry fs.DirEntry) error {
- if d, isDir := entry.(*fs.Dir); isDir {
+ if d, isDir := entry.(fs.Directory); isDir {
job := listREntry{actualID(d.ID()), d.Remote()}
sendJob(job)
}
@@ -2338,11 +2398,11 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
if item.ResourceKey != "" {
f.dirResourceKeys.Store(item.Id, item.ResourceKey)
}
- when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
- d := fs.NewDir(remote, when).SetID(item.Id)
- if len(item.Parents) > 0 {
- d.SetParentID(item.Parents[0])
+ baseObject, err := f.newBaseObject(ctx, remote, item)
+ if err != nil {
+ return nil, err
}
+ d := &Directory{baseObject: baseObject}
return d, nil
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
// ignore object
@@ -2535,6 +2595,59 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return err
}
+// MkdirMetadata makes the directory passed in as dir.
+//
+// It shouldn't return an error if it already exists.
+//
+// If the metadata is not nil it is set.
+//
+// It returns the directory that was created.
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+ var info *drive.File
+ dirID, err := f.dirCache.FindDir(ctx, dir, false)
+ if err == fs.ErrorDirNotFound {
+ // Directory does not exist so create it
+ var leaf, parentID string
+ leaf, parentID, err = f.dirCache.FindPath(ctx, dir, true)
+ if err != nil {
+ return nil, err
+ }
+ info, err = f.createDir(ctx, parentID, leaf, metadata)
+ } else if err == nil {
+ // Directory exists and needs updating
+ info, err = f.updateDir(ctx, dirID, metadata)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert the info into a directory entry
+ entry, err := f.itemToDirEntry(ctx, dir, info)
+ if err != nil {
+ return nil, err
+ }
+ dirEntry, ok := entry.(fs.Directory)
+ if !ok {
+ return nil, fmt.Errorf("internal error: expecting %T to be an fs.Directory", entry)
+ }
+
+ return dirEntry, nil
+}
+
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+ dirID, err := f.dirCache.FindDir(ctx, dir, false)
+ if err != nil {
+ return err
+ }
+ o := baseObject{
+ fs: f,
+ remote: dir,
+ id: dirID,
+ }
+ return o.SetModTime(ctx, modTime)
+}
+
// delete a file or directory unconditionally by ID
func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
return f.pacer.Call(func() (bool, error) {
@@ -2678,6 +2791,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
createInfo.Description = ""
}
+ // Adjust metadata if required
+ updateMetadata, err := f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), createInfo, false)
+ if err != nil {
+ return nil, err
+ }
+
// get the ID of the thing to copy
// copy the contents if CopyShortcutContent
// else copy the shortcut only
@@ -2691,7 +2810,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
copy := f.svc.Files.Copy(id, createInfo).
- Fields(partialFields).
+ Fields(f.getFileFields(ctx)).
SupportsAllDrives(true).
KeepRevisionForever(f.opt.KeepRevisionForever)
srcObj.addResourceKey(copy.Header())
@@ -2727,6 +2846,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
}
}
+ // Finalise metadata
+ err = updateMetadata(ctx, info)
+ if err != nil {
+ return nil, err
+ }
return newObject, nil
}
@@ -2900,13 +3024,19 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
dstParents := strings.Join(dstInfo.Parents, ",")
dstInfo.Parents = nil
+ // Adjust metadata if required
+ updateMetadata, err := f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), dstInfo, true)
+ if err != nil {
+ return nil, err
+ }
+
// Do the move
var info *drive.File
err = f.pacer.Call(func() (bool, error) {
info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
RemoveParents(srcParentID).
AddParents(dstParents).
- Fields(partialFields).
+ Fields(f.getFileFields(ctx)).
SupportsAllDrives(true).
Context(ctx).Do()
return f.shouldRetry(ctx, err)
@@ -2915,6 +3045,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
+ // Finalise metadata
+ err = updateMetadata(ctx, info)
+ if err != nil {
+ return nil, err
+ }
return f.newObjectWithInfo(ctx, remote, info)
}
@@ -3420,6 +3555,50 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
return nil
}
+func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
+ list := f.svc.Files.List()
+ if query != "" {
+ list.Q(query)
+ }
+
+ if f.opt.ListChunk > 0 {
+ list.PageSize(f.opt.ListChunk)
+ }
+ list.SupportsAllDrives(true)
+ list.IncludeItemsFromAllDrives(true)
+ if f.isTeamDrive && !f.opt.SharedWithMe {
+ list.DriveId(f.opt.TeamDriveID)
+ list.Corpora("drive")
+ }
+ // If using appDataFolder then need to add Spaces
+ if f.rootFolderID == "appDataFolder" {
+ list.Spaces("appDataFolder")
+ }
+
+ fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
+
+ var results []*drive.File
+ for {
+ var files *drive.FileList
+ err = f.pacer.Call(func() (bool, error) {
+ files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
+ return f.shouldRetry(ctx, err)
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to execute query: %w", err)
+ }
+ if files.IncompleteSearch {
+ fs.Errorf(f, "search result INCOMPLETE")
+ }
+ results = append(results, files.Files...)
+ if files.NextPageToken == "" {
+ break
+ }
+ list.PageToken(files.NextPageToken)
+ }
+ return results, nil
+}
+
var commandHelp = []fs.CommandHelp{{
Name: "get",
Short: "Get command for fetching the drive config parameters",
@@ -3570,6 +3749,47 @@ Use the --interactive/-i or --dry-run flag to see what would be copied before co
}, {
Name: "importformats",
Short: "Dump the import formats for debug purposes",
+}, {
+ Name: "query",
+ Short: "List files using Google Drive query language",
+ Long: `This command lists files based on a query
+
+Usage:
+
+ rclone backend query drive: query
+
+The query syntax is documented at [Google Drive Search query terms and
+operators](https://developers.google.com/drive/api/guides/ref-search-terms).
+
+For example:
+
+ rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
+
+If the query contains literal ' or \ characters, these need to be escaped with
+\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
+file named "foo ' \.txt":
+
+ rclone backend query drive: "name = 'foo \' \\\.txt'"
+
+The result is a JSON array of matches, for example:
+
+[
+ {
+ "createdTime": "2017-06-29T19:58:28.537Z",
+ "id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
+ "md5Checksum": "68518d16be0c6fbfab918be61d658032",
+ "mimeType": "text/plain",
+ "modifiedTime": "2024-02-02T10:40:02.874Z",
+ "name": "foo ' \\.txt",
+ "parents": [
+ "0BxAe_BCDE4zkFGZpcWJGek0xbzC"
+ ],
+ "resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
+ "sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
+ "size": "311",
+ "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
+ }
+]`,
}}
// Command the backend to run a named command
@@ -3687,6 +3907,17 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
return f.exportFormats(ctx), nil
case "importformats":
return f.importFormats(ctx), nil
+ case "query":
+ if len(arg) == 1 {
+ query := arg[0]
+ var results, err = f.query(ctx, query)
+ if err != nil {
+ return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
+ }
+ return results, nil
+ } else {
+ return nil, errors.New("need a query argument")
+ }
default:
return nil, fs.ErrorCommandNotFound
}
@@ -4193,6 +4424,37 @@ func (o *linkObject) ext() string {
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}
+// Items returns the count of items in this directory or this
+// directory and subdirectories if known, -1 for unknown
+func (d *Directory) Items() int64 {
+ return -1
+}
+
+// SetMetadata sets metadata for a Directory
+//
+// It should return fs.ErrorNotImplemented if it can't set metadata
+func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
+ info, err := d.fs.updateDir(ctx, d.id, metadata)
+ if err != nil {
+ return fmt.Errorf("failed to update directory info: %w", err)
+ }
+ // Update directory from info returned
+ baseObject, err := d.fs.newBaseObject(ctx, d.remote, info)
+ if err != nil {
+ return fmt.Errorf("failed to process directory info: %w", err)
+ }
+ d.baseObject = baseObject
+ return err
+}
+
+// Hash does nothing on a directory
+//
+// This method is implemented with the incorrect type signature to
+// stop the Directory type asserting to fs.Object or fs.ObjectInfo
+func (d *Directory) Hash() {
+ // Does nothing
+}
+
// templates for document link files
const (
urlTemplate = `[InternetShortcut]{{"\r"}}
@@ -4242,6 +4504,8 @@ var (
_ fs.PublicLinker = (*Fs)(nil)
_ fs.ListRer = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
+ _ fs.DirSetModTimer = (*Fs)(nil)
+ _ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.MimeTyper = (*Object)(nil)
@@ -4256,4 +4520,8 @@ var (
_ fs.MimeTyper = (*linkObject)(nil)
_ fs.IDer = (*linkObject)(nil)
_ fs.ParentIDer = (*linkObject)(nil)
+ _ fs.Directory = (*Directory)(nil)
+ _ fs.SetModTimer = (*Directory)(nil)
+ _ fs.SetMetadataer = (*Directory)(nil)
+ _ fs.ParentIDer = (*Directory)(nil)
)
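From the caller's side, the new directory metadata support on drive is reached through the same optional-feature mechanism as the other interfaces asserted above. A hedged caller-side sketch, assuming an already-constructed fs.Fs; the "description" metadata key is only an example and depends on what the backend's metadata map accepts:

    package example

    import (
        "context"
        "fmt"
        "time"

        "github.com/rclone/rclone/fs"
    )

    // ensureDirWithMetadata creates or updates a directory with metadata if the
    // backend supports MkdirMetadata, then bumps its modification time if
    // DirSetModTime is available.
    func ensureDirWithMetadata(ctx context.Context, f fs.Fs, dir string) error {
        features := f.Features()
        if do := features.MkdirMetadata; do != nil {
            d, err := do(ctx, dir, fs.Metadata{"description": "yearly reports"})
            if err != nil {
                return err
            }
            fmt.Printf("created or updated %q\n", d.Remote())
        }
        if do := features.DirSetModTime; do != nil {
            return do(ctx, dir, time.Now())
        }
        return nil
    }
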
diff --git a/backend/drive/drive_internal_test.go b/backend/drive/drive_internal_test.go
index c2e6da05b..838962759 100644
--- a/backend/drive/drive_internal_test.go
+++ b/backend/drive/drive_internal_test.go
@@ -524,6 +524,41 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
})
}
+// TestIntegration/FsMkdir/FsPutFiles/Internal/Query
+func (f *Fs) InternalTestQuery(t *testing.T) {
+ ctx := context.Background()
+ var err error
+ t.Run("BadQuery", func(t *testing.T) {
+ _, err = f.query(ctx, "this is a bad query")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "failed to execute query")
+ })
+
+ t.Run("NoMatch", func(t *testing.T) {
+ results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir))
+ require.NoError(t, err)
+ assert.Len(t, results, 0)
+ })
+
+ t.Run("GoodQuery", func(t *testing.T) {
+ pathSegments := strings.Split(existingFile, "/")
+ var parent string
+ for _, item := range pathSegments {
+ // the file name contains ' characters which must be escaped
+ escapedItem := f.opt.Enc.FromStandardName(item)
+ escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`)
+ escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`)
+
+ results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
+ require.NoError(t, err)
+ require.Len(t, results, 1)
+ assert.Len(t, results[0].Id, 33)
+ assert.Equal(t, results[0].Name, item)
+ parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
+ }
+ })
+}
+
// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
@@ -611,6 +646,7 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
+ t.Run("Query", f.InternalTestQuery)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
}
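The new "query" subcommand is dispatched through the existing Command method shown above, so it can also be driven programmatically via the fs.Commander optional interface rather than the `rclone backend` CLI. A hedged sketch; for the drive backend the returned value is the slice of matching files, encoded here as JSON:

    package example

    import (
        "context"
        "encoding/json"
        "errors"
        "os"

        "github.com/rclone/rclone/fs"
    )

    // runDriveQuery runs the drive backend "query" command on f and prints the
    // matches as indented JSON.
    func runDriveQuery(ctx context.Context, f fs.Fs, query string) error {
        cmdr, ok := f.(fs.Commander)
        if !ok {
            return errors.New("backend does not support backend commands")
        }
        out, err := cmdr.Command(ctx, "query", []string{query}, nil)
        if err != nil {
            return err
        }
        enc := json.NewEncoder(os.Stdout)
        enc.SetIndent("", "  ")
        return enc.Encode(out)
    }
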
diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go
index 0eea08c53..235e121be 100644
--- a/backend/dropbox/dropbox.go
+++ b/backend/dropbox/dropbox.go
@@ -428,15 +428,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
members := []*team.UserSelectorArg{&user}
args := team.NewMembersGetInfoArgs(members)
- memberIds, err := f.team.MembersGetInfo(args)
+ memberIDs, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
- if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
+ if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil {
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
}
- cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
+ cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}
f.srv = files.New(cfg)
@@ -1231,7 +1231,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, err
}
var total uint64
- var used = q.Used
+ used := q.Used
if q.Allocation != nil {
if q.Allocation.Individual != nil {
total += q.Allocation.Individual.Allocated
diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go
index aab419779..6c7e7b105 100644
--- a/backend/ftp/ftp.go
+++ b/backend/ftp/ftp.go
@@ -970,6 +970,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
f.putFtpConnection(&c, err)
if errX := textprotoError(err); errX != nil {
switch errX.Code {
+ case ftp.StatusRequestedFileActionOK: // some ftp servers apparently return 250 instead of 257
+ err = nil // see: https://forum.rclone.org/t/rclone-pop-up-an-i-o-error-when-creating-a-folder-in-a-mounted-ftp-drive/44368/
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
case 521: // dir already exists: error number according to RFC 959: issue #2363
diff --git a/backend/googlephotos/api/types.go b/backend/googlephotos/api/types.go
index 9b7aa79f6..f49195685 100644
--- a/backend/googlephotos/api/types.go
+++ b/backend/googlephotos/api/types.go
@@ -56,8 +56,7 @@ type MediaItem struct {
CreationTime time.Time `json:"creationTime"`
Width string `json:"width"`
Height string `json:"height"`
- Photo struct {
- } `json:"photo"`
+ Photo struct{} `json:"photo"`
} `json:"mediaMetadata"`
Filename string `json:"filename"`
}
@@ -68,7 +67,7 @@ type MediaItems struct {
NextPageToken string `json:"nextPageToken"`
}
-//Content categories
+// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
@@ -187,5 +186,5 @@ type BatchCreateResponse struct {
// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
- MediaItemIds []string `json:"mediaItemIds"`
+ MediaItemIDs []string `json:"mediaItemIds"`
}
diff --git a/backend/googlephotos/googlephotos.go b/backend/googlephotos/googlephotos.go
index 5733b5b86..28722009f 100644
--- a/backend/googlephotos/googlephotos.go
+++ b/backend/googlephotos/googlephotos.go
@@ -282,7 +282,7 @@ func errorHandler(resp *http.Response) error {
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
- var e = api.Error{
+ e := api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
Message: string(body),
@@ -704,7 +704,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
Path: "/albums",
Parameters: url.Values{},
}
- var request = api.CreateAlbum{
+ request := api.CreateAlbum{
Album: &api.Album{
Title: albumTitle,
},
@@ -1005,7 +1005,7 @@ func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, resul
Method: "POST",
Path: "/mediaItems:batchCreate",
}
- var request = api.BatchCreateRequest{
+ request := api.BatchCreateRequest{
AlbumID: albumID,
}
itemsInBatch := 0
@@ -1152,6 +1152,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
info = results[0]
}
}
+ if err != nil {
+ return fmt.Errorf("failed to commit batch: %w", err)
+ }
o.setMetaData(info)
@@ -1180,8 +1183,8 @@ func (o *Object) Remove(ctx context.Context) (err error) {
Path: "/albums/" + album.ID + ":batchRemoveMediaItems",
NoResponse: true,
}
- var request = api.BatchRemoveItems{
- MediaItemIds: []string{o.id},
+ request := api.BatchRemoveItems{
+ MediaItemIDs: []string{o.id},
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
diff --git a/backend/hasher/commands.go b/backend/hasher/commands.go
index 517ab17cc..a6eff7efd 100644
--- a/backend/hasher/commands.go
+++ b/backend/hasher/commands.go
@@ -80,6 +80,14 @@ func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
}
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
}
+ if f.db == nil {
+ if f.opt.MaxAge == 0 {
+ fs.Errorf(f, "db not found. (disabled with max_age = 0)")
+ } else {
+ fs.Errorf(f, "db not found.")
+ }
+ return kv.ErrInactive
+ }
op := &kvDump{
full: full,
root: root,
diff --git a/backend/hasher/hasher.go b/backend/hasher/hasher.go
index 325f7f619..c50f2404a 100644
--- a/backend/hasher/hasher.go
+++ b/backend/hasher/hasher.go
@@ -164,16 +164,21 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
}
stubFeatures := &fs.Features{
- CanHaveEmptyDirectories: true,
- IsLocal: true,
- ReadMimeType: true,
- WriteMimeType: true,
- SetTier: true,
- GetTier: true,
- ReadMetadata: true,
- WriteMetadata: true,
- UserMetadata: true,
- PartialUploads: true,
+ CanHaveEmptyDirectories: true,
+ IsLocal: true,
+ ReadMimeType: true,
+ WriteMimeType: true,
+ SetTier: true,
+ GetTier: true,
+ ReadMetadata: true,
+ WriteMetadata: true,
+ UserMetadata: true,
+ ReadDirMetadata: true,
+ WriteDirMetadata: true,
+ WriteDirSetModTime: true,
+ UserDirMetadata: true,
+ DirModTimeUpdatesOnWrite: true,
+ PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)
@@ -341,6 +346,22 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return errors.New("MergeDirs not supported")
}
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+ if do := f.Fs.Features().DirSetModTime; do != nil {
+ return do(ctx, dir, modTime)
+ }
+ return fs.ErrorNotImplemented
+}
+
+// MkdirMetadata makes the root directory of the Fs object
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+ if do := f.Fs.Features().MkdirMetadata; do != nil {
+ return do(ctx, dir, metadata)
+ }
+ return nil, fs.ErrorNotImplemented
+}
+
// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
@@ -418,7 +439,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
- err = f.db.Stop(false)
+ if f.db != nil && !f.db.IsStopped() {
+ err = f.db.Stop(false)
+ }
if do := f.Fs.Features().Shutdown; do != nil {
if err2 := do(ctx); err2 != nil {
err = err2
@@ -528,6 +551,8 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
+ _ fs.DirSetModTimer = (*Fs)(nil)
+ _ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
diff --git a/backend/hasher/hasher_internal_test.go b/backend/hasher/hasher_internal_test.go
index 0ca418e20..e289d8f0d 100644
--- a/backend/hasher/hasher_internal_test.go
+++ b/backend/hasher/hasher_internal_test.go
@@ -60,9 +60,11 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
assert.NotNil(t, dst)
// check that hash was created
- hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
- assert.NoError(t, err)
- assert.NotEmpty(t, hash)
+ if f.opt.MaxAge > 0 {
+ hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
+ assert.NoError(t, err)
+ assert.NotEmpty(t, hash)
+ }
//t.Logf("hash is %q", hash)
_ = operations.Purge(ctx, f, dirName)
}
diff --git a/backend/hasher/hasher_test.go b/backend/hasher/hasher_test.go
index f17303ecc..f5119c724 100644
--- a/backend/hasher/hasher_test.go
+++ b/backend/hasher/hasher_test.go
@@ -37,4 +37,9 @@ func TestIntegration(t *testing.T) {
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
+ // test again with MaxAge = 0
+ if *fstest.RemoteName == "" {
+ opt.ExtraConfig = append(opt.ExtraConfig, fstests.ExtraConfigItem{Name: "TestHasher", Key: "max_age", Value: "0"})
+ fstests.Run(t, &opt)
+ }
}
diff --git a/backend/hasher/object.go b/backend/hasher/object.go
index ef6349b0f..1003807c8 100644
--- a/backend/hasher/object.go
+++ b/backend/hasher/object.go
@@ -71,7 +71,14 @@ func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string,
f := o.f
if f.passHashes.Contains(hashType) {
fs.Debugf(o, "pass %s", hashType)
- return o.Object.Hash(ctx, hashType)
+ hashVal, err = o.Object.Hash(ctx, hashType)
+ if hashVal != "" {
+ return hashVal, err
+ }
+ if err != nil {
+ fs.Debugf(o, "error passing %s: %v", hashType, err)
+ }
+ fs.Debugf(o, "passed %s is blank -- trying other methods", hashType)
}
if !f.suppHashes.Contains(hashType) {
fs.Debugf(o, "unsupp %s", hashType)
diff --git a/backend/local/local.go b/backend/local/local.go
index c444267f1..848375901 100644
--- a/backend/local/local.go
+++ b/backend/local/local.go
@@ -53,6 +53,8 @@ netbsd, macOS and Solaris. It is **not** supported on Windows yet
User metadata is stored as extended attributes (which may not be
supported by all file systems) under the "user.*" prefix.
+
+Metadata is supported on files and directories.
`,
},
Options: []fs.Option{{
@@ -270,6 +272,11 @@ type Object struct {
translatedLink bool // Is this object a translated link
}
+// Directory represents a local filesystem directory
+type Directory struct {
+ Object
+}
+
// ------------------------------------------------------------
var (
@@ -301,15 +308,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
- CaseInsensitive: f.caseInsensitive(),
- CanHaveEmptyDirectories: true,
- IsLocal: true,
- SlowHash: true,
- ReadMetadata: true,
- WriteMetadata: true,
- UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
- FilterAware: true,
- PartialUploads: true,
+ CaseInsensitive: f.caseInsensitive(),
+ CanHaveEmptyDirectories: true,
+ IsLocal: true,
+ SlowHash: true,
+ ReadMetadata: true,
+ WriteMetadata: true,
+ ReadDirMetadata: true,
+ WriteDirMetadata: true,
+ WriteDirSetModTime: true,
+ UserDirMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
+ DirModTimeUpdatesOnWrite: true,
+ UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
+ FilterAware: true,
+ PartialUploads: true,
}).Fill(ctx, f)
if opt.FollowSymlinks {
f.lstat = os.Stat
@@ -453,6 +465,15 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
+// Create new directory object from the info passed in
+func (f *Fs) newDirectory(dir string, fi os.FileInfo) *Directory {
+ o := f.newObject(dir)
+ o.setMetadata(fi)
+ return &Directory{
+ Object: *o,
+ }
+}
+
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@@ -563,7 +584,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
// Ignore directories which are symlinks. These are junction points under windows which
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
- d := fs.NewDir(newRemote, fi.ModTime())
+ d := f.newDirectory(newRemote, fi)
entries = append(entries, d)
}
} else {
@@ -643,6 +664,58 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
return nil
}
+// DirSetModTime sets the directory modtime for dir
+func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
+ o := Object{
+ fs: f,
+ remote: dir,
+ path: f.localPath(dir),
+ }
+ return o.SetModTime(ctx, modTime)
+}
+
+// MkdirMetadata makes the directory passed in as dir.
+//
+// It shouldn't return an error if it already exists.
+//
+// If the metadata is not nil it is set.
+//
+// It returns the directory that was created.
+func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
+ // Find and or create the directory
+ localPath := f.localPath(dir)
+ fi, err := f.lstat(localPath)
+ if errors.Is(err, os.ErrNotExist) {
+ err := f.Mkdir(ctx, dir)
+ if err != nil {
+ return nil, fmt.Errorf("mkdir metadata: failed make directory: %w", err)
+ }
+ fi, err = f.lstat(localPath)
+ if err != nil {
+ return nil, fmt.Errorf("mkdir metadata: failed to read info: %w", err)
+ }
+ } else if err != nil {
+ return nil, err
+ }
+
+ // Create directory object
+ d := f.newDirectory(dir, fi)
+
+ // Set metadata on the directory object if provided
+ if metadata != nil {
+ err = d.writeMetadata(metadata)
+ if err != nil {
+ return nil, fmt.Errorf("failed to set metadata on directory: %w", err)
+ }
+ // Re-read info now we have finished setting stuff
+ err = d.lstat()
+ if err != nil {
+ return nil, fmt.Errorf("mkdir metadata: failed to re-read info: %w", err)
+ }
+ }
+ return d, nil
+}
+
// Rmdir removes the directory
//
// If it isn't empty it will return an error
@@ -720,27 +793,6 @@ func (f *Fs) readPrecision() (precision time.Duration) {
return
}
-// Purge deletes all the files in the directory
-//
-// Optional interface: Only implement this if you have a way of
-// deleting all the files quicker than just running Remove() on the
-// result of List()
-func (f *Fs) Purge(ctx context.Context, dir string) error {
- dir = f.localPath(dir)
- fi, err := f.lstat(dir)
- if err != nil {
- // already purged
- if os.IsNotExist(err) {
- return fs.ErrorDirNotFound
- }
- return err
- }
- if !fi.Mode().IsDir() {
- return fmt.Errorf("can't purge non directory: %q", dir)
- }
- return os.RemoveAll(dir)
-}
-
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
@@ -780,6 +832,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, err
}
+ // Fetch metadata if --metadata is in use
+ meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
+ if err != nil {
+ return nil, fmt.Errorf("move: failed to read metadata: %w", err)
+ }
+
// Do the move
err = os.Rename(srcObj.path, dstObj.path)
if os.IsNotExist(err) {
@@ -795,6 +853,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
return nil, fs.ErrorCantMove
}
+ // Set metadata if --metadata is in use
+ err = dstObj.writeMetadata(meta)
+ if err != nil {
+ return nil, fmt.Errorf("move: failed to set metadata: %w", err)
+ }
+
// Update the info
err = dstObj.lstat()
if err != nil {
@@ -1447,6 +1511,10 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
if runtime.GOOS == "windows" {
s = filepath.ToSlash(s)
vol := filepath.VolumeName(s)
+ if vol == `\\?` && len(s) >= 6 {
+ // `\\?\C:`
+ vol = s[:6]
+ }
s = vol + enc.FromStandardPath(s[len(vol):])
s = filepath.FromSlash(s)
if !noUNC {
@@ -1459,15 +1527,51 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
return s
}
+// Items returns the count of items in this directory or this
+// directory and subdirectories if known, -1 for unknown
+func (d *Directory) Items() int64 {
+ return -1
+}
+
+// ID returns the internal ID of this directory if known, or
+// "" otherwise
+func (d *Directory) ID() string {
+ return ""
+}
+
+// SetMetadata sets metadata for a Directory
+//
+// It should return fs.ErrorNotImplemented if it can't set metadata
+func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
+ err := d.writeMetadata(metadata)
+ if err != nil {
+ return fmt.Errorf("SetMetadata failed on Directory: %w", err)
+ }
+ // Re-read info now we have finished setting stuff
+ return d.lstat()
+}
+
+// Hash does nothing on a directory
+//
+// This method is implemented with the incorrect type signature to
+// stop the Directory type asserting to fs.Object or fs.ObjectInfo
+func (d *Directory) Hash() {
+ // Does nothing
+}
+
// Check the interfaces are satisfied
var (
- _ fs.Fs = &Fs{}
- _ fs.Purger = &Fs{}
- _ fs.PutStreamer = &Fs{}
- _ fs.Mover = &Fs{}
- _ fs.DirMover = &Fs{}
- _ fs.Commander = &Fs{}
- _ fs.OpenWriterAter = &Fs{}
- _ fs.Object = &Object{}
- _ fs.Metadataer = &Object{}
+ _ fs.Fs = &Fs{}
+ _ fs.PutStreamer = &Fs{}
+ _ fs.Mover = &Fs{}
+ _ fs.DirMover = &Fs{}
+ _ fs.Commander = &Fs{}
+ _ fs.OpenWriterAter = &Fs{}
+ _ fs.DirSetModTimer = &Fs{}
+ _ fs.MkdirMetadataer = &Fs{}
+ _ fs.Object = &Object{}
+ _ fs.Metadataer = &Object{}
+ _ fs.Directory = &Directory{}
+ _ fs.SetModTimer = &Directory{}
+ _ fs.SetMetadataer = &Directory{}
)
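Because the local backend's Directory type now satisfies fs.Directory together with the SetModTimer and SetMetadataer interfaces asserted above, directory entries returned from a listing can be updated in place. A hedged sketch that relies only on those interface assertions:

    package example

    import (
        "context"
        "fmt"
        "time"

        "github.com/rclone/rclone/fs"
    )

    // touchDir sets the modification time on a directory entry returned from a
    // listing, if the concrete type supports it.
    func touchDir(ctx context.Context, entry fs.DirEntry) error {
        dir, ok := entry.(fs.Directory)
        if !ok {
            return fmt.Errorf("%q is not a directory", entry.Remote())
        }
        setter, ok := dir.(fs.SetModTimer)
        if !ok {
            return fs.ErrorNotImplemented
        }
        return setter.SetModTime(ctx, time.Now())
    }
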
diff --git a/backend/local/setbtime_windows.go b/backend/local/setbtime_windows.go
index adb9efa3a..2a46f09eb 100644
--- a/backend/local/setbtime_windows.go
+++ b/backend/local/setbtime_windows.go
@@ -4,7 +4,6 @@
package local
import (
- "os"
"syscall"
"time"
)
@@ -13,7 +12,13 @@ const haveSetBTime = true
// setBTime sets the birth time of the file passed in
func setBTime(name string, btime time.Time) (err error) {
- h, err := syscall.Open(name, os.O_RDWR, 0755)
+ pathp, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ h, err := syscall.CreateFile(pathp,
+ syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
+ syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
if err != nil {
return err
}
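The setBTime change above opens the path with CreateFile using FILE_WRITE_ATTRIBUTES and FILE_FLAG_BACKUP_SEMANTICS, which, unlike the previous os.O_RDWR open, also works for directories. The hunk cuts off before the handle is used; a hedged sketch of how the remainder of such a function typically looks, using only standard syscall calls (an illustration, not necessarily the exact rclone code):

    //go:build windows

    package local

    import (
        "syscall"
        "time"
    )

    // setBTimeSketch sets the Windows creation ("birth") time of name.
    func setBTimeSketch(name string, btime time.Time) error {
        pathp, err := syscall.UTF16PtrFromString(name)
        if err != nil {
            return err
        }
        // FILE_FLAG_BACKUP_SEMANTICS is needed so directories can be opened too.
        h, err := syscall.CreateFile(pathp,
            syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
            syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
        if err != nil {
            return err
        }
        defer func() { _ = syscall.Close(h) }()
        bFileTime := syscall.NsecToFiletime(btime.UnixNano())
        // Only the creation time is written; access and write times are left as nil.
        return syscall.SetFileTime(h, &bFileTime, nil, nil)
    }
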
diff --git a/backend/netstorage/netstorage.go b/backend/netstorage/netstorage.go
index d5a0e9c6b..d365918ca 100755
--- a/backend/netstorage/netstorage.go
+++ b/backend/netstorage/netstorage.go
@@ -15,6 +15,7 @@ import (
"math/rand"
"net/http"
"net/url"
+ "path"
"strconv"
"strings"
"sync"
@@ -260,6 +261,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
case fs.ErrorObjectNotFound:
return f, nil
case fs.ErrorIsFile:
+ // Correct root if definitely pointing to a file
+ f.root = path.Dir(f.root)
+ if f.root == "." || f.root == "/" {
+ f.root = ""
+ }
// Fs points to the parent directory
return f, err
default:
diff --git a/backend/onedrive/api/types.go b/backend/onedrive/api/types.go
index c8e7a5fd1..da4fa5a3c 100644
--- a/backend/onedrive/api/types.go
+++ b/backend/onedrive/api/types.go
@@ -7,7 +7,7 @@ import (
)
const (
- timeFormat = `"` + time.RFC3339 + `"`
+ timeFormat = `"` + "2006-01-02T15:04:05.999Z" + `"`
// PackageTypeOneNote is the package type value for OneNote files
PackageTypeOneNote = "oneNote"
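The timeFormat constant above swaps the RFC3339 layout for a quoted millisecond layout because the custom Timestamp type marshals and unmarshals the surrounding JSON quotes itself. A hedged sketch of how a quoted-layout timestamp type round-trips JSON (type and method bodies here are illustrative, not the onedrive implementation):

    package example

    import "time"

    // quotedTimeFormat includes the JSON quotes so MarshalJSON/UnmarshalJSON can
    // work on the raw token directly.
    const quotedTimeFormat = `"` + "2006-01-02T15:04:05.999Z" + `"`

    // Timestamp is a time.Time that marshals with millisecond precision and a
    // literal trailing Z.
    type Timestamp time.Time

    // MarshalJSON renders the time as a quoted UTC string.
    func (t Timestamp) MarshalJSON() ([]byte, error) {
        return []byte(time.Time(t).UTC().Format(quotedTimeFormat)), nil
    }

    // UnmarshalJSON parses the quoted string back into the Timestamp.
    func (t *Timestamp) UnmarshalJSON(data []byte) error {
        parsed, err := time.Parse(quotedTimeFormat, string(data))
        if err != nil {
            return err
        }
        *t = Timestamp(parsed)
        return nil
    }
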
@@ -40,17 +40,17 @@ var _ error = (*Error)(nil)
// Identity represents an identity of an actor. For example, an actor
// can be a user, device, or application.
type Identity struct {
- DisplayName string `json:"displayName"`
- ID string `json:"id"`
+ DisplayName string `json:"displayName,omitempty"`
+ ID string `json:"id,omitempty"`
}
// IdentitySet is a keyed collection of Identity objects. It is used
// to represent a set of identities associated with various events for
// an item, such as created by or last modified by.
type IdentitySet struct {
- User Identity `json:"user"`
- Application Identity `json:"application"`
- Device Identity `json:"device"`
+ User Identity `json:"user,omitempty"`
+ Application Identity `json:"application,omitempty"`
+ Device Identity `json:"device,omitempty"`
}
// Quota groups storage space quota-related information on OneDrive into a single structure.
@@ -150,16 +150,15 @@ type FileFacet struct {
// facet can be used to specify the last modified date or created date
// of the item as it was on the local device.
type FileSystemInfoFacet struct {
- CreatedDateTime Timestamp `json:"createdDateTime"` // The UTC date and time the file was created on a client.
- LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // The UTC date and time the file was last modified on a client.
+ CreatedDateTime Timestamp `json:"createdDateTime,omitempty"` // The UTC date and time the file was created on a client.
+ LastModifiedDateTime Timestamp `json:"lastModifiedDateTime,omitempty"` // The UTC date and time the file was last modified on a client.
}
// DeletedFacet indicates that the item on OneDrive has been
// deleted. In this version of the API, the presence (non-null) of the
// facet value indicates that the file was deleted. A null (or
// missing) value indicates that the file is not deleted.
-type DeletedFacet struct {
-}
+type DeletedFacet struct{}
// PackageFacet indicates that a DriveItem is the top level item
// in a "package" or a collection of items that should be treated as a collection instead of individual items.
@@ -168,31 +167,141 @@ type PackageFacet struct {
Type string `json:"type"`
}
+// SharedType indicates a DriveItem has been shared with others. The resource includes information about how the item is shared.
+// If a Driveitem has a non-null shared facet, the item has been shared.
+type SharedType struct {
+ Owner IdentitySet `json:"owner,omitempty"` // The identity of the owner of the shared item. Read-only.
+ Scope string `json:"scope,omitempty"` // Indicates the scope of how the item is shared: anonymous, organization, or users. Read-only.
+ SharedBy IdentitySet `json:"sharedBy,omitempty"` // The identity of the user who shared the item. Read-only.
+ SharedDateTime Timestamp `json:"sharedDateTime,omitempty"` // The UTC date and time when the item was shared. Read-only.
+}
+
+// SharingInvitationType groups invitation-related data items into a single structure.
+type SharingInvitationType struct {
+ Email string `json:"email,omitempty"` // The email address provided for the recipient of the sharing invitation. Read-only.
+ InvitedBy *IdentitySet `json:"invitedBy,omitempty"` // Provides information about who sent the invitation that created this permission, if that information is available. Read-only.
+ SignInRequired bool `json:"signInRequired,omitempty"` // If true the recipient of the invitation needs to sign in in order to access the shared item. Read-only.
+}
+
+// SharingLinkType groups link-related data items into a single structure.
+// If a Permission resource has a non-null sharingLink facet, the permission represents a sharing link (as opposed to permissions granted to a person or group).
+type SharingLinkType struct {
+ Application *Identity `json:"application,omitempty"` // The app the link is associated with.
+ Type LinkType `json:"type,omitempty"` // The type of the link created.
+ Scope LinkScope `json:"scope,omitempty"` // The scope of the link represented by this permission. Value anonymous indicates the link is usable by anyone, organization indicates the link is only usable for users signed into the same tenant.
+ WebHTML string `json:"webHtml,omitempty"` // For embed links, this property contains the HTML code for an