diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3fc6a9d10..fe1d84c38 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,12 +17,11 @@ on: manual: description: Manual run (bypass default conditions) type: boolean - required: true default: true jobs: build: - if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }} + if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) timeout-minutes: 60 strategy: fail-fast: false @@ -217,7 +216,7 @@ jobs: if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone' lint: - if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }} + if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) timeout-minutes: 30 name: "lint" runs-on: ubuntu-latest @@ -296,7 +295,7 @@ jobs: run: govulncheck ./... android: - if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }} + if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) timeout-minutes: 30 name: "android-all" runs-on: ubuntu-latest diff --git a/.github/workflows/build_publish_release_docker_image.yml b/.github/workflows/build_publish_release_docker_image.yml index 319ce1b30..b69285a7e 100644 --- a/.github/workflows/build_publish_release_docker_image.yml +++ b/.github/workflows/build_publish_release_docker_image.yml @@ -32,15 +32,27 @@ jobs: - name: Get actual major version id: actual_major_version run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1) - - name: Build and publish image - uses: ilteoood/docker_buildx@1.1.0 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to Docker Hub + uses: docker/login-action@v3 with: - tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }} - imageName: rclone/rclone - platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 - publish: true - dockerHubUser: ${{ secrets.DOCKER_HUB_USER }} - dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }} + username: ${{ secrets.DOCKER_HUB_USER }} + password: ${{ secrets.DOCKER_HUB_PASSWORD }} + - name: Build and publish image + uses: docker/build-push-action@v6 + with: + file: Dockerfile + context: . 
+ platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 + push: true + tags: | + rclone/rclone:latest + rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }} + rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }} + rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }} build_docker_volume_plugin: if: github.repository == 'rclone/rclone' diff --git a/.golangci.yml b/.golangci.yml index f4092493a..4d3a8480c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -100,10 +100,45 @@ linters-settings: # as documented here: https://staticcheck.io/docs/configuration/options/#checks checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"] gocritic: - disabled-checks: - - appendAssign - - captLocal - - commentFormatting - - exitAfterDefer - - ifElseChain - - singleCaseSwitch + # Enable all default checks with some exceptions and some additions (commented). + # Cannot use both enabled-checks and disabled-checks, so must specify all to be used. + disable-all: true + enabled-checks: + #- appendAssign # Enabled by default + - argOrder + - assignOp + - badCall + - badCond + #- captLocal # Enabled by default + - caseOrder + - codegenComment + #- commentFormatting # Enabled by default + - defaultCaseOrder + - deprecatedComment + - dupArg + - dupBranchBody + - dupCase + - dupSubExpr + - elseif + #- exitAfterDefer # Enabled by default + - flagDeref + - flagName + #- ifElseChain # Enabled by default + - mapKey + - newDeref + - offBy1 + - regexpMust + - ruleguard # Not enabled by default + #- singleCaseSwitch # Enabled by default + - sloppyLen + - sloppyTypeAssert + - switchTrue + - typeSwitchVar + - underef + - unlambda + - unslice + - valSwap + - wrapperFunc + settings: + ruleguard: + rules: "${configDir}/bin/rules.go" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d31258e78..9da630e79 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as - `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`) - make sure this has the `autogenerated options` comments in (see your reference backend docs) - update them in your backend with `bin/make_backend_docs.py remote` -- `docs/content/overview.md` - overview docs +- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table. - `docs/content/docs.md` - list of remotes in config section - `docs/content/_index.md` - front page of rclone.org - `docs/layouts/chrome/navbar.html` - add it to the website navigation diff --git a/MANUAL.html b/MANUAL.html index 6ab9d478d..d4b683fec 100644 --- a/MANUAL.html +++ b/MANUAL.html @@ -81,7 +81,7 @@

rclone(1) User Manual

Nick Craig-Wood

-

Jun 14, 2024

+

Sep 08, 2024

Rclone syncs your files to cloud storage

rclone logo

@@ -154,7 +154,9 @@
  • Dropbox
  • Enterprise File Fabric
  • Fastmail Files
  • +
  • Files.com
  • FTP
  • +
  • Gofile
  • Google Cloud Storage
  • Google Drive
  • Google Photos
  • @@ -193,6 +195,7 @@
  • pCloud
  • Petabox
  • PikPak
  • +
  • Pixeldrain
  • premiumize.me
  • put.io
  • Proton Drive
  • @@ -441,6 +444,7 @@ go build

    There are other make targets that can be used for more advanced builds, such as cross-compiling for all supported os/architectures, and packaging results into release artifacts. See Makefile and cross-compile.go for details.

    Another alternative method for source installation is to download the source, build and install rclone - all in one operation, as a regular Go package. The source will be stored in the Go module cache, and the resulting executable will be in your GOPATH bin folder ($(go env GOPATH)/bin, which corresponds to ~/go/bin/rclone by default).

    go install github.com/rclone/rclone@latest
    +

    In some situations, the rclone executable size might be too big for deployment in very restricted environments when all backends with large SDKs are included. To limit binary size, unused backends can be commented out in backends/all/all.go and unused commands in cmd/all/all.go before building with go build or make.
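    As an illustrative sketch (the import list below is abbreviated; check the file in your source tree for the current set), excluding a backend is just a matter of commenting out its blank import in backends/all/all.go:

    // backends/all/all.go (illustrative excerpt)
    package all

    import (
        // Backends are registered via blank imports.
        _ "github.com/rclone/rclone/backend/local"
        // Commented out to shrink the binary:
        // _ "github.com/rclone/rclone/backend/s3"
        // _ "github.com/rclone/rclone/backend/drive"
    )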

    Ansible installation

    This can be done with Stefan Weichinger's ansible role.

    Instructions

    @@ -510,12 +514,15 @@ go build
  • Digi Storage
  • Dropbox
  • Enterprise File Fabric
  • +
  • Files.com
  • FTP
  • +
  • Gofile
  • Google Cloud Storage
  • Google Drive
  • Google Photos
  • Hasher - to handle checksums for other remotes
  • HDFS
  • +
  • Hetzner Storage Box
  • HiDrive
  • HTTP
  • Internet Archive
  • @@ -533,11 +540,13 @@ go build
  • Oracle Object Storage
  • Pcloud
  • PikPak
  • +
  • Pixeldrain
  • premiumize.me
  • put.io
  • Proton Drive
  • QingStor
  • Quatrix by Maytech
  • +
  • rsync.net
  • Seafile
  • SFTP
  • Sia
  • @@ -576,7 +585,7 @@ rclone sync --interactive /local/path remote:path # syncs /local/path to the rem

    Options

      -h, --help   help for config

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    The default number of parallel checks is 8. See the --checkers=N option for more information.
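    For example, to run the check with double the default parallelism (an illustrative invocation):

    rclone cryptcheck --checkers 16 remote:path cryptedremote:path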

    rclone cryptcheck remote:path cryptedremote:path [flags]
    -

    Options

    +

    Options

          --combined string         Make a combined report of changes to this file
           --differ string           Report all non-matching files to this file
           --error string            Report all files with errors (hashing or reading) to this file
    @@ -2417,11 +2487,12 @@ if src is directory
           --missing-on-dst string   Report all files missing from the destination to this file
           --missing-on-src string   Report all files missing from the source to this file
           --one-way                 Check one way only, source files must exist on remote
    -

    Check Options

    -

    Flags used for rclone check.

    +

    Options shared with other commands are described next. See the global flags page for global options not listed here.

    +

    Check Options

    +

    Flags used for check commands

          --max-backlog int   Maximum number of objects in sync or check backlog (default 10000)
    -

    Filter Options

    -

    Flags for filtering directory listings.

    +

    Filter Options

    +

    Flags for filtering directory listings

          --delete-excluded                     Delete files on dest excluded from sync
           --exclude stringArray                 Exclude files matching pattern
           --exclude-from stringArray            Read file exclude patterns from file (use - to read from stdin)
    @@ -2444,19 +2515,18 @@ if src is directory
           --metadata-include-from stringArray   Read metadata include patterns from file (use - to read from stdin)
           --min-age Duration                    Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
           --min-size SizeSuffix                 Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
    -

    Listing Options

    -

    Flags for listing directories.

    +

    Listing Options

    +

    Flags for listing directories

          --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
           --fast-list           Use recursive list if available; uses more memory but fewer transactions
    -

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    rclone cryptdecode

    Cryptdecode returns unencrypted file names.

    -

    Synopsis

    -

    rclone cryptdecode returns unencrypted file names when provided with a list of encrypted file names. List limit is 10 items.

    +

    Synopsis

    +

    Returns unencrypted file names when provided with a list of encrypted file names. List limit is 10 items.

    If you supply the --reverse flag, it will return encrypted file names.

    use it like this

    rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
    @@ -2464,99 +2534,31 @@ if src is directory
     rclone cryptdecode --reverse encryptedremote: filename1 filename2

    Another way to accomplish this is by using the rclone backend encode (or decode) command. See the documentation on the crypt overlay for more info.
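    For example, an illustrative invocation of the crypt backend's encode command (mycryptremote is a placeholder remote name):

    rclone backend encode mycryptremote: file1.txt file2.txt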

    rclone cryptdecode encryptedremote: encryptedfilename [flags]
    -

    Options

    +

    Options

      -h, --help      help for cryptdecode
           --reverse   Reverse cryptdecode, encrypts filenames

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    rclone deletefile

    Remove a single file from remote.

    -

    Synopsis

    +

    Synopsis

    Remove a single file from remote. Unlike delete it cannot be used to remove a directory and it doesn't obey include/exclude filters - if the specified file exists, it will always be removed.

    rclone deletefile remote:path [flags]
    -

    Options

    +

    Options

      -h, --help   help for deletefile
    -

    Important Options

    -

    Important flags useful for most commands.

    +

    Options shared with other commands are described next. See the global flags page for global options not listed here.

    +

    Important Options

    +

    Important flags useful for most commands

      -n, --dry-run         Do a trial run with no permanent changes
       -i, --interactive     Enable interactive mode
       -v, --verbose count   Print lots more stuff (repeat for more)
    -

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    -

    rclone genautocomplete

    -

    Output completion script for a given shell.

    -

    Synopsis

    -

    Generates a shell completion script for rclone. Run with --help to list the supported shells.

    -

    Options

    -
      -h, --help   help for genautocomplete
    -

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    - -

    rclone genautocomplete bash

    -

    Output bash completion script for rclone.

    -

    Synopsis

    -

    Generates a bash shell autocompletion script for rclone.

    -

    This writes to /etc/bash_completion.d/rclone by default so will probably need to be run with sudo or as root, e.g.

    -
    sudo rclone genautocomplete bash
    -

    Logout and login again to use the autocompletion scripts, or source them directly

    -
    . /etc/bash_completion
    -

    If you supply a command line argument the script will be written there.

    -

    If output_file is "-", then the output will be written to stdout.

    -
    rclone genautocomplete bash [output_file] [flags]
    -

    Options

    -
      -h, --help   help for bash
    -

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    - -

    rclone genautocomplete fish

    -

    Output fish completion script for rclone.

    -

    Synopsis

    -

    Generates a fish autocompletion script for rclone.

    -

    This writes to /etc/fish/completions/rclone.fish by default so will probably need to be run with sudo or as root, e.g.

    -
    sudo rclone genautocomplete fish
    -

    Logout and login again to use the autocompletion scripts, or source them directly

    -
    . /etc/fish/completions/rclone.fish
    -

    If you supply a command line argument the script will be written there.

    -

    If output_file is "-", then the output will be written to stdout.

    -
    rclone genautocomplete fish [output_file] [flags]
    -

    Options

    -
      -h, --help   help for fish
    -

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    - -

    rclone genautocomplete zsh

    -

    Output zsh completion script for rclone.

    -

    Synopsis

    -

    Generates a zsh autocompletion script for rclone.

    -

    This writes to /usr/share/zsh/vendor-completions/_rclone by default so will probably need to be run with sudo or as root, e.g.

    -
    sudo rclone genautocomplete zsh
    -

    Logout and login again to use the autocompletion scripts, or source them directly

    -
    autoload -U compinit && compinit
    -

    If you supply a command line argument the script will be written there.

    -

    If output_file is "-", then the output will be written to stdout.

    -
    rclone genautocomplete zsh [output_file] [flags]
    -

    Options

    -
      -h, --help   help for zsh
    -

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    -

    rclone gendocs

    Output markdown docs for rclone to the directory supplied.

    Synopsis

    @@ -2565,7 +2567,7 @@ rclone cryptdecode --reverse encryptedremote: filename1 filename2

    Options

      -h, --help   help for gendocs

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    @@ -2576,38 +2578,38 @@ rclone cryptdecode --reverse encryptedremote: filename1 filename2

    Installation on Linux

    1. Skip this step if your version of git-annex is 10.20240430 or newer. Otherwise, you must create a symlink somewhere on your PATH with a particular name. This symlink helps git-annex tell rclone it wants to run the "gitannex" subcommand.

      -
      # Create the helper symlink in "$HOME/bin".
      -ln -s "$(realpath rclone)" "$HOME/bin/git-annex-remote-rclone-builtin"
      -
      -# Verify the new symlink is on your PATH.
      -which git-annex-remote-rclone-builtin
    2. +
      # Create the helper symlink in "$HOME/bin".
      +ln -s "$(realpath rclone)" "$HOME/bin/git-annex-remote-rclone-builtin"
      +
      +# Verify the new symlink is on your PATH.
      +which git-annex-remote-rclone-builtin
    3. Add a new remote to your git-annex repo. This new remote will connect git-annex with the rclone gitannex subcommand.

      Start by asking git-annex to describe the remote's available configuration parameters.

      -
      # If you skipped step 1:
      -git annex initremote MyRemote type=rclone --whatelse
      -
      -# If you created a symlink in step 1:
      -git annex initremote MyRemote type=external externaltype=rclone-builtin --whatelse
      +
      # If you skipped step 1:
      +git annex initremote MyRemote type=rclone --whatelse
      +
      +# If you created a symlink in step 1:
      +git annex initremote MyRemote type=external externaltype=rclone-builtin --whatelse

      NOTE: If you're porting an existing git-annex-remote-rclone remote to use rclone gitannex, you can probably reuse the configuration parameters verbatim without renaming them. Check parameter synonyms with --whatelse as shown above.

      The following example creates a new git-annex remote named "MyRemote" that will use the rclone remote named "SomeRcloneRemote". That rclone remote must be one configured in your rclone.conf file, which can be located with rclone config file.

      -
      git annex initremote MyRemote         \
      -    type=external                     \
      -    externaltype=rclone-builtin       \
      -    encryption=none                   \
      -    rcloneremotename=SomeRcloneRemote \
      -    rcloneprefix=git-annex-content    \
      -    rclonelayout=nodir
    4. +
      git annex initremote MyRemote         \
      +    type=external                     \
      +    externaltype=rclone-builtin       \
      +    encryption=none                   \
      +    rcloneremotename=SomeRcloneRemote \
      +    rcloneprefix=git-annex-content    \
      +    rclonelayout=nodir
    5. Before you trust this command with your precious data, be sure to test the remote. This command is very new and has not been tested on many rclone backends. Caveat emptor!

      -
      git annex testremote MyRemote
    6. +
      git annex testremote MyRemote

    Happy annexing!

    rclone gitannex [flags]

    Options

      -h, --help   help for gitannex

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    @@ -2636,8 +2638,9 @@ Supported hashes are:
          --download             Download the file and hash it locally; if this flag is not specified, the hash is requested from the remote
      -h, --help                 help for hashsum
          --output-file string   Output hashsums to a file rather than the terminal
    -

    Filter Options

    -

    Flags for filtering directory listings.

    +

    Options shared with other commands are described next. See the global flags page for global options not listed here.

    +

    Filter Options

    +

    Flags for filtering directory listings

          --delete-excluded                     Delete files on dest excluded from sync
           --exclude stringArray                 Exclude files matching pattern
           --exclude-from stringArray            Read file exclude patterns from file (use - to read from stdin)
    @@ -2660,19 +2663,18 @@ Supported hashes are:
           --metadata-include-from stringArray   Read metadata include patterns from file (use - to read from stdin)
           --min-age Duration                    Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
           --min-size SizeSuffix                 Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
    -

    Listing Options

    -

    Flags for listing directories.

    +

    Listing Options

    +

    Flags for listing directories

          --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
           --fast-list           Use recursive list if available; uses more memory but fewer transactions
    -

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    rclone link

    Generate public link to file/folder.

    Synopsis

    -

    rclone link will create, retrieve or remove a public link to the given file or folder.

    +

    Create, retrieve or remove a public link to the given file or folder.

    rclone link remote:path/to/file
     rclone link remote:path/to/folder/
     rclone link --unlink remote:path/to/folder/
    @@ -2686,21 +2688,28 @@ rclone link --expire 1d remote:path/to/file
    -h, --help help for link --unlink Remove existing public link to file/folder

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    rclone listremotes

    List all the remotes in the config file and defined in environment variables.

    Synopsis

    -

    rclone listremotes lists all the available remotes from the config file.

    -

    When used with the --long flag it lists the types and the descriptions too.

    -
    rclone listremotes [flags]
    +

    Lists all the available remotes from the config file, or the remotes matching an optional filter.

    +

    Prints the result in human-readable format by default, and as a simple list of remote names, or if used with flag --long a tabular format including the remote names, types and descriptions. Using flag --json produces machine-readable output instead, which always includes all attributes - including the source (file or environment).

    +

    Result can be filtered by a filter argument which applies to all attributes, and/or filter flags specific for each attribute. The values must be specified according to regular rclone filtering pattern syntax.

    +
    rclone listremotes [<filter>] [flags]

    Options

    -
      -h, --help   help for listremotes
    -      --long   Show the type and the description as well as names
    +
          --description string   Filter remotes by description
    +  -h, --help                 help for listremotes
    +      --json                 Format output as JSON
    +      --long                 Show type and description in addition to name
    +      --name string          Filter remotes by name
    +      --order-by string      Instructions on how to order the result, e.g. 'type,name=descending'
    +      --source string        Filter remotes by source, e.g. 'file' or 'environment'
    +      --type string          Filter remotes by type
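    For example, two illustrative invocations combining the filter argument and the flags above:

    rclone listremotes --type drive --long
    rclone listremotes "work*" --json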

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    @@ -2797,8 +2806,9 @@ rclone lsf remote:path --format pt --time-format max
      -R, --recursive            Recurse into the listing
      -s, --separator string     Separator for the items in the format (default ";")
      -t, --time-format string   Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)
    -

    Filter Options

    -

    Flags for filtering directory listings.

    +

    Options shared with other commands are described next. See the global flags page for global options not listed here.

    +

    Filter Options

    +

    Flags for filtering directory listings

          --delete-excluded                     Delete files on dest excluded from sync
           --exclude stringArray                 Exclude files matching pattern
           --exclude-from stringArray            Read file exclude patterns from file (use - to read from stdin)
    @@ -2821,12 +2831,11 @@ rclone lsf remote:path --format pt --time-format max
          --metadata-include-from stringArray   Read metadata include patterns from file (use - to read from stdin)
          --min-age Duration                    Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
          --min-size SizeSuffix                 Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
    -

    Listing Options

    -

    Flags for listing directories.

    +

    Listing Options

    +

    Flags for listing directories

          --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
           --fast-list           Use recursive list if available; uses more memory but fewer transactions
    -

    See the global flags page for global options not listed here.

    -

    SEE ALSO

    +

    See Also

    @@ -2834,7 +2843,7 @@ rclone lsf remote:path --format pt --time-format max

    List directories and objects in the path in JSON format.

    Synopsis

    List directories and objects in the path in JSON format.

    -

    The output is an array of Items, where each Item looks like this

    +

    The output is an array of Items, where each Item looks like this:

    {
       "Hashes" : {
          "SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
    @@ -2854,18 +2863,28 @@ rclone lsf remote:path --format pt --time-format max
    "Size" : 6, "Tier" : "hot", } -

    If --hash is not specified, the Hashes property will be omitted. The types of hash can be specified with the --hash-type parameter (which may be repeated). If --hash-type is set then it implies --hash.

    -

    If --no-modtime is specified then ModTime will be blank. This can speed things up on remotes where reading the ModTime takes an extra request (e.g. s3, swift).

    -

    If --no-mimetype is specified then MimeType will be blank. This can speed things up on remotes where reading the MimeType takes an extra request (e.g. s3, swift).

    -

    If --encrypted is not specified the Encrypted will be omitted.

    -

    If --dirs-only is not specified files in addition to directories are returned

    -

    If --files-only is not specified directories in addition to the files will be returned.

    -

    If --metadata is set then an additional Metadata key will be returned. This will have metadata in rclone standard format as a JSON object.

    -

    if --stat is set then a single JSON blob will be returned about the item pointed to. This will return an error if the item isn't found. However on bucket based backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will return an empty directory as it isn't possible to tell empty directories from missing directories there.

    +

    The exact set of properties included depends on the backend:

    + +

    Different options may also affect which properties are included:

    + +

    The default is to list directories and files/objects, but this can be changed with the following options:

    + +

    If --stat is set then the output is not an array of items; instead a single JSON blob will be returned about the item pointed to. This will return an error if the item isn't found, however on bucket based backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will return an empty directory, as it isn't possible to tell empty directories from missing directories there.

    The Path field will only show folders below the remote path being listed. If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt" will be "subfolder/file.txt", not "remote:path/subfolder/file.txt". When used without --recursive the Path will always be the same as Name.

    -

    If the directory is a bucket in a bucket-based backend, then "IsBucket" will be set to true. This key won't be present unless it is "true".

    The time is in RFC3339 format with up to nanosecond precision. The number of decimal digits in the seconds will depend on the precision that the remote can hold the times, so if times are accurate to the nearest millisecond (e.g. Google Drive) then 3 digits will always be shown ("2017-05-31T16:15:57.034+01:00") whereas if the times are accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits will be shown ("2017-05-31T16:15:57+01:00").

    -

    The whole output can be processed as a JSON blob, or alternatively it can be processed line by line as each item is written one to a line.

    +

    The whole output can be processed as a JSON blob, or alternatively it can be processed line by line as each item is written on individual lines (except with --stat).

    Any of the filtering options can be applied to this command.

    There are several related list commands

    -

    If none of these option actually end up providing rclone with AWS credentials then S3 interaction will be non-authenticated (see below).

    +

    With env_auth = true, rclone (which uses the AWS SDK for Go v2) should support all the authentication methods that the aws CLI tool and the other AWS SDKs do.
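    A minimal config relying on the SDK's credential chain might therefore look like this (illustrative):

    [s3]
    type = s3
    provider = AWS
    env_auth = true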

    +

    If none of these options ends up providing rclone with AWS credentials then S3 interaction will be non-authenticated (see the anonymous access section for more info).

    S3 Permissions

    When using the sync subcommand of rclone the following minimum permissions are required to be available on the bucket being written to:

    --s3-sts-endpoint

    -

    Endpoint for STS.

    +

    Endpoint for STS (deprecated).

    Leave blank if using AWS to use the default endpoint for the region.

    Properties:

    +

    --s3-sdk-log-mode

    +

    Set to debug the SDK

    +

    This can be set to a comma separated list of the following functions:

    + +

    Use Off to disable and All to set all log levels. You will need to use -vv to see the debug level logs.
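    For example, to log request signing and retry decisions while debugging (an illustrative invocation; the function names follow the AWS SDK's client log modes):

    rclone lsf -vv --s3-sdk-log-mode Signing,Retries s3:bucket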

    +

    Properties:

    +

    --s3-description

    Description of the remote.

    Properties:

    @@ -15420,13 +16028,14 @@ Windows: "%USERPROFILE%\.aws\credentials"

    See the backend command for more info on how to pass options and arguments.

    These can be run on a running backend using the rc command backend/command.

    restore

    -

    Restore objects from GLACIER to normal storage

    +

    Restore objects from GLACIER or INTELLIGENT-TIERING archive tier

    rclone backend restore remote: [options] [<arguments>+]
    -

    This command can be used to restore one or more objects from GLACIER to normal storage.

    +

    This command can be used to restore one or more objects from GLACIER to normal storage or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier.

    Usage Examples:

    rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
     rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
    -rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
    +rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
    +rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY

    This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags

    rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1

    All the objects shown will be marked for restore, then

    @@ -15445,13 +16054,13 @@ rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS

    Options:

    restore-status

    -

    Show the restore status for objects being restored from GLACIER to normal storage

    +

    Show the restore status for objects being restored from GLACIER or INTELLIGENT-TIERING storage

    rclone backend restore-status remote: [options] [<arguments>+]
    -

    This command can be used to show the status for objects being restored from GLACIER to normal storage.

    +

    This command can be used to show the status for objects being restored from GLACIER to normal storage or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier.

    Usage Examples:

    rclone backend restore-status s3:bucket/path/to/object
     rclone backend restore-status s3:bucket/path/to/directory
    @@ -15476,6 +16085,15 @@ rclone backend restore-status -o all s3:bucket/path/to/directory
    "RestoreExpiryDate": "2023-09-06T12:29:19+01:00" }, "StorageClass": "DEEP_ARCHIVE" + }, + { + "Remote": "test.gz", + "VersionID": null, + "RestoreStatus": { + "IsRestoreInProgress": true, + "RestoreExpiryDate": "null" + }, + "StorageClass": "INTELLIGENT_TIERING" } ]

    Options:

    @@ -15546,23 +16164,16 @@ rclone rc backend/command command=set fs=s3: -o session_token=X -o access_key_id

    The option keys are named as they are in the config file.

    This rebuilds the connection to the s3 backend when it is called with the new parameters. Only new parameters need be passed as the values will default to those currently in use.

    It doesn't return anything.

    -

    Anonymous access to public buckets

    +

    Anonymous access to public buckets

    If you want to use rclone to access a public bucket, configure with a blank access_key_id and secret_access_key. Your config should end up looking like this:

    [anons3]
     type = s3
    -provider = AWS
    -env_auth = false
    -access_key_id =
    -secret_access_key =
    -region = us-east-1
    -endpoint =
    -location_constraint =
    -acl = private
    -server_side_encryption =
    -storage_class =
    +provider = AWS

    Then use it as normal with the name of the public bucket, e.g.

    rclone lsd anons3:1000genomes

    You will be able to list and copy data but not upload it.

    +

    You can also do this entirely on the command line

    +
    rclone lsd :s3,provider=AWS:1000genomes

    Providers

    AWS S3

    This is the provider used as the main example and described in the configuration section above.

    @@ -15690,6 +16301,7 @@ endpoint = https://ACCOUNT_ID.r2.cloudflarestorage.com
     acl = private

    Now run rclone lsf r2: to see your buckets and rclone lsf r2:bucket to look within a bucket.

    For R2 tokens with the "Object Read & Write" permission, you may also need to add no_check_bucket = true for object uploads to work correctly.

    +

    Note that Cloudflare decompresses files uploaded with Content-Encoding: gzip by default, which is a deviation from what AWS does. If this is causing a problem then upload the files with --header-upload "Cache-Control: no-transform".
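    For example (an illustrative invocation):

    rclone copy --header-upload "Cache-Control: no-transform" /path/to/files r2:bucket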

    Dreamhost

    Dreamhost DreamObjects is an object storage system based on CEPH.

    To use rclone with Dreamhost, configure as above but leave the region blank and set the endpoint. You should end up with something like this in your config:

    @@ -17710,7 +18322,7 @@ cos s3

    For Netease NOS, configure as per the configurator rclone config, setting the provider Netease. This will automatically set force_path_style = false which is necessary for it to run properly.

    Petabox

    Here is an example of making a Petabox configuration. First run:

    -
    rclone config
    +
    rclone config

    This will guide you through an interactive setup process.

    No remotes found, make a new one?
     n) New remote
    @@ -17921,6 +18533,11 @@ y/n> n

    Use the native protocol to take advantage of client-side encryption as well as to achieve the best possible download performance. Uploads will be erasure-coded locally, thus a 1gb upload will result in 2.68gb of data being uploaded to storage nodes across the network.

    Use this backend and the S3 compatible Hosted Gateway to increase upload performance and reduce the load on your systems and network. Uploads will be encrypted and erasure-coded server-side, thus a 1GB upload will result in only 1GB of data being uploaded to storage nodes across the network.

    For more detailed comparison please check the documentation of the storj backend.

    +

    Memory usage

    +

    The most common cause of rclone using lots of memory is a single directory with millions of files in it. Although S3 doesn't really have the concept of directories, rclone does the sync on a directory by directory basis to be compatible with normal file systems.

    +

    Rclone loads each directory into memory as rclone objects. Each rclone object takes 0.5k-1k of memory, so approximately 1GB per 1,000,000 files, and the sync for that directory does not begin until it is entirely loaded in memory. So the sync can take a long time to start for large directories.

    +

    To sync a directory with 100,000,000 files in it you would need approximately 100 GB of memory. At some point the amount of memory becomes difficult to provide so there is a workaround for this which involves a bit of scripting, as sketched below.
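    A minimal sketch of such a script, assuming the data is spread over many top-level directories under s3:bucket and mirrored from /data, syncs each directory separately so only one listing is held in memory at a time:

    rclone lsf --dirs-only s3:bucket | while read -r dir; do
        rclone sync "/data/$dir" "s3:bucket/$dir"
    done
    # Top-level files still need a shallow pass over the root itself:
    rclone sync --max-depth 1 /data s3:bucket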

    +

    At some point rclone will gain a sync mode which is effectively this workaround but built into rclone.

    Limitations

    rclone about is not supported by the S3 backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

    See List of backends that do not support rclone about and rclone about

    @@ -18070,12 +18687,13 @@ key> 0123456789abcdef0123456789abcdef0123456789
     Endpoint for the service - leave blank normally.
     endpoint>
     Remote config
    ---------------------
    -[remote]
    -account = 123456789abc
    -key = 0123456789abcdef0123456789abcdef0123456789
    -endpoint =
    ---------------------
    +Configuration complete.
    +Options:
    +- type: b2
    +- account: 123456789abc
    +- key: 0123456789abcdef0123456789abcdef0123456789
    +- endpoint:
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -18129,7 +18747,9 @@ y/e/d> y

    Backblaze recommends that you do lots of transfers simultaneously for maximum speed. In tests from my SSD equipped laptop the optimum setting is about --transfers 32 though higher numbers may be used for a slight speed improvement. The optimum number for you may vary depending on your hardware, how big the files are, how much you want to load your computer, etc. The default of --transfers 4 is definitely too low for Backblaze B2 though.

    Note that uploading big files (bigger than 200 MiB by default) will use a 96 MiB RAM buffer by default. There can be at most --transfers of these in use at any moment, so this sets the upper limit on the memory used.
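    As a worked example (illustrative), --transfers 32 with the default 96 MiB buffer bounds the large-file upload memory at roughly 32 × 96 MiB ≈ 3 GiB:

    rclone copy --transfers 32 /local/path b2:bucket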

    Versions

    -

    When rclone uploads a new version of a file it creates a new version of it. Likewise when you delete a file, the old version will be marked hidden and still be available. Conversely, you may opt in to a "hard delete" of files with the --b2-hard-delete flag which would permanently remove the file instead of hiding it.

    +

    The default setting of B2 is to keep old versions of files. This means when rclone uploads a new version of a file it creates a new version of it. Likewise when you delete a file, the old version will be marked hidden and still be available.

    +

    Whether B2 keeps old versions of files or not can be adjusted on a per bucket basis using the "Lifecycle settings" on the B2 control panel or when creating the bucket using the --b2-lifecycle flag or after creation using the rclone backend lifecycle command.

    +

    You may opt in to a "hard delete" of files with the --b2-hard-delete flag which permanently removes files on deletion instead of hiding them.

    Old versions of files, where available, are visible using the --b2-versions flag.

    It is also possible to view a bucket as it was at a certain point in time, using the --b2-version-at flag. This will show the file versions as they were at that time, showing files that have been deleted afterwards, and hiding files that were created since.

    If you wish to remove all the old versions, and unfinished large file uploads, then you can use the rclone cleanup remote:bucket command which will delete all the old versions of files, leaving the current ones intact. You can also supply a path and only old versions under that path will be deleted, e.g. rclone cleanup remote:bucket/path/to/stuff.

    @@ -18525,12 +19145,13 @@ If your browser doesn't open automatically go to the following link: http://
     Log in and authorize rclone for access
     Waiting for code...
     Got code
    ---------------------
    -[remote]
    -client_id =
    -client_secret =
    -token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"XXX"}
    ---------------------
    +Configuration complete.
    +Options:
    +- type: box
    +- client_id:
    +- client_secret:
    +- token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"XXX"}
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -18579,11 +19200,11 @@ e/n/d/r/c/s/q> e
     Choose a number from below, or type in an existing value
     1 > remote
     remote> remote
    ---------------------
    -[remote]
    -type = box
    -token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2017-07-08T23:40:08.059167677+01:00"}
    ---------------------
    +Configuration complete.
    +Options:
    +- type: box
    +- token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2017-07-08T23:40:08.059167677+01:00"}
    +Keep this "remote" remote?
     Edit remote
     Value "client_id" = ""
     Edit? (y/n)>
    @@ -18611,11 +19232,11 @@ If your browser doesn't open automatically go to the following link: http://
     Log in and authorize rclone for access
     Waiting for code...
     Got code
    ---------------------
    -[remote]
    -type = box
    -token = {"access_token":"YYY","token_type":"bearer","refresh_token":"YYY","expiry":"2017-07-23T12:22:29.259137901+01:00"}
    ---------------------
    +Configuration complete.
    +Options:
    +- type: box
    +- token: {"access_token":"YYY","token_type":"bearer","refresh_token":"YYY","expiry":"2017-07-23T12:22:29.259137901+01:00"}
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -19638,12 +20259,12 @@ If your browser doesn't open automatically go to the following link: http://
     Log in and authorize rclone for access
     Waiting for code...
     Got code
    ---------------------
    -[remote]
    -type = sharefile
    -endpoint = https://XXX.sharefile.com
    -token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"}
    ---------------------
    +Configuration complete.
    +Options:
    +- type: sharefile
    +- endpoint: https://XXX.sharefile.com
    +- token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"}
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -20532,11 +21153,11 @@ Embedded spaces can be added using quotes
     "dir=remote:path with space" "dir2=remote2:path with space"
     Enter a fs.SpaceSepList value.
     upstreams> images=s3:imagesbucket files=drive:important/files
    ---------------------
    -[remote]
    -type = combine
    -upstreams = images=s3:imagesbucket files=drive:important/files
    ---------------------
    +Configuration complete.
    +Options:
    +- type: combine
    +- upstreams: images=s3:imagesbucket files=drive:important/files
    +Keep this "remote" remote?
     y) Yes this is OK (default)
     e) Edit this remote
     d) Delete this remote
    @@ -20617,12 +21238,13 @@ Remote config
     Please visit:
     https://www.dropbox.com/1/oauth2/authorize?client_id=XXXXXXXXXXXXXXX&response_type=code
     Enter the code: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXXXXXXXX
    ---------------------
    -[remote]
    -app_key =
    -app_secret =
    -token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    ---------------------
    +Configuration complete.
    +Options:
    +- type: dropbox
    +- app_key:
    +- app_secret:
    +- token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -20862,7 +21484,7 @@ y/e/d> y

    --dropbox-batch-size

    Max number of files in upload batch.

    This sets the batch size of files to upload. It has to be less than 1000.

    -

    By default this is 0 which means rclone which calculate the batch size depending on the setting of batch_mode.

    +

    By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode.

    +

    Files.com

    +

    Files.com is a cloud storage service that provides a secure and easy way to store and share files.

    +

    The initial setup for filescom involves authenticating with your Files.com account. You can do this by providing your site subdomain, username, and password. Alternatively, you can authenticate using an API Key from Files.com. rclone config walks you through it.

    +

    Configuration

    +

    Here is an example of how to make a remote called remote. First run:

    +
    rclone config
    +

    This will guide you through an interactive setup process:

    +
    No remotes found, make a new one?
    +n) New remote
    +s) Set configuration password
    +q) Quit config
    +n/s/q> n
    +
    +Enter name for new remote.
    +name> remote
    +
    +Option Storage.
    +Type of storage to configure.
    +Choose a number from below, or type in your own value.
    +[snip]
    +XX / Files.com
    +  \ "filescom"
    +[snip]
    +Storage> filescom
    +
    +Option site.
    +Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com)
    +Enter a value. Press Enter to leave empty.
    +site> mysite
    +
    +Option username.
    +The username used to authenticate with Files.com.
    +Enter a value. Press Enter to leave empty.
    +username> user
    +
    +Option password.
    +The password used to authenticate with Files.com.
    +Choose an alternative below. Press Enter for the default (n).
    +y) Yes, type in my own password
    +g) Generate random password
    +n) No, leave this optional password blank (default)
    +y/g/n> y
    +Enter the password:
    +password:
    +Confirm the password:
    +password:
    +
    +Edit advanced config?
    +y) Yes
    +n) No (default)
    +y/n> n
    +
    +Configuration complete.
    +Options:
    +- type: filescom
    +- site: mysite
    +- username: user
    +- password: *** ENCRYPTED ***
    +Keep this "remote" remote?
    +y) Yes this is OK (default)
    +e) Edit this remote
    +d) Delete this remote
    +y/e/d> y
    +

    Once configured you can use rclone.

    +

    See all files in the top level:

    +
    rclone lsf remote:
    +

    Make a new directory in the root:

    +
    rclone mkdir remote:dir
    +

    Recursively List the contents:

    +
    rclone ls remote:
    +

    Sync /home/local/directory to the remote directory, deleting any excess files in the directory.

    +
    rclone sync --interactive /home/local/directory remote:dir
    +

    Standard options

    +

    Here are the Standard options specific to filescom (Files.com).

    +

    --filescom-site

    +

    Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com).

    +

    Properties:

    + +

    --filescom-username

    +

    The username used to authenticate with Files.com.

    +

    Properties:

    + +

    --filescom-password

    +

    The password used to authenticate with Files.com.

    +

    NB Input to this must be obscured - see rclone obscure.

    +

    Properties:

    + +

    Advanced options

    +

    Here are the Advanced options specific to filescom (Files.com).

    +

    --filescom-api-key

    +

    The API key used to authenticate with Files.com.

    +

    Properties:

    + +

    --filescom-encoding

    +

    The encoding for the backend.

    +

    See the encoding section in the overview for more info.

    +

    Properties:

    + +

    --filescom-description

    +

    Description of the remote.

    +

    Properties:

    +

    FTP

    FTP is the File Transfer Protocol. Rclone FTP support is provided using the github.com/jlaffaye/ftp package.

    Limitations of Rclone's FTP backend

    Paths are specified as remote:path. If the path does not begin with a / it is relative to the home directory of the user. An empty path remote: refers to the user's home directory.

    -

    Configuration

    +

    Configuration

    To create an FTP configuration named remote, run

    rclone config

    Rclone config guides you through an interactive setup process. A minimal rclone FTP remote definition only requires host, username and password. For an anonymous FTP server, see below.

    @@ -21180,12 +21934,12 @@ Use FTP over TLS (Explicit)
     Enter a boolean value (true or false). Press Enter for the default ("false").
     explicit_tls>
     Remote config
    ---------------------
    -[remote]
    -type = ftp
    -host = ftp.example.com
    -pass = *** ENCRYPTED ***
    ---------------------
    +Configuration complete.
    +Options:
    +- type: ftp
    +- host: ftp.example.com
    +- pass: *** ENCRYPTED ***
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -21247,7 +22001,7 @@ rclone lsf :ftp,host=speedtest.tele2.net,user=anonymous,pass=IXs2wc8OJOz7SYLBk47

    This backend's interactive configuration wizard provides a selection of sensible encoding settings for major FTP servers: ProFTPd, PureFTPd, VsFTPd. Just hit a selection number when prompted.

    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to ftp (FTP).

    --ftp-host

    FTP host to connect to.

    @@ -21307,7 +22061,7 @@ rclone lsf :ftp,host=speedtest.tele2.net,user=anonymous,pass=IXs2wc8OJOz7SYLBk47
  • Type: bool
  • Default: false
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to ftp (FTP).

    --ftp-concurrency

    Maximum number of FTP simultaneous connections, 0 for unlimited.

    @@ -21497,9 +22251,224 @@ rclone lsf :ftp,host=speedtest.tele2.net,user=anonymous,pass=IXs2wc8OJOz7SYLBk47

    File modification time (timestamps) is supported to 1 second resolution for major FTP servers: ProFTPd, PureFTPd, VsFTPd, and FileZilla FTP server. The VsFTPd server has non-standard implementation of time related protocol commands and needs a special configuration setting: writing_mdtm = true.
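    An illustrative config for a VsFTPd server would therefore include:

    [myftp]
    type = ftp
    host = ftp.example.com
    user = myuser
    writing_mdtm = true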

    Support for precise file time with other FTP servers varies depending on what protocol extensions they advertise. If all the MLSD, MDTM and MFTM extensions are present, rclone will use them together to provide precise time. Otherwise the times you see on the FTP server through rclone are those of the last file upload.

    You can use the following command to check whether rclone can use precise time with your FTP server: rclone backend features your_ftp_remote: (the trailing colon is important). Look for the number in the line tagged by Precision designating the remote time precision expressed as nanoseconds. A value of 1000000000 means that file time precision of 1 second is available. A value of 3153600000000000000 (or another large number) means "unsupported".

    +

    Gofile

    +

    Gofile is a content storage and distribution platform. Its aim is to provide as much service as possible for free or at a very low price.

    +

    The initial setup for Gofile involves logging in to the web interface and going to the "My Profile" section. Copy the "Account API token" for use in the config file.

    +

    Note that if you wish to connect rclone to Gofile you will need a premium account.

    +

    Configuration

    +

    Here is an example of how to make a remote called remote. First run:

    +
     rclone config
    +

    This will guide you through an interactive setup process:

    +
    No remotes found, make a new one?
    +n) New remote
    +s) Set configuration password
    +q) Quit config
    +n/s/q> n
    +
    +Enter name for new remote.
    +name> remote
    +
    +Option Storage.
    +Type of storage to configure.
    +Choose a number from below, or type in your own value.
    +XX / Gofile
    +   \ (gofile)
    +Storage> gofile
    +
    +Option access_token.
    +API Access token
    +You can get this from the web control panel.
    +Enter a value. Press Enter to leave empty.
    +access_token> YOURACCESSTOKEN
    +
    +Edit advanced config?
    +y) Yes
    +n) No (default)
    +y/n> n
    +
    +Configuration complete.
    +Options:
    +- type: gofile
    +- access_token: YOURACCESSTOKEN
    +Keep this "remote" remote?
    +y) Yes this is OK (default)
    +e) Edit this remote
    +d) Delete this remote
    +y/e/d> y
    +

    Once configured you can then use rclone like this,

    +

    List directories and files in the top level of your Gofile

    +
    rclone lsf remote:
    +

    To copy a local directory to a Gofile directory called backup

    +
    rclone copy /home/source remote:backup
    +

    Modification times and hashes

    +

    Gofile supports modification times with a resolution of 1 second.

    +

    Gofile supports MD5 hashes, so you can use the --checksum flag.

    +

    Restricted filename characters

    +

    In addition to the default restricted characters set the following characters are also replaced:

    Character   Value   Replacement
    !           0x21    ！
    "           0x22    ＂
    *           0x2A    ＊
    :           0x3A    ：
    <           0x3C    ＜
    >           0x3E    ＞
    ?           0x3F    ？
    \           0x5C    ＼
    |           0x7C    ｜

    File names can also not start or end with the following characters. These only get replaced if they are the first or last character in the name:

    Character   Value   Replacement
    .           0x2E    ．

    Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

    + +

    Gofile supports rclone link to make public links to files or directories. If you specify a directory it will download as a zip file. You can use the --expire flag to specify the time the link should be valid. Note that rclone link --unlink removes all the public links for a file.

    +

    Root folder ID

    +

    You can set the root_folder_id for rclone. This is the directory (identified by its Folder ID) that rclone considers to be the root of your Gofile drive.

    +

    Normally you will leave this blank and rclone will determine the correct root to use itself and fill in the value in the config file.

    +

    However you can set this to restrict rclone to a specific folder hierarchy.

    +

    In order to do this you will have to find the Folder ID of the directory you wish rclone to display.

    +

    You can do this with rclone

    +
    $ rclone lsf -Fip --dirs-only remote:
    +d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/
    +f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/
    +d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/
    +

    The ID to use is the part before the ; so you could set

    +
    root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0
    +

    to restrict rclone to the Files directory.

    +

    Standard options

    +

    Here are the Standard options specific to gofile (Gofile).

    +

    --gofile-access-token

    +

    API Access token

    +

    You can get this from the web control panel.

    +

    Properties:

    + +

    Advanced options

    +

    Here are the Advanced options specific to gofile (Gofile).

    +

    --gofile-root-folder-id

    +

    ID of the root folder

    +

    Leave this blank normally, rclone will fill it in automatically.

    +

    If you want rclone to be restricted to a particular folder you can fill it in - see the docs for more info.

    +

    Properties:

    + +

    --gofile-account-id

    +

    Account ID

    +

    Leave this blank normally, rclone will fill it in automatically.

    +

    Properties:

    + +

    --gofile-list-chunk

    +

    Number of items to list in each call

    +

    Properties:

    + +

    --gofile-encoding

    +

    The encoding for the backend.

    +

    See the encoding section in the overview for more info.

    +

    Properties:


    --gofile-description

    +

    Description of the remote.

    +

    Properties:


    Limitations

    +

    Gofile only supports filenames up to 255 characters in length, where a character is a Unicode character.

    +

    Directories should not be cached for more than 24h otherwise files in the directory may not be downloadable. In practice this means when using a VFS based rclone command such as rclone mount you should make sure --dir-cache-time is less than 24h.
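
    For example, a mount that keeps its directory cache comfortably below that limit (mount point illustrative):

    rclone mount remote: /mnt/gofile --dir-cache-time 12h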

    +

    Note that Gofile is currently limited to a total of 100,000 items. If you attempt to upload more than that you will get error-limit-100000. This limit may be lifted in the future.

    +

    Duplicated files

    +

    Gofile is capable of having files with duplicated file names. For instance two files called hello.txt in the same directory.

    +

    Rclone cannot sync that to a normal file system but it can be fixed with the rclone dedupe command.

    +

    Duplicated files cause problems with the syncing and you will see messages in the log about duplicates.

    +

    Use rclone dedupe to fix duplicated files.
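
    For example, to resolve duplicates interactively, or to automatically keep the newest copy:

    rclone dedupe remote:
    rclone dedupe --dedupe-mode newest remote: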

    Google Cloud Storage

    Paths are specified as remote:bucket (or remote: for the lsd command.) You may put subdirectories in too, e.g. remote:bucket/path/to/dir.

    -

    Configuration

    +

    Configuration

    The initial setup for google cloud storage involves getting a token from Google Cloud Storage which you need to do in your browser. rclone config walks you through it.

    Here is an example of how to make a remote called remote. First run:

     rclone config
    @@ -21610,16 +22579,16 @@ If your browser doesn't open automatically go to the following link: http://
     Log in and authorize rclone for access
     Waiting for code...
     Got code
    ---------------------
    -[remote]
    -type = google cloud storage
    -client_id =
    -client_secret =
    -token = {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null}
    -project_number = 12345678
    -object_acl = private
    -bucket_acl = private
    ---------------------
    +Configuration complete.
    +Options:
    +- type: google cloud storage
    +- client_id:
    +- client_secret:
    +- token: {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null}
    +- project_number: 12345678
    +- object_acl: private
    +- bucket_acl: private
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -21639,7 +22608,7 @@ y/e/d> y

    You can set up rclone with Google Cloud Storage in an unattended mode, i.e. not tied to a specific end-user Google account. This is useful when you want to synchronise files onto machines that don't have actively logged-in users, for example build machines.

    To get credentials for Google Cloud Platform IAM Service Accounts, please head to the Service Account section of the Google Developer Console. Service Accounts behave just like normal User permissions in Google Cloud Storage ACLs, so you can limit their access (e.g. make them read only). After creating an account, a JSON file containing the Service Account's credentials will be downloaded onto your machines. These credentials are what rclone will use for authentication.

    To use a Service Account instead of OAuth2 token flow, enter the path to your Service Account credentials at the service_account_file prompt and rclone won't use the browser based authentication flow. If you'd rather stuff the contents of the credentials file into the rclone config file, you can set service_account_credentials with the actual contents of the file instead, or set the equivalent environment variable.
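
    For example, a sketch of the non-interactive equivalents (path illustrative):

    rclone lsd --gcs-service-account-file /path/to/credentials.json remote:
    RCLONE_GCS_SERVICE_ACCOUNT_FILE=/path/to/credentials.json rclone lsd remote: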

    -

    Anonymous Access

    +

    Anonymous Access

    For downloads of objects that permit public access you can configure rclone to use anonymous access by setting anonymous to true. With unauthorized access you can't write or create files but only read or list those buckets and objects that have public read access.
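
    A minimal sketch of such a config file entry (remote name illustrative):

    [gcs-public]
    type = google cloud storage
    anonymous = true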

    Application Default Credentials

    If no other source of credentials is provided, rclone will fall back to Application Default Credentials. This is useful both when you have already configured authentication for your developer account, or in production when running on a google compute host. Note that if running in docker, you may need to run additional commands on your google compute machine - see this page.

    @@ -21663,7 +22632,7 @@ y/e/d> y

    Google Cloud Storage stores md5sum natively. Google's gsutil tool stores modification time with one-second precision as goog-reserved-file-mtime in file metadata.

    To ensure compatibility with gsutil, rclone stores modification time in 2 separate metadata entries. mtime uses RFC3339 format with one-nanosecond precision. goog-reserved-file-mtime uses Unix timestamp format with one-second precision. To get modification time from object metadata, rclone reads the metadata in the following order: mtime, goog-reserved-file-mtime, object updated time.

    Note that rclone's default modify window is 1ns. Files uploaded by gsutil only contain timestamps with one-second precision. If you use rclone to sync files previously uploaded by gsutil, rclone will attempt to update modification time for all these files. To avoid these possibly unnecessary updates, use --modify-window 1s.
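
    For example (bucket and path illustrative):

    rclone sync --modify-window 1s remote:bucket /path/to/local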

    -

    Restricted filename characters

    +

    Restricted filename characters

    @@ -21696,7 +22665,7 @@ y/e/d> y

    Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to google cloud storage (Google Cloud Storage (this is not Google Drive)).

    --gcs-client-id

    OAuth Client Id.

    @@ -22081,7 +23050,7 @@ y/e/d> y -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to google cloud storage (Google Cloud Storage (this is not Google Drive)).

    --gcs-token

    OAuth Access Token as a JSON blob.

    @@ -22172,13 +23141,13 @@ y/e/d> y
  • Type: string
  • Required: false
  • -

    Limitations

    +

    Limitations

    rclone about is not supported by the Google Cloud Storage backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

    See List of backends that do not support rclone about and rclone about

    Google Drive

    Paths are specified as drive:path

    Drive paths may be as deep as required, e.g. drive:directory/subdirectory.

    -

    Configuration

    +

    Configuration

    The initial setup for drive involves getting a token from Google drive which you need to do in your browser. rclone config walks you through it.

    Here is an example of how to make a remote called remote. First run:

     rclone config
    @@ -22237,15 +23206,16 @@ Configure this as a Shared Drive (Team Drive)?
     y) Yes
     n) No
     y/n> n
    ---------------------
    -[remote]
    -client_id =
    -client_secret =
    -scope = drive
    -root_folder_id =
    -service_account_file =
    -token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2014-03-16T13:57:58.955387075Z"}
    ---------------------
    +Configuration complete.
    +Options:
    +- type: drive
    +- client_id:
    +- client_secret:
    +- scope: drive
    +- root_folder_id:
    +- service_account_file:
    +- token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2014-03-16T13:57:58.955387075Z"}
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -22277,7 +23247,7 @@ y/e/d> y

    This gives rclone its own private area to store files. Rclone will not be able to see any other files on your drive and you won't be able to see rclone's files from the web interface either.

    drive.metadata.readonly

    This allows read only access to file names only. It does not allow rclone to download or upload data, or rename or delete files or directories.

    -

    Root folder ID

    +

    Root folder ID

    This option has been moved to the advanced section. You can set the root_folder_id for rclone. This is the directory (identified by its Folder ID) that rclone considers to be the root of your drive.

    Normally you will leave this blank and rclone will determine the correct root to use itself.

    However you can set this to restrict rclone to a specific folder hierarchy or to access data within the "Computers" tab on the drive web interface (where files from Google's Backup and Sync desktop program go).

    @@ -22289,38 +23259,44 @@ y/e/d> y

    Service Account support

    You can set up rclone with Google Drive in an unattended mode, i.e. not tied to a specific end-user Google account. This is useful when you want to synchronise files onto machines that don't have actively logged-in users, for example build machines.

    To use a Service Account instead of OAuth2 token flow, enter the path to your Service Account credentials at the service_account_file prompt during rclone config and rclone won't use the browser based authentication flow. If you'd rather stuff the contents of the credentials file into the rclone config file, you can set service_account_credentials with the actual contents of the file instead, or set the equivalent environment variable.

    -

    Use case - Google Apps/G-suite account and individual Drive

    -

    Let's say that you are the administrator of a Google Apps (old) or G-suite account. The goal is to store data on an individual's Drive account, who IS a member of the domain. We'll call the domain example.com, and the user foo@example.com.

    +

    Use case - Google Workspace account and individual Drive

    +

    Let's say that you are the administrator of a Google Workspace. The goal is to read or write data on an individual's Drive account, who IS a member of the domain. We'll call the domain example.com, and the user foo@example.com.

    There's a few steps we need to go through to accomplish this:

    1. Create a service account for example.com
    +

    If you ever need to remove access, press the "Delete service account key" button.

    2. Allowing API access to example.com Google Drive
    3. Configure rclone, assuming a new install
    rclone config
     
     n/s/q> n         # New
     name>gdrive      # Gdrive is an example name
    -Storage>         # Select the number shown for Google Drive
    +Storage>         # Type drive
     client_id>       # Can be left blank
     client_secret>   # Can be left blank
    -scope>           # Select your scope, 1 for example
    +scope>           # Select the scope you used in step 2
     root_folder_id>  # Can be left blank
    -service_account_file> /home/foo/myJSONfile.json # This is where the JSON file goes!
    +service_account_file> /home/foo/myJSONfile.json # Path to the JSON file you downloaded in step 1.
     y/n>             # Auto config, n
     
    4. Verify that it's working
    @@ -22334,7 +23310,7 @@ y/n> # Auto config, n
  • gdrive:backup - use the remote called gdrive, work in the folder named backup.
  • -

    Note: in case you configured a specific root folder on gdrive and rclone is unable to access the contents of that folder when using --drive-impersonate, do this instead: - in the gdrive web interface, share your root folder with the user/email of the new Service Account you created/selected at step #1 - use rclone without specifying the --drive-impersonate option, like this: rclone -v lsf gdrive:backup

    +

    Note: in case you configured a specific root folder on gdrive and rclone is unable to access the contents of that folder when using --drive-impersonate, do this instead: - in the gdrive web interface, share your root folder with the user/email of the new Service Account you created/selected at step 1 - use rclone without specifying the --drive-impersonate option, like this: rclone -v lsf gdrive:backup

    Shared drives (team drives)

    If you want to configure the remote to point to a Google Shared Drive (previously known as Team Drives) then answer y to the question Configure this as a Shared Drive (Team Drive)?.

    This will fetch the list of Shared Drives from google and allow you to configure which one you want to use. You can also type in a Shared Drive ID if you prefer.

    @@ -22352,13 +23328,14 @@ Choose a number from below, or type in your own value
     3 / Rclone Test 3
       \ "zzzzzzzzzzzzzzzzzzzz"
     Enter a Shared Drive ID> 1
    ---------------------
    -[remote]
    -client_id =
    -client_secret =
    -token = {"AccessToken":"xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null}
    -team_drive = xxxxxxxxxxxxxxxxxxxx
    ---------------------
    +Configuration complete.
    +Options:
    +- type: drive
    +- client_id:
    +- client_secret:
    +- token: {"AccessToken":"xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null}
    +- team_drive: xxxxxxxxxxxxxxxxxxxx
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -22385,10 +23362,10 @@ trashed=false and 'c' in parents
  • without --fast-list: 22:05 min
  • with --fast-list: 58s
  • -

    Modification times and hashes

    +

    Modification times and hashes

    Google drive stores modification times accurate to 1 ms.

    Hash algorithms MD5, SHA1 and SHA256 are supported. Note, however, that a small fraction of files uploaded may not have SHA1 or SHA256 hashes especially if they were uploaded before 2018.

    -

    Restricted filename characters

    +

    Restricted filename characters

    Only Invalid UTF-8 bytes will be replaced, as they can't be used in JSON strings.

    In contrast to other backends, / can also be used in names and . or .. are valid names.

    Revisions

    @@ -22674,7 +23651,7 @@ trashed=false and 'c' in parents -

    Standard options

    +

    Standard options

    Here are the Standard options specific to drive (Google Drive).

    --drive-client-id

    Google Application Client Id Setting your own is recommended. See https://rclone.org/drive/#making-your-own-client-id for how to create your own. If you leave this blank, it will use an internal key which is low performance.

    @@ -22751,7 +23728,7 @@ trashed=false and 'c' in parents
  • Type: bool
  • Default: false
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to drive (Google Drive).

    --drive-token

    OAuth Access Token as a JSON blob.

    @@ -23549,7 +24526,7 @@ rclone backend copyid drive: ID1 path1 ID2 path2 "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC" } ] -

    Limitations

    +

    Limitations

    Drive has quite a lot of rate limiting. This causes rclone to be limited to transferring about 2 files per second only. Individual files may be transferred much faster at 100s of MiB/s but lots of small files can take a long time.

    Server side copies are also subject to a separate rate limit. If you see User rate limit exceeded errors, wait at least 24 hours and retry. You can disable server-side copies with --disable copy to download and upload the files if you prefer.
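
    For example, forcing a download/upload instead of a server-side copy (paths illustrative):

    rclone copy --disable copy drive:source drive:backup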

    Limitations of Google Docs

    @@ -23557,7 +24534,7 @@ rclone backend copyid drive: ID1 path1 ID2 path2

    This is because rclone can't find out the size of the Google docs without downloading them.

    Google docs will transfer correctly with rclone sync, rclone copy etc as rclone knows to ignore the size when doing the transfer.

    However an unfortunate consequence of this is that you may not be able to download Google docs using rclone mount. If it doesn't work you will get a 0 sized file. If you try again the doc may gain its correct size and be downloadable. Whether it will work or not depends on the application accessing the mount and the OS you are running - experiment to find out if it does work for you!

    -

    Duplicated files

    +

    Duplicated files

    Sometimes, for no reason I've been able to track down, drive will duplicate a file that rclone uploads. Drive unlike all the other remotes can have duplicated files.

    Duplicated files cause problems with the syncing and you will see messages in the log about duplicates.

    Use rclone dedupe to fix duplicated files.

    @@ -23601,7 +24578,7 @@ rclone backend copyid drive: ID1 path1 ID2 path2

    Google Photos

    The rclone backend for Google Photos is a specialized backend for transferring photos and videos to and from Google Photos.

    NB The Google Photos API which rclone uses has quite a few limitations, so please read the limitations section carefully to make sure it is suitable for your use.

    -

    Configuration

    +

    Configuration

    The initial setup for google photos involves getting a token from Google Photos which you need to do in your browser. rclone config walks you through it.

    Here is an example of how to make a remote called remote. First run:

     rclone config
    @@ -23657,11 +24634,11 @@ Got code
     *** are stored in full resolution at original quality. These uploads
     *** will count towards storage in your Google Account.
    ---------------------
    -[remote]
    -type = google photos
    -token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2019-06-28T17:38:04.644930156+01:00"}
    ---------------------
    +Configuration complete.
    +Options:
    +- type: google photos
    +- token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2019-06-28T17:38:04.644930156+01:00"}
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -23756,7 +24733,7 @@ y/e/d> y

    This means that you can use the album path pretty much like a normal filesystem and it is a good target for repeated syncing.

    The shared-album directory shows albums shared with you or by you. This is similar to the Sharing tab in the Google Photos web interface.

    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to google photos (Google Photos).

    --gphotos-client-id

    OAuth Client Id.

    @@ -23788,7 +24765,7 @@ y/e/d> y
  • Type: bool
  • Default: false
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to google photos (Google Photos).

    --gphotos-token

    OAuth Access Token as a JSON blob.

    @@ -23881,7 +24858,7 @@ y/e/d> y

    --gphotos-batch-size

    Max number of files in upload batch.

    This sets the batch size of files to upload. It has to be less than 50.

    -

    By default this is 0 which means rclone which calculate the batch size depending on the setting of batch_mode.

    +

    By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode.

    -

    Limitations

    +

    Limitations

    Only images and videos can be uploaded. If you attempt to upload non-image or non-video files, or formats that Google Photos doesn't understand, rclone will upload the file, then Google Photos will give an error when it is turned into a media item.

    Note that all media items uploaded to Google Photos through the API are stored in full resolution at "original quality" and will count towards your storage quota in your Google Account. The API does not offer a way to upload in "high quality" mode.

    rclone about is not supported by the Google Photos backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

    @@ -24039,7 +25016,7 @@ rclone backend drop Hasher:
    rclone backend stickyimport hasher:path/to/data sha1 remote:/path/to/sum.sha1

    stickyimport is similar to import but works much faster because it does not need to stat existing files and skips the initial tree walk. Instead of binding cache entries to file fingerprints it creates sticky entries bound to the file name alone, ignoring size, modification time etc. Such hash entries can be replaced only by purge, delete, backend drop or by a full re-read/re-write of the files.

    Configuration reference

    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to hasher (Better checksums for other remotes).

    --hasher-remote

    Remote to cache checksums for (e.g. myRemote:path).

    @@ -24068,7 +25045,7 @@ rclone backend drop Hasher:
  • Type: Duration
  • Default: off
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to hasher (Better checksums for other remotes).

    --hasher-auto-size

    Auto-update checksum for files smaller than this size (disabled by default).

    @@ -24133,6 +25110,7 @@ rclone backend drop Hasher:

    Other operations

    -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to hdfs (Hadoop distributed file system).

    --hdfs-service-principal-name

    Kerberos service principal name for the namenode.

    @@ -24329,7 +25307,7 @@ username = root
  • Type: string
  • Required: false
  • -

    Limitations

    +

    Limitations

    -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to hidrive (HiDrive).

    --hidrive-token

    OAuth Access Token as a JSON blob.

    @@ -24634,7 +25612,7 @@ rclone lsd remote:/users/test/path
  • Type: string
  • Required: false
  • -

    Limitations

    +

    Limitations

    HiDrive is able to store symbolic links (symlinks) by design, for example, when unpacked from a zip archive.

    There exists no direct mechanism to manage native symlinks in remotes. As such this implementation has chosen to ignore any native symlinks present in the remote. rclone will not be able to access or show any symlinks stored in the hidrive-remote. This means symlinks cannot be individually removed, copied, or moved, except when removing, copying, or moving the parent folder.

    @@ -24648,7 +25626,7 @@ rclone lsd remote:/users/test/path

    The remote: represents the configured url, and any path following it will be resolved relative to this url, according to the URL standard. This means with remote url https://beta.rclone.org/branch and path fix, the resolved URL will be https://beta.rclone.org/branch/fix, while with path /fix the resolved URL will be https://beta.rclone.org/fix as the absolute path is resolved from the root of the domain.

    If the path following the remote: ends with / it will be assumed to point to a directory. If the path does not end with /, then a HEAD request is sent and the response used to decide if it is treated as a file or a directory (run with -vv to see details). When --http-no-head is specified, a path without ending / is always assumed to be a file. If rclone incorrectly assumes the path is a file, the solution is to specify the path with ending /. When you know the path is a directory, ending it with / is always better as it avoids the initial HEAD request.
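
    For example, listing a path known to be a directory with a trailing / avoids that initial HEAD request (path illustrative):

    rclone lsf remote:subdir/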

    To just download a single file it is easier to use copyurl.

    -

    Configuration

    +

    Configuration

    Here is an example of how to make a remote called remote. First run:

     rclone config

    This will guide you through an interactive setup process:

    @@ -24671,10 +25649,11 @@ Choose a number from below, or type in your own value \ "https://example.com" url> https://beta.rclone.org Remote config --------------------- -[remote] -url = https://beta.rclone.org --------------------- +Configuration complete. +Options: +- type: http +- url: https://beta.rclone.org +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -24711,7 +25690,7 @@ e/n/d/r/c/s/q> q
    rclone lsd --http-url https://beta.rclone.org :http:

    or:

    rclone lsd :http,url='https://beta.rclone.org':
    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to http (HTTP).

    --http-url

    URL of HTTP host to connect to.

    @@ -24732,7 +25711,7 @@ e/n/d/r/c/s/q> q
  • Type: bool
  • Default: false
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to http (HTTP).

    --http-headers

    Set HTTP headers for all transactions.

    @@ -24802,7 +25781,7 @@ rclone rc backend/command command=set fs=remote: -o url=https://example.com

    The option keys are named as they are in the config file.

    This rebuilds the connection to the http backend when it is called with the new parameters. Only new parameters need be passed as the values will default to those currently in use.

    It doesn't return anything.

    -

    Limitations

    +

    Limitations

    rclone about is not supported by the HTTP backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

    See List of backends that do not support rclone about and rclone about

    ImageKit

    @@ -24811,7 +25790,7 @@ rclone rc backend/command command=set fs=remote: -o url=https://example.com

    ImageKit.io provides real-time image and video optimizations, transformations, and CDN delivery. Over 1,000 businesses and 70,000 developers trust ImageKit with their images and videos on the web.

    Accounts & Pricing

    To use this backend, you need to create an account on ImageKit. Start with a free plan with generous usage limits. Then, as your requirements grow, upgrade to a plan that best fits your needs. See the pricing details.

    -

    Configuration

    +

    Configuration

    Here is an example of making an imagekit configuration.

    Firstly create an ImageKit.io account and choose a plan.

    You will need to log in and get the publicKey and privateKey for your account from the developer section.

    @@ -24878,7 +25857,7 @@ y/e/d> y

    ImageKit does not support modification times or hashes yet.

    Checksums

    No checksums are supported.

    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to imagekit (ImageKit.io).

    --imagekit-endpoint

    You can find your ImageKit.io URL endpoint in your dashboard

    @@ -24907,7 +25886,7 @@ y/e/d> y
  • Type: string
  • Required: true
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to imagekit (ImageKit.io).

    --imagekit-only-signed

    If you have configured Restrict unsigned image URLs in your dashboard settings, set this to true.

    @@ -25083,7 +26062,7 @@ y/e/d> y

    These auto-created files can be excluded from the sync using metadata filtering.

    rclone sync ... --metadata-exclude "source=metadata" --metadata-exclude "format=Metadata"

    Which excludes from the sync any files which have the source=metadata or format=Metadata flags which are added to Internet Archive auto-created files.

    -

    Configuration

    +

    Configuration

    Here is an example of making an internetarchive configuration. Most applies to the other providers as well, any differences are described below.

    First run

    rclone config
    @@ -25142,17 +26121,17 @@ Edit advanced config?
     y) Yes
     n) No (default)
     y/n> n
    ---------------------
    -[remote]
    -type = internetarchive
    -access_key_id = XXXX
    -secret_access_key = XXXX
    ---------------------
    +Configuration complete.
    +Options:
    +- type: internetarchive
    +- access_key_id: XXXX
    +- secret_access_key: XXXX
    +Keep this "remote" remote?
     y) Yes this is OK (default)
     e) Edit this remote
     d) Delete this remote
     y/e/d> y
    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to internetarchive (Internet Archive).

    --internetarchive-access-key-id

    IAS3 Access Key.

    @@ -25174,7 +26153,7 @@ y/e/d> y
  • Type: string
  • Required: false
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to internetarchive (Internet Archive).

    --internetarchive-endpoint

    IAS3 Endpoint.

    @@ -25380,7 +26359,7 @@ Response: {"error":"invalid_grant","error_description&q

    Onlime has sold access to Jottacloud proper, while providing localized support to Danish customers, but has recently set up its own hosting, transferring its customers from Jottacloud servers to its own ones.

    This, of course, necessitates using their servers for authentication, but otherwise functionality and architecture seem equivalent to Jottacloud.

    To setup rclone to use Onlime Cloud Storage, choose Onlime Cloud authentication in the setup. The rest of the setup is identical to the default setup.

    -

    Configuration

    +

    Configuration

    Here is an example of how to make a remote called remote with the default setup. First run:

    rclone config

    This will guide you through an interactive setup process:

    @@ -25452,18 +26431,18 @@ Press Enter for the default (Archive).
     2 > Shared
     3 > Sync
     config_mountpoint> 1
    ---------------------
    -[remote]
    -type = jottacloud
    -configVersion = 1
    -client_id = jottacli
    -client_secret =
    -tokenURL = https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token
    -token = {........}
    -username = 2940e57271a93d987d6f8a21
    -device = Jotta
    -mountpoint = Archive
    ---------------------
    +Configuration complete.
    +Options:
    +- type: jottacloud
    +- configVersion: 1
    +- client_id: jottacli
    +- client_secret:
    +- tokenURL: https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token
    +- token: {........}
    +- username: 2940e57271a93d987d6f8a21
    +- device: Jotta
    +- mountpoint: Archive
    +Keep this "remote" remote?
     y) Yes this is OK (default)
     e) Edit this remote
     d) Delete this remote
    @@ -25484,11 +26463,11 @@ y/e/d> y

    This backend supports --fast-list which allows you to use fewer transactions in exchange for more memory. See the rclone docs for more details.

    Note that the implementation in Jottacloud always uses only a single API request to get the entire list, so for large folders this could lead to a long wait time before the first results are shown.

    Note also that with rclone version 1.58 and newer, information about MIME types and metadata item utime are not available when using --fast-list.

    -

    Modification times and hashes

    +

    Modification times and hashes

    Jottacloud allows modification times to be set on objects accurate to 1 second. These will be used to detect whether objects need syncing or not.

    Jottacloud supports MD5 type hashes, so you can use the --checksum flag.

    Note that Jottacloud requires the MD5 hash before upload so if the source does not have an MD5 checksum then the file will be cached temporarily on disk (in location given by --temp-dir) before it is uploaded. Small files will be cached in memory - see the --jottacloud-md5-memory-limit flag. When uploading from local disk the source checksum is always available, so this does not apply. Starting with rclone version 1.52 the same is true for encrypted remotes (in older versions the crypt backend would not calculate hashes for uploads from local disk, so the Jottacloud backend had to do it as described above).

    -

    Restricted filename characters

    +

    Restricted filename characters

    In addition to the default restricted characters set the following characters are also replaced:

    @@ -25544,7 +26523,7 @@ y/e/d> y

    Versioning can be disabled by the --jottacloud-no-versions option. This is achieved by deleting the remote file prior to uploading a new version. If the upload fails, no version of the file will be available in the remote.

    Quota information

    To view your current quota you can use the rclone about remote: command which will display your usage limit (unless it is unlimited) and the current usage.

    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to jottacloud (Jottacloud).

    --jottacloud-client-id

    OAuth Client Id.

    @@ -25566,7 +26545,7 @@ y/e/d> y
  • Type: string
  • Required: false
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to jottacloud (Jottacloud).

    --jottacloud-token

    OAuth Access Token as a JSON blob.

    @@ -25714,7 +26693,7 @@ y/e/d> y

    See the metadata docs for more info.

    -

    Limitations

    +

    Limitations

    Note that Jottacloud is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".

    There are quite a few characters that can't be in Jottacloud file names. Rclone will map these names to and from an identical looking unicode equivalent. For example, if a file has a ? in it, it will be mapped to ？ instead.

    Jottacloud only supports filenames up to 255 characters in length.

    @@ -25723,7 +26702,7 @@ y/e/d> y

    Koofr

    Paths are specified as remote:path

    Paths may be as deep as required, e.g. remote:directory/subdirectory.

    -

    Configuration

    +

    Configuration

    The initial setup for Koofr involves creating an application password for rclone. You can do that by opening the Koofr web application, giving the password a nice name like rclone and clicking on generate.

    Here is an example of how to make a remote called koofr. First run:

     rclone config
    @@ -25791,7 +26770,7 @@ y/e/d> y
    rclone ls koofr:

    To copy a local directory to a Koofr directory called backup

    rclone copy /home/source koofr:backup
    -

    Restricted filename characters

    +

    Restricted filename characters

    In addition to the default restricted characters set the following characters are also replaced:

    @@ -25810,7 +26789,7 @@ y/e/d> y

    Invalid UTF-8 bytes will also be replaced, as they can't be used in XML strings.

    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to koofr (Koofr, Digi Storage and other Koofr-compatible storage providers).

    --koofr-provider

    Choose your storage provider.

    @@ -25866,7 +26845,7 @@ y/e/d> y
  • Type: string
  • Required: true
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to koofr (Koofr, Digi Storage and other Koofr-compatible storage providers).

    --koofr-mountid

    Mount ID of the mount to use.

    @@ -25907,7 +26886,7 @@ y/e/d> y
  • Type: string
  • Required: false
  • -

    Limitations

    +

    Limitations

    Note that Koofr is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".

    Providers

    Koofr

    @@ -26037,7 +27016,7 @@ d) Delete this remote y/e/d> y

    Linkbox

    Linkbox is a private cloud drive.

    -

    Configuration

    +

    Configuration

    Here is an example of making a remote for Linkbox.

    First run:

     rclone config
    @@ -26073,7 +27052,7 @@ e) Edit this remote d) Delete this remote y/e/d> y -

    Standard options

    +

    Standard options

    Here are the Standard options specific to linkbox (Linkbox).

    --linkbox-token

    Token from https://www.linkbox.to/admin/account

    @@ -26084,7 +27063,7 @@ y/e/d> y
  • Type: string
  • Required: true
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to linkbox (Linkbox).

    --linkbox-description

    Description of the remote.

    @@ -26095,7 +27074,7 @@ y/e/d> y
  • Type: string
  • Required: false
  • -

    Limitations

    +

    Limitations

    Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

    Mail.ru Cloud

    Mail.ru Cloud is a cloud storage service provided by the Russian internet company Mail.Ru Group. The official desktop client is Disk-O:, available on Windows and Mac OS.

    @@ -26110,7 +27089,7 @@ y/e/d> y
  • Storage keeps hash for all files and performs transparent deduplication, the hash algorithm is a modified SHA1
  • If a particular file is already present in storage, one can quickly submit file hash instead of long file upload (this optimization is supported by rclone)
  • -

    Configuration

    +

    Configuration

    Here is an example of making a mailru configuration.

    First create a Mail.ru Cloud account and choose a tariff.

    You will need to log in and create an app password for rclone. Rclone will not work with your normal username and password - it will give an error like oauth2: server response missing access_token.

    @@ -26170,13 +27149,13 @@ y) Yes
     n) No
     y/n> n
     Remote config
    ---------------------
    -[remote]
    -type = mailru
    -user = username@mail.ru
    -pass = *** ENCRYPTED ***
    -speedup_enable = true
    ---------------------
    +Configuration complete.
    +Options:
    +- type: mailru
    +- user: username@mail.ru
    +- pass: *** ENCRYPTED ***
    +- speedup_enable: true
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -26190,14 +27169,14 @@ y/e/d> y
    rclone ls remote:directory

    Sync /home/local/directory to the remote path, deleting any excess files in the path.

    rclone sync --interactive /home/local/directory remote:directory
    -

    Modification times and hashes

    +

    Modification times and hashes

    Files support a modification time attribute with up to 1 second precision. Directories do not have a modification time, which is shown as "Jan 1 1970".

    File hashes are supported, with a custom Mail.ru algorithm based on SHA1. If the file size is less than or equal to 20 bytes (the size of a SHA1 digest), its hash is simply the file data right-padded with zero bytes. The hash of a larger file is computed as the SHA1 of the file data concatenated with a decimal representation of the data length.
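
    As a sketch of that description (not rclone's actual implementation; the helper name is illustrative), the hash could be computed like this in Go:

    package main

    import (
        "crypto/sha1"
        "fmt"
        "strconv"
    )

    // mailruHash follows the description above: data of 20 bytes or fewer is
    // itself the hash, right-padded with zero bytes to 20 bytes; longer data
    // hashes to SHA1(data || decimal representation of the data length).
    func mailruHash(data []byte) []byte {
        const digestSize = 20 // size of a SHA1 digest
        if len(data) <= digestSize {
            h := make([]byte, digestSize)
            copy(h, data) // right-padded with zero bytes
            return h
        }
        s := sha1.New()
        s.Write(data)
        s.Write([]byte(strconv.Itoa(len(data))))
        return s.Sum(nil)
    }

    func main() {
        fmt.Printf("%x\n", mailruHash([]byte("hello")))
    }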

    Emptying Trash

    Removing a file or directory actually moves it to the trash, which is not visible to rclone but can be seen in a web browser. The trashed file still occupies part of total quota. If you wish to empty your trash and free some quota, you can use the rclone cleanup remote: command, which will permanently delete all your trashed files. This command does not take any path arguments.

    Quota information

    To view your current quota you can use the rclone about remote: command which will display your usage limit (quota) and the current usage.

    -

    Restricted filename characters

    +

    Restricted filename characters

    In addition to the default restricted characters set the following characters are also replaced:

    @@ -26251,7 +27230,7 @@ y/e/d> y

    Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to mailru (Mail.ru Cloud).

    --mailru-client-id

    OAuth Client Id.

    @@ -26314,7 +27293,7 @@ y/e/d> y -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to mailru (Mail.ru Cloud).

    --mailru-token

    OAuth Access Token as a JSON blob.

    @@ -26482,7 +27461,7 @@ y/e/d> y
  • Type: string
  • Required: false
  • -

    Limitations

    +

    Limitations

    File size limits depend on your account. A single file size is limited to 2G for a free account and unlimited for paid tariffs. Please refer to the Mail.ru site for the total uploaded size limits.

    Note that Mailru is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".

    Mega

    @@ -26490,7 +27469,7 @@ y/e/d> y

    This is an rclone backend for Mega which supports the file transfer features of Mega using the same client side encryption.

    Paths are specified as remote:path

    Paths may be as deep as required, e.g. remote:directory/subdirectory.

    -

    Configuration

    +

    Configuration

    Here is an example of how to make a remote called remote. First run:

     rclone config

    This will guide you through an interactive setup process:

    @@ -26519,12 +27498,12 @@ password:
     Confirm the password:
     password:
     Remote config
    ---------------------
    -[remote]
    -type = mega
    -user = you@example.com
    -pass = *** ENCRYPTED ***
    ---------------------
    +Configuration complete.
    +Options:
    +- type: mega
    +- user: you@example.com
    +- pass: *** ENCRYPTED ***
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -26537,9 +27516,9 @@ y/e/d> y
    rclone ls remote:

    To copy a local directory to a Mega directory called backup

    rclone copy /home/source remote:backup
    -

    Modification times and hashes

    +

    Modification times and hashes

    Mega does not support modification times or hashes yet.

    -

    Restricted filename characters

    +

    Restricted filename characters

    @@ -26562,7 +27541,7 @@ y/e/d> y

    Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

    -

    Duplicated files

    +

    Duplicated files

    Mega can have two files with exactly the same name and path (unlike a normal file system).

    Duplicated files cause problems with the syncing and you will see messages in the log about duplicates.

    Use rclone dedupe to fix duplicated files.

    @@ -26591,7 +27570,7 @@ me@example.com:/$

    Note that once blocked, the use of other tools (such as megacmd) is not a sure workaround: the following megacmd login times have been observed in succession for a blocked remote: 7 min, 20 min, 30 min, 30 min, 30 min. Web access looks unaffected though.

    Investigation is continuing in relation to workarounds based on timeouts, pacers, retries and tpslimits - if you discover something relevant, please post on the forum.

    So, if rclone was working nicely and suddenly you are unable to log in, and you are sure the user and the password are correct, it is likely that the remote has been blocked for a while.

    -

    Standard options

    +

    Standard options

    Here are the Standard options specific to mega (Mega).

    --mega-user

    User name.

    @@ -26612,7 +27591,7 @@ me@example.com:/$
  • Type: string
  • Required: true
  • -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to mega (Mega).

    --mega-debug

    Output more debug from Mega.

    @@ -26665,13 +27644,13 @@ me@example.com:/$

    Process killed

    On accounts with large files (or for other reasons), memory usage can increase significantly when executing list/sync instructions. When running on cloud providers (like AWS with EC2), check if the instance type has sufficient memory/CPU to execute the commands. Use the resource monitoring tools to inspect usage after sending the commands. Look at this issue.

    -

    Limitations

    +

    Limitations

    This backend uses the go-mega go library which is an opensource go library implementing the Mega API. There doesn't appear to be any documentation for the mega protocol beyond the mega C++ SDK source code so there are likely quite a few errors still remaining in this library.

    Mega allows duplicate files which may confuse rclone.

    Memory

    The memory backend is an in-RAM backend. It does not persist its data - use the local backend for that.

    The memory backend behaves like a bucket-based remote (e.g. like s3). Because it has no parameters you can just use it with the :memory: remote name.

    -

    Configuration

    +

    Configuration

    You can configure it as a remote like this with rclone config too if you want to:

    No remotes found, make a new one?
     n) New remote
    @@ -26691,10 +27670,10 @@ Storage> memory
     
     Remote config
     
    ---------------------
    -[remote]
    -type = memory
    ---------------------
    +Configuration complete.
    +Options:
    +- type: memory
    +Keep this "remote" remote?
     y) Yes this is OK (default)
     e) Edit this remote
     d) Delete this remote
    @@ -26703,11 +27682,11 @@ y/e/d> y
    rclone mount :memory: /mnt/tmp
     rclone serve webdav :memory:
     rclone serve sftp :memory:
    -

    Modification times and hashes

    +

    Modification times and hashes

    The memory backend supports MD5 hashes and modification times accurate to 1 ns.

    -

    Restricted filename characters

    +

    Restricted filename characters

    The memory backend replaces the default restricted characters set.

    -

    Advanced options

    +

    Advanced options

    Here are the Advanced options specific to memory (In memory object storage system.).

    --memory-description

    Description of the remote.

    @@ -26722,7 +27701,7 @@ rclone serve sftp :memory:

    Paths are specified as remote: You may put subdirectories in too, e.g. remote:/path/to/dir. If you have a CP code you can use that as the folder after the domain such as <domain>/<cpcode>/<internal directories within cpcode>.

    For example, this is commonly configured with or without a CP code:
  • With a CP code: [your-domain-prefix]-nsu.akamaihd.net/123456/subdirectory/
  • Without a CP code: [your-domain-prefix]-nsu.akamaihd.net

    See all buckets with rclone lsd remote:. The initial setup for NetStorage involves getting an account and secret. Use rclone config to walk you through the setup process.

    -

    Configuration

    +

    Configuration

    Here's an example of how to make a remote called ns1.

    1. To begin the interactive configuration process, enter this command:

       rclone config

    @@ -26830,7 +27809,7 @@ y/e/d> y

      Purge

      NetStorage remote supports the purge feature by using the "quick-delete" NetStorage API action. The quick-delete action is disabled by default for security reasons and can be enabled for the account through the Akamai portal. Rclone will first try to use quick-delete action for the purge command and if this functionality is disabled then will fall back to a standard delete method.

      Note: Read the NetStorage Usage API for considerations when using "quick-delete". In general, using quick-delete method will not delete the tree immediately and objects targeted for quick-delete may still be accessible.

      -

      Standard options

      +

      Standard options

      Here are the Standard options specific to netstorage (Akamai NetStorage).

      --netstorage-host

      Domain+path of NetStorage host to connect to.

      @@ -26862,7 +27841,7 @@ y/e/d> y
  • Type: string
  • Required: true
  • -

      Advanced options

      +

      Advanced options

      Here are the Advanced options specific to netstorage (Akamai NetStorage).

      --netstorage-protocol

      Select between HTTP or HTTPS protocol.

      @@ -26911,7 +27890,7 @@ y/e/d> y

    The desired path location (including applicable sub-directories) ending in the object that will be the target of the symlink (for example, /links/mylink). Include the file extension for the object, if applicable.

    rclone backend symlink <src> <path>

      Microsoft Azure Blob Storage

      Paths are specified as remote:container (or remote: for the lsd command.) You may put subdirectories in too, e.g. remote:container/path/to/dir.

      -

      Configuration

      +

      Configuration

      Here is an example of making a Microsoft Azure Blob Storage configuration. For a remote called remote. First run:

       rclone config

      This will guide you through an interactive setup process:

    @@ -26935,12 +27914,13 @@ key> base64encodedkey==
     Endpoint for the service - leave blank normally.
     endpoint>
     Remote config
    ---------------------
    -[remote]
    -account = account_name
    -key = base64encodedkey==
    -endpoint =
    ---------------------
    +Configuration complete.
    +Options:
    +- type: azureblob
    +- account: account_name
    +- key: base64encodedkey==
    +- endpoint:
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -26955,13 +27935,13 @@ y/e/d> y
      rclone sync --interactive /home/local/directory remote:container

      --fast-list

      This remote supports --fast-list which allows you to use fewer transactions in exchange for more memory. See the rclone docs for more details.

      -

      Modification times and hashes

      +

      Modification times and hashes

      The modification time is stored as metadata on the object with the mtime key. It is stored using RFC3339 Format time with nanosecond precision. The metadata is supplied during directory listings so there is no performance overhead to using it.

      If you wish to use the Azure standard LastModified time stored on the object as the modified time, then use the --use-server-modtime flag. Note that rclone can't set LastModified, so using the --update flag when syncing is recommended if using --use-server-modtime.
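
    For example (container name illustrative):

    rclone sync --update --use-server-modtime /path/to/local remote:container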

      MD5 hashes are stored with blobs. However blobs that were uploaded in chunks only have an MD5 if the source remote was capable of MD5 hashes, e.g. the local disk.

      Performance

    When uploading large files, increasing the value of --azureblob-upload-concurrency will increase performance at the cost of using more memory. The default of 16 is set quite conservatively to use less memory. It may be necessary to raise it to 64 or higher to fully utilize a 1 GBit/s link with a single file transfer.
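
    For example (value and paths illustrative):

    rclone copy --azureblob-upload-concurrency 64 /path/to/bigfile remote:container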

      -

      Restricted filename characters

      +

      Restricted filename characters

      In addition to the default restricted characters set the following characters are also replaced:

      @@ -27104,7 +28084,10 @@ container/

      If use_msi is set then managed service identity credentials are used. This authentication only works when running in an Azure service. env_auth needs to be unset to use this.

      However if you have multiple user identities to choose from these must be explicitly specified using exactly one of the msi_object_id, msi_client_id, or msi_mi_res_id parameters.

    If none of msi_object_id, msi_client_id, or msi_mi_res_id is set, this is equivalent to using env_auth.

      -

      Standard options

      +

      Anonymous

      +

      If you want to access resources with public anonymous access then set account only. You can do this without making an rclone config:

      +
      rclone lsf :azureblob,account=ACCOUNT:CONTAINER
      +

      Standard options

      Here are the Standard options specific to azureblob (Microsoft Azure Blob Storage).

      --azureblob-account

      Azure Storage Account Name.

      @@ -27200,7 +28183,7 @@ container/
  • Type: string
  • Required: false
  • -

      Advanced options

      +

      Advanced options

      Here are the Advanced options specific to azureblob (Microsoft Azure Blob Storage).

      --azureblob-client-send-certificate-chain

      Send the certificate chain when using certificate auth.

      @@ -27512,7 +28495,7 @@ container/
  • Content-Type

    Eg --header-upload "Content-Type: text/potato"

      -

      Limitations

      +

      Limitations

      MD5 sums are only uploaded with chunked files if the source has an MD5 sum. This will always be the case for a local to azure copy.

      rclone about is not supported by the Microsoft Azure Blob storage backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

      See List of backends that do not support rclone about and rclone about

      @@ -27522,7 +28505,7 @@ container/

      Also, if you want to access a storage emulator instance running on a different machine, you can override the endpoint parameter in the advanced settings, setting it to http(s)://<host>:<port>/devstoreaccount1 (e.g. http://10.254.2.5:10000/devstoreaccount1).

      Microsoft Azure Files Storage

      Paths are specified as remote: You may put subdirectories in too, e.g. remote:path/to/dir.

      -

      Configuration

      +

      Configuration

      Here is an example of making a Microsoft Azure Files Storage configuration. For a remote called remote. First run:

       rclone config

      This will guide you through an interactive setup process:

      @@ -27602,7 +28585,7 @@ y/e/d>

      The modified time is stored as Azure standard LastModified time on files

      Performance

    When uploading large files, increasing the value of --azurefiles-upload-concurrency will increase performance at the cost of using more memory. The default of 16 is set quite conservatively to use less memory. It may be necessary to raise it to 64 or higher to fully utilize a 1 GBit/s link with a single file transfer.

      -

      Restricted filename characters

      +

      Restricted filename characters

      In addition to the default restricted characters set the following characters are also replaced:

      @@ -27767,7 +28750,7 @@ y/e/d>

      If use_msi is set then managed service identity credentials are used. This authentication only works when running in an Azure service. env_auth needs to be unset to use this.

      However if you have multiple user identities to choose from these must be explicitly specified using exactly one of the msi_object_id, msi_client_id, or msi_mi_res_id parameters.

    If none of msi_object_id, msi_client_id, or msi_mi_res_id is set, this is equivalent to using env_auth.

      -

      Standard options

      +

      Standard options

      Here are the Standard options specific to azurefiles (Microsoft Azure Files).

      --azurefiles-account

      Azure Storage Account Name.

      @@ -27882,7 +28865,7 @@ y/e/d>
  • Type: string
  • Required: false
  • -

      Advanced options

      +

      Advanced options

      Here are the Advanced options specific to azurefiles (Microsoft Azure Files).

      --azurefiles-client-send-certificate-chain

      Send the certificate chain when using certificate auth.

      @@ -28052,12 +29035,12 @@ y/e/d>
  • Content-Type

    Eg --header-upload "Content-Type: text/potato"

      -

      Limitations

      +

      Limitations

      MD5 sums are only uploaded with chunked files if the source has an MD5 sum. This will always be the case for a local to azure copy.

      Microsoft OneDrive

      Paths are specified as remote:path

      Paths may be as deep as required, e.g. remote:directory/subdirectory.

      -

      Configuration

      +

      Configuration

      The initial setup for OneDrive involves getting a token from Microsoft which you need to do in your browser. rclone config walks you through it.

      Here is an example of how to make a remote called remote. First run:

       rclone config
    @@ -28123,13 +29106,13 @@ Is that okay?
     y) Yes
     n) No
     y/n> y
    ---------------------
    -[remote]
    -type = onedrive
    -token = {"access_token":"youraccesstoken","token_type":"Bearer","refresh_token":"yourrefreshtoken","expiry":"2018-08-26T22:39:52.486512262+08:00"}
    -drive_id = b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk
    -drive_type = business
    ---------------------
    +Configuration complete.
    +Options:
    +- type: onedrive
    +- token: {"access_token":"youraccesstoken","token_type":"Bearer","refresh_token":"yourrefreshtoken","expiry":"2018-08-26T22:39:52.486512262+08:00"}
    +- drive_id: b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk
    +- drive_type: business
    +Keep this "remote" remote?
     y) Yes this is OK
     e) Edit this remote
     d) Delete this remote
    @@ -28169,7 +29152,7 @@ y/e/d> y
    16. In the rclone config, set token_url to https://login.microsoftonline.com/YOUR_TENANT_ID/oauth2/v2.0/token.
    17. Note: If you have a special region, you may need a different host in step 4 and 5. Here are some hints.

      -

      Modification times and hashes

      +

      Modification times and hashes

      OneDrive allows modification times to be set on objects accurate to 1 second. These will be used to detect whether objects need syncing or not.

      OneDrive Personal, OneDrive for Business and Sharepoint Server support QuickXorHash.

      Before rclone 1.62 the default hash for Onedrive Personal was SHA1. For rclone 1.62 and above the default for all Onedrive backends is QuickXorHash.

      @@ -28182,7 +29165,7 @@ y/e/d> y

    This can be useful with rclone mount and rclone rc vfs/refresh recursive=true to very quickly fill the mount with information about all the files.

      The API used for the recursive listing (ListR) only supports listing from the root of the drive. This will become increasingly inefficient the further away you get from the root as rclone will have to discard files outside of the directory you are using.

      Some commands (like rclone lsf -R) will use ListR by default - you can turn this off with --disable ListR if you need to.
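For example, assuming a remote named remote:, a hedged sketch of both behaviours:

    rclone lsf -R remote:                      # may use ListR from the root of the drive
    rclone lsf -R --disable ListR remote:dir   # force conventional directory-by-directory listing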

Restricted filename characters

      In addition to the default restricted characters set the following characters are also replaced:


      Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

      Deleting files

      Any files you delete with rclone will end up in the trash. Microsoft doesn't provide an API to permanently delete files, nor to empty the trash, so you will have to do that with one of Microsoft's apps or via the OneDrive website.

Standard options

      Here are the Standard options specific to onedrive (Microsoft OneDrive).

      --onedrive-client-id

      OAuth Client Id.

Advanced options

      Here are the Advanced options specific to onedrive (Microsoft OneDrive).

      --onedrive-token

      OAuth Access Token as a JSON blob.


      Permissions are also supported, if --onedrive-metadata-permissions is set. The accepted values for --onedrive-metadata-permissions are "read", "write", "read,write", and "off" (the default). "write" supports adding new permissions, updating the "role" of existing permissions, and removing permissions. Updating and removing require the Permission ID to be known, so it is recommended to use "read,write" instead of "write" if you wish to update/remove permissions.
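As a hedged example of reading these permissions back (the path is a placeholder), the metadata including the permissions key can be dumped with:

    rclone lsjson --stat -M --onedrive-metadata-permissions read remote:path/to/file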

      Permissions are read/written in JSON format using the same schema as the OneDrive API, which differs slightly between OneDrive Personal and Business.

      Example for OneDrive Personal:

[
    {
        "id": "1234567890ABC!123",
        "grantedTo": {
            "user": {
                "id": "ryan@contoso.com"
            },
            "application": {},
            "device": {}
        },
        "invitation": {
            "email": "ryan@contoso.com"
        },
        "link": {
            "webUrl": "https://1drv.ms/t/s!1234567890ABC"
        },
        "roles": [
            "read"
        ],
        "shareId": "s!1234567890ABC"
    }
]
      Example for OneDrive Business:

[
    {
        "id": "48d31887-5fad-4d73-a9f5-3c356e68a038",
        "grantedToIdentities": [
            {
                "user": {
                    "displayName": "ryan@contoso.com"
                },
                "application": {},
                "device": {}
            }
        ],
        "link": {
            "type": "view",
            "scope": "users",
            "webUrl": "https://contoso.sharepoint.com/:w:/t/design/a577ghg9hgh737613bmbjf839026561fmzhsr85ng9f3hjck2t5s"
        },
        "roles": [
            "read"
        ],
        "shareId": "u!LKj1lkdlals90j1nlkascl"
    },
    {
        "id": "5D33DD65C6932946",
        "grantedTo": {
            "user": {
                "displayName": "John Doe",
                "id": "efee1b77-fb3b-4f65-99d6-274c11914d12"
            },
            "application": {},
            "device": {}
        },
        "roles": [
            "owner"
        ],
        "shareId": "FWxc1lasfdbEAGM5fI7B67aB5ZMPDMmQ11U"
    }
]

      To write permissions, pass in a "permissions" metadata key using this same format. The --metadata-mapper tool can be very helpful for this.

      When adding permissions, an email address can be provided in the User.ID or DisplayName properties of grantedTo or grantedToIdentities. Alternatively, an ObjectID can be provided in User.ID. At least one valid recipient must be provided in order to add a permission for a user. Creating a Public Link is also supported, if Link.Scope is set to "anonymous".

      Example request to add a "read" permission with --metadata-mapper:

{
    "Metadata": {
        "permissions": "[{\"grantedToIdentities\":[{\"user\":{\"id\":\"ryan@contoso.com\"}}],\"roles\":[\"read\"]}]"
    }
}

      Note that adding a permission can fail if a conflicting permission already exists for the file/folder.

      To update an existing permission, include both the Permission ID and the new roles to be assigned. roles is the only property that can be changed.

      To remove permissions, pass in a blob containing only the permissions you wish to keep (which can be empty, to remove all.) Note that the owner role will be ignored, as it cannot be removed.
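For instance, a hedged sketch of a blob which keeps the permission from the OneDrive Personal example above but changes its role to "write" (any other permissions not listed would be removed):

    [{"id": "1234567890ABC!123", "roles": ["write"]}]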


      See the metadata docs for more info.

Limitations

      If you don't use rclone for 90 days the refresh token will expire. This will result in authorization problems. This is easy to fix by running the rclone config reconnect remote: command to get a new token and refresh token.

      Naming

      Note that OneDrive is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".


      OpenDrive

      Paths are specified as remote:path

      Paths may be as deep as required, e.g. remote:directory/subdirectory.

Configuration

      Here is an example of how to make a remote called remote. First run:

       rclone config

      This will guide you through an interactive setup process:

Enter the password:
password:
Confirm the password:
password:
Configuration complete.
Options:
- type: opendrive
- username:
- password: *** ENCRYPTED ***
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
      rclone ls remote:

      To copy a local directory to an OpenDrive directory called backup

      rclone copy /home/source remote:backup
Modification times and hashes

      OpenDrive allows modification times to be set on objects accurate to 1 second. These will be used to detect whether objects need syncing or not.

      The MD5 hash algorithm is supported.

Restricted filename characters


      Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

Standard options

      Here are the Standard options specific to opendrive (OpenDrive).

      --opendrive-username

      Username.

- Type: string
- Required: true

Advanced options

      Here are the Advanced options specific to opendrive (OpenDrive).

      --opendrive-encoding

      The encoding for the backend.

- Type: string
- Required: false

Limitations

      Note that OpenDrive is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".

There are quite a few characters that can't be in OpenDrive file names. These can't occur on Windows platforms, but on non-Windows platforms they are common. Rclone will map these names to and from an identical looking unicode equivalent. For example, if a file has a ? in it, it will be mapped to ？ instead.

      rclone about is not supported by the OpenDrive backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.


      Paths are specified as remote:bucket (or remote: for the lsd command.) You may put subdirectories in too, e.g. remote:bucket/path/to/dir.

      Sample command to transfer local artifacts to remote:bucket in oracle object storage:

      rclone -vvv --progress --stats-one-line --max-stats-groups 10 --log-format date,time,UTC,longfile --fast-list --buffer-size 256Mi --oos-no-check-bucket --oos-upload-cutoff 10Mi --multi-thread-cutoff 16Mi --multi-thread-streams 3000 --transfers 3000 --checkers 64 --retries 2 --oos-chunk-size 10Mi --oos-upload-concurrency 10000 --oos-attempt-resume-upload --oos-leave-parts-on-error sync ./artifacts remote:bucket -vv

Configuration

      Here is an example of making an oracle object storage configuration. rclone config walks you through it.

      Here is an example of how to make a remote called remote. First run:

       rclone config
namespace = id<redacted>34
compartment = ocid1.compartment.oc1..aa<redacted>ba
region = us-ashburn-1
provider = no_auth

Modification times and hashes

      The modification time is stored as metadata on the object as opc-meta-mtime as floating point since the epoch, accurate to 1 ns.

      If the modification time needs to be updated rclone will attempt to perform a server side copy to update the modification if the object can be copied in a single part. In the case the object is larger than 5Gb, the object will be uploaded rather than copied.

      Note that reading this from the object takes an additional HEAD request as the metadata isn't returned in object listings.


Multipart uploads will use --transfers * --oos-upload-concurrency * --oos-chunk-size extra memory. Single part uploads do not use extra memory.

      Single part transfers can be faster than multipart transfers or slower depending on your latency from oos - the more latency, the more likely single part transfers will be faster.

      Increasing --oos-upload-concurrency will increase throughput (8 would be a sensible value) and increasing --oos-chunk-size also increases throughput (16M would be sensible). Increasing either of these will use more memory. The default values are high enough to gain most of the possible performance without using too much memory.
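For example, a hedged sketch applying the suggested values (paths and bucket are placeholders):

    rclone sync --oos-upload-concurrency 8 --oos-chunk-size 16M ./data remote:bucket/path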

Standard options

      Here are the Standard options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).

      --oos-provider

      Choose your Auth Provider

Advanced options

      Here are the Advanced options specific to oracleobjectstorage (Oracle Cloud Infrastructure Object Storage).

      --oos-storage-tier

      The storage class to use when storing new objects in storage. https://docs.oracle.com/en-us/iaas/Content/Object/Concepts/understandingstoragetiers.htm


      Mounting Buckets

      QingStor

      Paths are specified as remote:bucket (or remote: for the lsd command.) You may put subdirectories in too, e.g. remote:bucket/path/to/dir.

Configuration

Here is an example of making a QingStor configuration. First run

      rclone config

      This will guide you through an interactive setup process.

Number of connection retry. Leave blank will use the default value "3".
connection_retries>
Remote config
Configuration complete.
Options:
- type: qingstor
- env_auth: false
- access_key_id: access_key
- secret_access_key: secret_key
- endpoint:
- zone: pek3a
- connection_retries:
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y

Restricted filename characters

      The control characters 0x00-0x1F and / are replaced as in the default restricted characters set. Note that 0x7F is not replaced.

      Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

Standard options

      Here are the Standard options specific to qingstor (QingCloud Object Storage).

      --qingstor-env-auth

      Get QingStor credentials from runtime.

Advanced options

      Here are the Advanced options specific to qingstor (QingCloud Object Storage).

      --qingstor-connection-retries

      Number of connection retries.

- Type: string
- Required: false

Limitations

      rclone about is not supported by the qingstor backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

      See List of backends that do not support rclone about and rclone about

      Quatrix


      Paths may be as deep as required, e.g., remote:directory/subdirectory.

      The initial setup for Quatrix involves getting an API Key from Quatrix. You can get the API key in the user's profile at https://<account>/profile/api-keys or with the help of the API - https://docs.maytech.net/quatrix/quatrix-api/api-explorer#/API-Key/post_api_key_create.

      See complete Swagger documentation for Quatrix - https://docs.maytech.net/quatrix/quatrix-api/api-explorer

Configuration

      Here is an example of how to make a remote called remote. First run:

       rclone config

      This will guide you through an interactive setup process:

api_key> your_api_key
Host name of Quatrix account.
host> example.quatrix.it
Configuration complete.
Options:
- type: quatrix
- api_key: your_api_key
- host: example.quatrix.it
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y

e/n/d/r/c/s/q> e
Choose a number from below, or type in an existing value
 1 > remote
remote> remote
Configuration complete.
Options:
- type: quatrix
- host: some_host.quatrix.it
- api_key: your_api_key
Keep this "remote" remote?
Edit remote
Option api_key.
API key for accessing Quatrix account

Option host.
Host name of Quatrix account
Enter a string value. Press Enter for the default (some_host.quatrix.it).
Configuration complete.
Options:
- type: quatrix
- host: some_host.quatrix.it
- api_key: your_api_key
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y

Modification times and hashes

      Quatrix allows modification times to be set on objects accurate to 1 microsecond. These will be used to detect whether objects need syncing or not.

      Quatrix does not support hashes, so you cannot use the --checksum flag.

Restricted filename characters

File names in Quatrix are case sensitive and have limitations: the maximum filename length is 255 characters and the minimum is 1. A file name cannot be equal to . or .., nor contain /, \, or non-printable ascii.

      Transfers

For files above 50 MiB rclone will use a chunked transfer. Rclone will upload up to --transfers chunks at the same time (shared among all multipart uploads). Chunks are buffered in memory. The minimal chunk size is 10_000_000 bytes by default and can be changed in the advanced configuration, so increasing --transfers will increase memory use. The chunk size has a maximum limit, set to 100_000_000 bytes by default and also changeable in the advanced configuration. The size of the uploaded chunk changes dynamically with the upload speed. The total memory use equals the number of transfers multiplied by the minimal chunk size. If there is free memory allocated for the upload (equal to maximal_summary_chunk_size minus minimal_chunk_size * transfers), the chunk size may increase when the upload speed is high, and decrease when there are upload speed problems. If no free memory is available, all chunks will equal minimal_chunk_size.
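As a worked example of the memory maths above (a sketch, with placeholder paths): with the default minimal chunk size of 10_000_000 bytes, 8 simultaneous uploads buffer at least 8 * 10_000_000 = 80_000_000 bytes:

    rclone copy --transfers 8 /home/source remote:backup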

      Deleting files

      Files you delete with rclone will end up in Trash and be stored there for 30 days. Quatrix also provides an API to permanently delete files and an API to empty the Trash so that you can remove files permanently from your account.

Standard options

      Here are the Standard options specific to quatrix (Quatrix by Maytech).

      --quatrix-api-key

      API key for accessing Quatrix account

- Type: string
- Required: true

Advanced options

      Here are the Advanced options specific to quatrix (Quatrix by Maytech).

      --quatrix-encoding

      The encoding for the backend.


      rclone interacts with Sia network by talking to the Sia daemon via HTTP API which is usually available on port 9980. By default you will run the daemon locally on the same computer so it's safe to leave the API password blank (the API URL will be http://127.0.0.1:9980 making external access impossible).

However, if you want to access Sia daemon running on another node, for example due to memory constraints or because you want to share single daemon between several rclone and Sia-UI instances, you'll need to make a few more provisions:

- Ensure you have Sia daemon installed directly or in a docker container because Sia-UI does not support this mode natively.
- Run it on externally accessible port, for example provide --api-addr :9980 and --disable-api-security arguments on the daemon command line.
- Enforce API password for the siad daemon via environment variable SIA_API_PASSWORD or text file named apipassword in the daemon directory.
- Set rclone backend option api_password taking it from above locations.

Notes:

1. If your wallet is locked, rclone cannot unlock it automatically. You should either unlock it in advance by using Sia-UI or via command line siac wallet unlock. Alternatively you can make siad unlock your wallet automatically upon startup by running it with environment variable SIA_WALLET_PASSWORD.
2. If siad cannot find the SIA_API_PASSWORD variable or the apipassword file in the SIA_DIR directory, it will generate a random password and store in the text file named apipassword under YOUR_HOME/.sia/ directory on Unix or C:\Users\YOUR_HOME\AppData\Local\Sia\apipassword on Windows. Remember this when you configure password in rclone.
3. The only way to use siad without API password is to run it on localhost with command line argument --authorize-api=false, but this is insecure and strongly discouraged.
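Putting the provisions above together, a hedged sketch of running a shared daemon and pointing rclone at it (host name and password are placeholders, and the exact siad invocation may differ between Sia versions):

    SIA_API_PASSWORD=change_me siad --api-addr :9980 --disable-api-security

with the matching rclone backend options:

    [mySia]
    type = sia
    api_url = http://sia.daemon.host:9980
    api_password = change_me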

Configuration

      Here is an example of how to make a sia remote called mySia. First, run:

       rclone config

      This will guide you through an interactive setup process:

Upload a local directory to the Sia directory called backup

rclone copy /home/source mySia:backup

Standard options

      Here are the Standard options specific to sia (Sia Decentralized Cloud).

      --sia-api-url

      Sia daemon API URL, like http://sia.daemon.host:9980.

- Type: string
- Required: false

Advanced options

      Here are the Advanced options specific to sia (Sia Decentralized Cloud).

      --sia-user-agent

      Siad User Agent

- Type: string
- Required: false

Limitations

      Paths are specified as remote:container (or remote: for the lsd command.) You may put subdirectories in too, e.g. remote:container/path/to/dir.

Configuration

      Here is an example of making a swift configuration. First run

      rclone config

      This will guide you through an interactive setup process.


      --update and --use-server-modtime

      As noted below, the modified time is stored on metadata on the object. It is used by default for all operations that require checking the time a file was last updated. It allows rclone to treat the remote more like a true filesystem, but it is inefficient because it requires an extra API call to retrieve the metadata.

      For many operations, the time the object was last uploaded to the remote is sufficient to determine if it is "dirty". By using --update along with --use-server-modtime, you can avoid the extra API call and simply upload files whose local modtime is newer than the time it was last uploaded.
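For example (a sketch with placeholder paths):

    rclone sync --update --use-server-modtime /home/source remote:container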

Modification times and hashes

      The modified time is stored as metadata on the object as X-Object-Meta-Mtime as floating point since the epoch accurate to 1 ns.

      This is a de facto standard (used in the official python-swiftclient amongst others) for storing the modification time for an object.

      The MD5 hash algorithm is supported.

Restricted filename characters


      Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

Standard options

      Here are the Standard options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).

      --swift-env-auth

      Get swift credentials from environment variables in standard OpenStack form.

Advanced options

      Here are the Advanced options specific to swift (OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)).

      --swift-leave-parts-on-error

      If true avoid calling abort upload on a failure.

- Type: bool
- Default: false

--swift-fetch-until-empty-page

When paginating, always fetch unless we received an empty page.

Consider using this option if rclone listings show fewer objects than expected, or if repeated syncs copy unchanged objects.

It is safe to enable this, but rclone may make more API calls than necessary.

This is one of a pair of workarounds to handle implementations of the Swift API that do not implement pagination as expected. See also "partial_page_fetch_threshold".

Properties:

--swift-partial-page-fetch-threshold

When paginating, fetch if the current page is within this percentage of the limit.

Consider using this option if rclone listings show fewer objects than expected, or if repeated syncs copy unchanged objects.

It is safe to enable this, but rclone may make more API calls than necessary.

This is one of a pair of workarounds to handle implementations of the Swift API that do not implement pagination as expected. See also "fetch_until_empty_page".

Properties:
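As a hedged usage sketch, either workaround can be supplied on the command line; typically only one of the pair is needed:

    rclone lsf --swift-fetch-until-empty-page remote:container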

      --swift-chunk-size

      Above this size files will be chunked.

Above this size files will be chunked into a _segments container or a .file-segments directory. (See the use_segments_container option for more info). Default for this is 5 GiB which is its maximum value, which means only files above this size will be chunked.

- Type: string
- Required: false

Limitations

      The Swift API doesn't return a correct MD5SUM for segmented files (Dynamic or Static Large Objects) so rclone won't check or use the MD5SUM for these.

      Troubleshooting

      Rclone gives Failed to create file system for "remote:": Bad Request


      pCloud

      Paths are specified as remote:path

      Paths may be as deep as required, e.g. remote:directory/subdirectory.

Configuration

      The initial setup for pCloud involves getting a token from pCloud which you need to do in your browser. rclone config walks you through it.

      Here is an example of how to make a remote called remote. First run:

       rclone config
If your browser doesn't open automatically go to the following link: http://
Log in and authorize rclone for access
Waiting for code...
Got code
Configuration complete.
Options:
- type: pcloud
- client_id:
- client_secret:
- token: {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
      rclone ls remote:

      To copy a local directory to a pCloud directory called backup

      rclone copy /home/source remote:backup
Modification times and hashes

      pCloud allows modification times to be set on objects accurate to 1 second. These will be used to detect whether objects need syncing or not. In order to set a Modification time pCloud requires the object be re-uploaded.

      pCloud supports MD5 and SHA1 hashes in the US region, and SHA1 and SHA256 hashes in the EU region, so you can use the --checksum flag.

Restricted filename characters

      In addition to the default restricted characters set the following characters are also replaced:


      Deleted files will be moved to the trash. Your subscription level will determine how long items stay in the trash. rclone cleanup can be used to empty the trash.

      Emptying the trash

      Due to an API limitation, the rclone cleanup command will only work if you set your username and password in the advanced options for this backend. Since we generally want to avoid storing user passwords in the rclone config file, we advise you to only set this up if you need the rclone cleanup command to work.

Root folder ID

      You can set the root_folder_id for rclone. This is the directory (identified by its Folder ID) that rclone considers to be the root of your pCloud drive.

      Normally you will leave this blank and rclone will determine the correct root to use itself.

      However you can set this to restrict rclone to a specific folder hierarchy.

      In order to do this you will have to find the Folder ID of the directory you wish rclone to display. This will be the folder field of the URL when you open the relevant folder in the pCloud web interface.

      So if the folder you want rclone to use has a URL which looks like https://my.pcloud.com/#page=filemanager&folder=5xxxxxxxx8&tpl=foldergrid in the browser, then you use 5xxxxxxxx8 as the root_folder_id in the config.
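Continuing that example, a minimal sketch of the resulting config (the token shown is the placeholder from the walkthrough above):

    [remote]
    type = pcloud
    token = {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
    root_folder_id = 5xxxxxxxx8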

Standard options

      Here are the Standard options specific to pcloud (Pcloud).

      --pcloud-client-id

      OAuth Client Id.

- Type: string
- Required: false

Advanced options

      Here are the Advanced options specific to pcloud (Pcloud).

      --pcloud-token

      OAuth Access Token as a JSON blob.


      PikPak

      PikPak is a private cloud drive.

      Paths are specified as remote:path, and may be as deep as required, e.g. remote:directory/subdirectory.

Configuration

      Here is an example of making a remote for PikPak.

      First run:

       rclone config
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y

Modification times and hashes

PikPak keeps modification times on objects, and updates them when uploading objects, but it does not support changing only the modification time.

      The MD5 hash algorithm is supported.

Standard options

      Here are the Standard options specific to pikpak (PikPak).

      --pikpak-user

      Pikpak username.

- Type: string
- Required: true

Advanced options

      Here are the Advanced options specific to pikpak (PikPak).

      --pikpak-client-id

      OAuth Client Id.

rclone backend decompress pikpak:dirpath {filename} -o delete-src-file

Limitations

      Hashes may be empty

PikPak supports the MD5 hash, but it is sometimes empty, especially for user-uploaded files.

      Deleted files still visible with trashed-only

Deleted files will still be visible with --pikpak-trashed-only even after the trash is emptied. This goes away after a few days.

Pixeldrain

This is the backend for Pixeldrain's premium filesystem feature. This is not the same as pixeldrain's free file sharing product. The filesystem requires either a Pro subscription or the Prepaid plan. More information on subscriptions.

An overview of the filesystem's features and limitations is available in the filesystem guide on pixeldrain.

Usage with account

To use the personal filesystem you will need a pixeldrain account and either the Prepaid plan or one of the Patreon-based subscriptions. After registering and subscribing, your personal filesystem will be available at this link: https://pixeldrain.com/d/me.

Go to the API keys page on your account and generate a new API key for rclone. Then run rclone config and use the API key to create a new backend.

Example:
      No remotes found, make a new one?
n) New remote
d) Delete remote
c) Copy remote
s) Set configuration password
q) Quit config
n/d/c/s/q> n

Enter name for new remote.
name> pixeldrainfs

Option Storage.
Type of storage to configure.
Choose a number from below, or type in your own value.
...
XX / Pixeldrain Filesystem
   \ (pixeldrain)
...
Storage> pixeldrain

Option api_key.
API key for your pixeldrain account.
Found on https://pixeldrain.com/user/api_keys.
Enter a value. Press Enter to leave empty.
api_key> b1bb1e81-9b7b-406b-986a-c9b20be76e15

Option directory_id.
Root of the filesystem to use. Set to 'me' to use your personal filesystem.
Set to a shared directory ID to use a shared directory.
Enter a string value. Press Enter for the default (me).
directory_id>

Edit advanced config?
y) Yes
n) No (default)
y/n>

Configuration complete.
Options:
- type: pixeldrain
- api_key: b1bb1e81-9b7b-406b-986a-c9b20be76e15
Keep this "pixeldrainfs" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d>

Current remotes:

Name                 Type
====                 ====
pixeldrainfs         pixeldrain

e) Edit existing remote
n) New remote
d) Delete remote
r) Rename remote
c) Copy remote
s) Set configuration password
q) Quit config
e/n/d/r/c/s/q> q

      Usage without account

It is possible to gain read-only access to publicly shared directories through rclone. For this you only need a directory ID. The directory ID can be found in the URL of a shared directory; the URL will look like this: https://pixeldrain.com/d/abcd1234, where abcd1234 is the directory ID. Directory IDs in your own filesystem can also be listed with the lsf command:

rclone lsf Pixeldrain: --dirs-only -Fpi

This will print directories in your Pixeldrain home directory and their public IDs.

Enter this directory ID in the rclone config and you will be able to access the directory.
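For example, a hedged config sketch for read-only access to the shared directory from the URL above (assuming root_folder_id is the config key behind the directory ID question, as the option below suggests):

    [pixeldrain-shared]
    type = pixeldrain
    root_folder_id = abcd1234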

Standard options

Here are the Standard options specific to pixeldrain (Pixeldrain Filesystem).

--pixeldrain-api-key

API key for your pixeldrain account. Found on https://pixeldrain.com/user/api_keys.

Properties:

--pixeldrain-root-folder-id

Root of the filesystem to use.

Set to 'me' to use your personal filesystem. Set to a shared directory ID to use a shared directory.

Properties:

Advanced options

Here are the Advanced options specific to pixeldrain (Pixeldrain Filesystem).

--pixeldrain-api-url

The API endpoint to connect to. In the vast majority of cases it's fine to leave this at default. It is only intended to be changed for testing purposes.

Properties:

--pixeldrain-description

Description of the remote.

Properties:

Metadata

Pixeldrain supports file modes and creation times.

Here are the possible system metadata items for the pixeldrain backend.

Name    Help                            Type               Example                              Read Only
-----   -----------------------------   -----------------  -----------------------------------  ---------
btime   Time of file birth (creation)   RFC 3339           2006-01-02T15:04:05.999999999Z07:00  N
mode    File mode                       octal, unix style  755                                  N
mtime   Time of last modification       RFC 3339           2006-01-02T15:04:05.999999999Z07:00  N

      See the metadata docs for more info.

      premiumize.me

      Paths are specified as remote:path

      Paths may be as deep as required, e.g. remote:directory/subdirectory.

Configuration

      The initial setup for premiumize.me involves getting a token from premiumize.me which you need to do in your browser. rclone config walks you through it.

      Here is an example of how to make a remote called remote. First run:

       rclone config
If your browser doesn't open automatically go to the following link: http://
Log in and authorize rclone for access
Waiting for code...
Got code
Configuration complete.
Options:
- type: premiumizeme
- token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"}
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d>
      rclone ls remote:

To copy a local directory to a premiumize.me directory called backup

      rclone copy /home/source remote:backup
Modification times and hashes

      premiumize.me does not support modification times or hashes, therefore syncing will default to --size-only checking. Note that using --update will work.

Restricted filename characters

      In addition to the default restricted characters set the following characters are also replaced:


      Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

Standard options

      Here are the Standard options specific to premiumizeme (premiumize.me).

      --premiumizeme-client-id

      OAuth Client Id.

- Type: string
- Required: false

Advanced options

      Here are the Advanced options specific to premiumizeme (premiumize.me).

      --premiumizeme-token

      OAuth Access Token as a JSON blob.

- Type: string
- Required: false

Limitations

      Note that premiumize.me is case insensitive so you can't have a file called "Hello.doc" and one called "hello.doc".

premiumize.me file names can't have the \ or " characters in. rclone maps these to and from identical looking unicode equivalents ＼ and ＂.

      premiumize.me only supports filenames up to 255 characters in length.


      put.io

      Paths are specified as remote:path

      put.io paths may be as deep as required, e.g. remote:directory/subdirectory.

Configuration

      The initial setup for put.io involves getting a token from put.io which you need to do in your browser. rclone config walks you through it.

      Here is an example of how to make a remote called remote. First run:

       rclone config
      rclone ls remote:

      To copy a local directory to a put.io directory called backup

      rclone copy /home/source remote:backup
Restricted filename characters

      In addition to the default restricted characters set the following characters are also replaced:


      Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

Standard options

      Here are the Standard options specific to putio (Put.io).

      --putio-client-id

      OAuth Client Id.

- Type: string
- Required: false

Advanced options

      Here are the Advanced options specific to putio (Put.io).

      --putio-token

      OAuth Access Token as a JSON blob.

- Type: string
- Required: false

Limitations

      put.io has rate limiting. When you hit a limit, rclone automatically retries after waiting the amount of time requested by the server.

      If you want to avoid ever hitting these limits, you may use the --tpslimit flag with a low number. Note that the imposed limits may be different for different operations, and may change over time.
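For example (a sketch; the limit value is illustrative):

    rclone copy --tpslimit 1 /home/source remote:backup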

      Proton Drive

Option 2fa.
Enter a value. Press Enter to leave empty.
2fa> 123456
Remote config
Configuration complete.
Options:
- type: protondrive
- user: you@protonmail.com
- pass: *** ENCRYPTED ***
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
      rclone ls remote:

To copy a local directory to a Proton Drive directory called backup

      rclone copy /home/source remote:backup
Modification times and hashes

      Proton Drive Bridge does not support updating modification times yet.

      The SHA1 hash algorithm is supported.

Restricted filename characters

      Invalid UTF-8 bytes will be replaced, also left and right spaces will be removed (code reference)

Duplicated files

      Proton Drive can not have two files with exactly the same name and path. If the conflict occurs, depending on the advanced config, the file might or might not be overwritten.

      Mailbox password

      Please set your mailbox password in the advanced config section.

      Caching

      The cache is currently built for the case when the rclone is the only instance performing operations to the mount point. The event system, which is the proton API system that provides visibility of what has changed on the drive, is yet to be implemented, so updates from other clients won’t be reflected in the cache. Thus, if there are concurrent clients accessing the same mount point, then we might have a problem with caching the stale data.

Standard options

      Here are the Standard options specific to protondrive (Proton Drive).

      --protondrive-username

      The username of your proton account

- Type: string
- Required: false

Advanced options

      Here are the Advanced options specific to protondrive (Proton Drive).

      --protondrive-mailbox-password

      The mailbox password of your two-password proton account.

- Type: string
- Required: false

Limitations

      This backend uses the Proton-API-Bridge, which is based on go-proton-api, a fork of the official repo.

      There is no official API documentation available from Proton Drive. But, thanks to Proton open sourcing proton-go-api and the web, iOS, and Android client codebases, we don't need to completely reverse engineer the APIs by observing the web client traffic!

      proton-go-api provides the basic building blocks of API calls and error handling, such as 429 exponential back-off, but it is pretty much just a barebone interface to the Proton API. For example, the encryption and decryption of the Proton Drive file are not provided in this library.

The Proton-API-Bridge attempts to bridge the gap, so rclone can be built on top of it quickly. This codebase handles the intricate tasks before and after calling Proton APIs, particularly the complex encryption scheme, allowing developers to implement features for other software on top of this codebase. There are likely quite a few errors in this library, as there isn't official documentation available.

      Seafile

This is a backend for the Seafile storage service:

- It works with both the free community edition and the professional edition.
- Seafile versions 6.x, 7.x, 8.x and 9.x are all supported.
- Encrypted libraries are also supported.
- It supports 2FA enabled users.
- Using a Library API Token is not supported.

Configuration

There are two distinct modes you can set up your remote:

- You point your remote to the root of the server, meaning you don't specify a library during the configuration. Paths are specified as remote:library. You may put subdirectories in too, e.g. remote:library/path/to/dir.
- You point your remote to a specific library during the configuration. Paths are specified as remote:path/to/dir. This is the recommended mode when using encrypted libraries. (This mode is possibly slightly faster than the root mode.)
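For example, with a root-mode remote named seafile: and a library-mode remote named seafile-library: (both names are placeholders), the same directory would be addressed as:

    rclone ls seafile:my-library/path/to/dir
    rclone ls seafile-library:path/to/dir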

      Configuration in root mode

      Here is an example of making a seafile configuration for a user with no two-factor authentication. First run

      rclone sync --interactive /home/local/directory seafile:

      --fast-list

      Seafile version 7+ supports --fast-list which allows you to use fewer transactions in exchange for more memory. See the rclone docs for more details. Please note this is not supported on seafile server version 6.x

Restricted filename characters

      In addition to the default restricted characters set the following characters are also replaced:


It has been actively developed using the seafile docker image of these versions:

- 6.3.4 community edition
- 7.0.5 community edition
- 7.1.3 community edition
- 9.0.10 community edition

      Versions below 6.0 are not supported. Versions between 6.0 and 6.3 haven't been tested and might not work properly.

      Each new version of rclone is automatically tested against the latest docker image of the seafile community server.

Standard options

      Here are the Standard options specific to seafile (seafile).

      --seafile-url

      URL of seafile host to connect to.

- Type: string
- Required: false

Advanced options

      Here are the Advanced options specific to seafile (seafile).

      --seafile-create-library

      Should rclone create a library if it doesn't exist.


      Paths are specified as remote:path. If the path does not begin with a / it is relative to the home directory of the user. An empty path remote: refers to the user's home directory. For example, rclone lsd remote: would list the home directory of the user configured in the rclone remote config (i.e /home/sftpuser). However, rclone lsd remote:/ would list the root directory for remote machine (i.e. /)

Note that some SFTP servers will need the leading / - Synology is a good example of this. rsync.net and Hetzner, on the other hand, require users to OMIT the leading /.

      Note that by default rclone will try to execute shell commands on the server, see shell access considerations.

Configuration

      Here is an example of making an SFTP configuration. First run

      rclone config

      This will guide you through an interactive setup process.

y/g/n> n
Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
key_file>
Remote config
Configuration complete.
Options:
- type: sftp
- host: example.com
- user: sftpuser
- port:
- pass:
- key_file:
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote

      The options md5sum_command and sha1_command can be used to customize the command to be executed for calculation of checksums. You can for example set a specific path to where md5sum and sha1sum executables are located, or use them to specify some other tools that print checksums in compatible format. The value can include command-line arguments, or even shell script blocks as with PowerShell. Rclone has subcommands md5sum and sha1sum that use compatible format, which means if you have an rclone executable on the server it can be used. As mentioned above, they will be automatically picked up if found in PATH, but if not you can set something like /path/to/rclone md5sum as the value of option md5sum_command to make sure a specific executable is used.

      Remote checksumming is recommended and enabled by default. First time rclone is using a SFTP remote, if options md5sum_command or sha1_command are not set, it will check if any of the default commands for each of them, as described above, can be used. The result will be saved in the remote configuration, so next time it will use the same. Value none will be set if none of the default commands could be used for a specific algorithm, and this algorithm will not be supported by the remote.

      Disabling the checksumming may be required if you are connecting to SFTP servers which are not under your control, and to which the execution of remote shell commands is prohibited. Set the configuration option disable_hashcheck to true to disable checksumming entirely, or set shell_type to none to disable all functionality based on remote shell command execution.
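A hedged config sketch combining these options (host, user and the rclone path are placeholders):

    [remote]
    type = sftp
    host = example.com
    user = sftpuser
    md5sum_command = /path/to/rclone md5sum
    sha1_command = /path/to/rclone sha1sum

or, to disable remote checksumming entirely:

    [remote]
    type = sftp
    host = example.com
    user = sftpuser
    disable_hashcheck = true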

Modification times and hashes

      Modified times are stored on the server to 1 second precision.

      Modified times are used in syncing and are fully supported.

Some SFTP servers disable setting/modifying the file modification time after upload (for example, certain configurations of ProFTPd with mod_sftp). If you are using one of these servers, you can set the option set_modtime = false in your rclone backend configuration to disable this behaviour.

      About command

      The about command returns the total space, free space, and used space on the remote for the disk of the specified path on the remote or, if not set, the disk of the root on the remote.

      SFTP usually supports the about command, but it depends on the server. If the server implements the vendor-specific VFS statistics extension, which is normally the case with OpenSSH instances, it will be used. If not, but the same login has access to a Unix shell, where the df command is available (e.g. in the remote's PATH), then this will be used instead. If the server shell is PowerShell, probably with a Windows OpenSSH server, rclone will use a built-in shell command (see shell access). If none of the above is applicable, about will fail.

Standard options

      Here are the Standard options specific to sftp (SSH/SFTP).

      --sftp-host

      SSH host to connect to.


      --sftp-key-pem

      Raw PEM-encoded private key.

Note that this should be on a single line with line endings replaced with '\n', e.g.

key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY-----

This will generate the single line correctly:

awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa

If specified, it will override the key_file parameter.

      Properties:

Advanced options

      Here are the Advanced options specific to sftp (SSH/SFTP).

      --sftp-known-hosts-file

      Optional path to known_hosts file.


      --sftp-connections

      Maximum number of SFTP simultaneous connections, 0 for unlimited.

      Note that setting this is very likely to cause deadlocks so it should be used with care.

If you are doing a sync or copy then make sure connections is one more than the sum of --transfers and --checkers.

If you use --check-first then it just needs to be one more than the maximum of --checkers and --transfers.

So for connections 3 you'd use --checkers 2 --transfers 2 --check-first or --checkers 1 --transfers 1.
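For example (a sketch; the remote name and paths are placeholders):

rclone sync --sftp-connections 3 --checkers 2 --transfers 2 --check-first remote:src /local/dst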

      Properties:

Limitations

      On some SFTP servers (e.g. Synology) the paths are different for SSH and SFTP so the hashes can't be calculated properly. For them using disable_hashcheck is a good idea.

      The only ssh agent supported under Windows is Putty's pageant.

      The Go SSH library disables the use of the aes128-cbc cipher by default, due to security concerns. This can be re-enabled on a per-connection basis by setting the use_insecure_cipher setting in the configuration file to true. Further details on the insecurity of this cipher can be found in this paper.


      The first path segment must be the name of the share, which you entered when you started to share on Windows. On smbd, it's the section title in smb.conf (usually in /etc/samba/) file. You can find shares by querying the root if you're unsure (e.g. rclone lsd remote:).

You can't access shared printers from rclone, obviously.

You can't use Anonymous access for logging in; you have to use the guest user with an empty password instead. The rclone client tries to avoid 8.3 names when uploading files by encoding trailing spaces and periods.

Alternatively, the local backend on Windows can access SMB servers using UNC paths, like \\server\share. This doesn't apply to non-Windows OSes, such as Linux and macOS.

Configuration

Here is an example of making an SMB configuration.

      First run

      rclone config
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> d

Standard options

      Here are the Standard options specific to smb (SMB / CIFS).

      --smb-host

      SMB server hostname to connect to.

- Type: string
- Required: false

Advanced options

      Here are the Advanced options specific to smb (SMB / CIFS).

      --smb-idle-timeout

      Max time before closing idle connections.

- S3 backend: secret encryption key is shared with the gateway

Configuration

To make a new Storj configuration you need one of the following:

- Access Grant that someone else shared with you.
- API Key of a Storj project you are a member of.

      Here is an example of how to make a remote called remote. First run:

       rclone config
Access Grant.
Enter a string value. Press Enter for the default ("").
access_grant> your-access-grant-received-by-someone-else
Remote config
Configuration complete.
Options:
- type: storj
- access_grant: your-access-grant-received-by-someone-else
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote

Encryption Passphrase. To access existing objects enter passphrase used for uploading.
Enter a string value. Press Enter for the default ("").
passphrase> your-human-readable-encryption-passphrase
Remote config
Configuration complete.
Options:
- type: storj
- satellite_address: 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777
- api_key: your-api-key-for-your-storj-project
- passphrase: your-human-readable-encryption-passphrase
- access_grant: the-access-grant-generated-from-the-api-key-and-passphrase
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y

Standard options

      Here are the Standard options specific to storj (Storj Decentralized Cloud Storage).

      --storj-provider

      Choose an authentication method.

- Type: string
- Required: false

Advanced options

      Here are the Advanced options specific to storj (Storj Decentralized Cloud Storage).

      --storj-description

      Description of the remote.

      rclone sync --interactive --progress remote-us:bucket/path/to/dir/ remote-europe:bucket/path/to/dir/

      Or even between another cloud storage and Storj.

      rclone sync --interactive --progress s3:bucket/path/to/dir/ storj:bucket/path/to/dir/
Limitations

      rclone about is not supported by the rclone Storj backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

      See List of backends that do not support rclone about and rclone about

      Known issues


To fix these, please raise your system limits. You can do this by issuing a ulimit -n 65536 just before you run rclone. To change the limits more permanently you can add this to your shell startup script, e.g. $HOME/.bashrc, or change the system-wide configuration, usually /etc/sysctl.conf and/or /etc/security/limits.conf, but please refer to your operating system manual.
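For example, to raise the limit for your own shell sessions (a sketch, assuming bash):

# Add to $HOME/.bashrc
ulimit -n 65536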

      SugarSync

      SugarSync is a cloud service that enables active synchronization of files across computers and other devices for file backup, access, syncing, and sharing.

Configuration

      The initial setup for SugarSync involves getting a token from SugarSync which you can do with rclone. rclone config walks you through it.

      Here is an example of how to make a remote called remote. First run:

       rclone config
Remote config
Username (email address)> nick@craig-wood.com
Your Sugarsync password is only required during setup and will not be stored.
password:
Configuration complete.
Options:
- type: sugarsync
- refresh_token: https://api.sugarsync.com/app-authorization/XXXXXXXXXXXXXXXXXX
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y

      Paths are specified as remote:path

      Paths may be as deep as required, e.g. remote:directory/subdirectory.

NB you can't create files in the top level folder; you have to create a folder, which rclone will create as a "Sync Folder" with SugarSync.

Modification times and hashes

      SugarSync does not support modification times or hashes, therefore syncing will default to --size-only checking. Note that using --update will work as rclone can read the time files were uploaded.

Restricted filename characters

      SugarSync replaces the default restricted characters set except for DEL.

      Invalid UTF-8 bytes will also be replaced, as they can't be used in XML strings.

      Deleting files

      Deleted files will be moved to the "Deleted items" folder by default.

      However you can supply the flag --sugarsync-hard-delete or set the config parameter hard_delete = true if you would like files to be deleted straight away.
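For example, to permanently delete the files in a folder rather than moving them to "Deleted items" (the folder name is a placeholder):

rclone delete --sugarsync-hard-delete remote:OldFiles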

Standard options

      Here are the Standard options specific to sugarsync (Sugarsync).

      --sugarsync-app-id

      Sugarsync App ID.

- Type: bool
- Default: false

      Advanced options

      +

      Advanced options

      Here are the Advanced options specific to sugarsync (Sugarsync).

      --sugarsync-refresh-token

      Sugarsync refresh token.

- Type: string
- Required: false

      Limitations

      +

      Limitations

      rclone about is not supported by the SugarSync backend. Backends without this capability cannot determine free space for an rclone mount or use policy mfs (most free space) as a member of an rclone union remote.

      See List of backends that do not support rclone about and rclone about

Tardigrade

The Tardigrade backend has been renamed to the Storj backend. Old configuration files will continue to work.

      Uloz.to

      Paths are specified as remote:path

      Paths may be as deep as required, e.g. remote:directory/subdirectory.

      The initial setup for Uloz.to involves filling in the user credentials. rclone config walks you through it.

Configuration

      Here is an example of how to make a remote called remote. First run:

       rclone config

      This will guide you through an interactive setup process:

      rclone copy /home/source remote:backup

      User credentials

      The only reliable method is to authenticate the user using username and password. Uloz.to offers an API key as well, but it's reserved for the use of Uloz.to's in-house application and using it in different circumstances is unreliable.

Modification times and hashes

      Uloz.to doesn't allow the user to set a custom modification time, or retrieve the hashes after upload. As a result, the integration uses a free form field the API provides to encode client-provided timestamps and hashes. Timestamps are stored with microsecond precision.

      A server calculated MD5 hash of the file is verified upon upload. Afterwards, the backend only serves the client-side calculated hashes. Hashes can also be retrieved upon creating a file download link, but it's impractical for list-like use cases.

Restricted filename characters

      In addition to the default restricted characters set the following characters are also replaced:


      Deleting files

      By default, files are moved to the recycle bin whereas folders are deleted immediately. Trashed files are permanently deleted after 30 days in the recycle bin.

      Emptying the trash is currently not implemented in rclone.

Root folder ID

      You can set the root_folder_slug for rclone. This is the folder (identified by its Folder slug) that rclone considers to be the root of your Uloz.to drive.

      Normally you will leave this blank and rclone will determine the correct root to use itself. However you can set this to restrict rclone to a specific folder hierarchy.

      In order to do this you will have to find the Folder slug of the folder you wish to use as root. This will be the last segment of the URL when you open the relevant folder in the Uloz.to web interface.

For example, if you browse a folder at the URL https://uloz.to/fm/my-files/foobar, then foobar should be used as the root slug.

      root_folder_slug can be used alongside a specific path in the remote path. For example, if your remote's root_folder_slug corresponds to /foo/bar, remote:baz/qux will refer to ABSOLUTE_ULOZTO_ROOT/foo/bar/baz/qux.
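A minimal config sketch using the example slug above (the remote name is hypothetical):

[ulozto-remote]
type = ulozto
root_folder_slug = foobar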

Standard options

      Here are the Standard options specific to ulozto (Uloz.to).

      --ulozto-app-token

      The application token identifying the app. An app API key can be either found in the API doc https://uloz.to/upload-resumable-api-beta or obtained from customer service.

- Type: string
- Required: false

      Advanced options

      +

      Advanced options

      Here are the Advanced options specific to ulozto (Uloz.to).

      --ulozto-root-folder-slug

      If set, rclone will use this folder as the root folder for all operations. For example, if the slug identifies 'foo/bar/', 'ulozto:baz' is equivalent to 'ulozto:foo/bar/baz' without any root slug set.

- Type: string
- Required: false

      Limitations

      +

      Limitations

Uloz.to file names can't contain the \ character. rclone maps it to and from an identical-looking Unicode equivalent (＼, U+FF3C FULLWIDTH REVERSE SOLIDUS).

      Uloz.to only supports filenames up to 255 characters in length.

Uloz.to rate limits access to the API, but exact details are undisclosed. Practical testing reveals that hitting the rate limit during normal use is very rare, although not impossible with a higher number of concurrently uploaded files.


This is a backend for the Uptobox file storage service. Uptobox is closer to a one-click hoster than a traditional cloud storage provider and therefore not suitable for long term storage.

      Paths are specified as remote:path

      Paths may be as deep as required, e.g. remote:directory/subdirectory.

Configuration

To configure an Uptobox backend you'll need your personal API token. You'll find it in your account settings.

      Here is an example of how to make a remote called remote with the default setup. First run:

      rclone config
      rclone ls remote:

      To copy a local directory to an Uptobox directory called backup

      rclone copy /home/source remote:backup
Modification times and hashes

      Uptobox supports neither modified times nor checksums. All timestamps will read as that set by --default-time.

Restricted filename characters

      In addition to the default restricted characters set the following characters are also replaced:


      Invalid UTF-8 bytes will also be replaced, as they can't be used in XML strings.

Standard options

      Here are the Standard options specific to uptobox (Uptobox).

      --uptobox-access-token

      Your access token.

- Type: string
- Required: false

      Advanced options

      +

      Advanced options

      Here are the Advanced options specific to uptobox (Uptobox).

      --uptobox-private

      Set to make uploaded files private

- Type: string
- Required: false

      Limitations

      +

      Limitations

      Uptobox will delete inactive files that have not been accessed in 60 days.

rclone about is not supported by this backend. An overview of used space can, however, be seen in the Uptobox web interface.

      Union


      Subfolders can be used in upstream remotes. Assume a union remote named backup with the remotes mydrive:private/backup. Invoking rclone mkdir backup:desktop is exactly the same as invoking rclone mkdir mydrive:private/backup/desktop.

      There is no special handling of paths containing .. segments. Invoking rclone mkdir backup:../desktop is exactly the same as invoking rclone mkdir mydrive:private/backup/../desktop.

Configuration

      Here is an example of how to make a union called remote for local folders. First run:

       rclone config

      This will guide you through an interactive setup process:

Cache time of usage and free space (in seconds). This option is only useful when a path preserving policy is used.
Enter a signed integer. Press Enter for the default ("120").
cache_time>
Remote config
Configuration complete.
Options:
- type: union
- upstreams: remote1:dir1 remote2:dir2 remote3:dir3
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote

upstreams = /local:writeback remote:dir

      When files are written, they will be written to both remote:dir and /local.

      As many remotes as desired can be added to upstreams but there should only be one :writeback tag.

      Rclone does not manage the :writeback remote in any way other than writing files back to it. So if you need to expire old files or manage the size then you will have to do this yourself.

Standard options

      Here are the Standard options specific to union (Union merges the contents of several upstream fs).

      --union-upstreams

      List of space separated upstreams.

- Type: int
- Default: 120

      Advanced options

      +

      Advanced options

      Here are the Advanced options specific to union (Union merges the contents of several upstream fs).

      --union-min-free-space

      Minimum viable free space for lfs/eplfs policies.

- Type: string
- Required: false

      Metadata

      +

      Metadata

      Any metadata supported by the underlying remote is read and written.

      See the metadata docs for more info.

      WebDAV

      Paths are specified as remote:path

      Paths may be as deep as required, e.g. remote:directory/subdirectory.

Configuration

      To configure the WebDAV remote you will need to have a URL for it, and a username and password. If you know what kind of system you are connecting to then rclone can enable extra features.

      Here is an example of how to make a remote called remote. First run:

       rclone config
password:
Bearer token instead of user/pass (e.g. a Macaroon)
bearer_token>
Remote config
Configuration complete.
Options:
- type: webdav
- url: https://example.com/remote.php/webdav/
- vendor: nextcloud
- user: user
- pass: *** ENCRYPTED ***
- bearer_token:
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
      rclone ls remote:

To copy a local directory to a WebDAV directory called backup

      rclone copy /home/source remote:backup
Modification times and hashes

      Plain WebDAV does not support modified times. However when used with Fastmail Files, Owncloud or Nextcloud rclone will support modified times.

      Likewise plain WebDAV does not support hashes, however when used with Fastmail Files, Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes. Depending on the exact version of Owncloud or Nextcloud hashes may appear on all objects, or only on objects which had a hash uploaded with them.

Standard options

      Here are the Standard options specific to webdav (WebDAV).

      --webdav-url

      URL of http host to connect to.

- Type: string
- Required: false

      Advanced options

      +

      Advanced options

      Here are the Advanced options specific to webdav (WebDAV).

      --webdav-bearer-token-command

      Command to run to get a bearer token.

- Type: bool
- Default: false

      --webdav-unix-socket

Path to a unix domain socket to dial to, instead of opening a TCP connection directly

Properties:
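For example (a sketch; the socket path is hypothetical):

rclone lsd remote: --webdav-unix-socket /run/webdav.sock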

      --webdav-description

      Description of the remote.

      Properties:

vendor = other
bearer_token_command = oidc-token XDC

      Yandex Disk

      Yandex Disk is a cloud storage solution created by Yandex.

Configuration

      Here is an example of making a yandex configuration. First run

      rclone config

      This will guide you through an interactive setup process:

If your browser doesn't open automatically go to the following link: http://
Log in and authorize rclone for access
Waiting for code...
Got code
Configuration complete.
Options:
- type: yandex
- client_id:
- client_secret:
- token: {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"OAuth","expiry":"2016-12-29T12:27:11.362788025Z"}
Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y

      Sync /home/local/directory to the remote path, deleting any excess files in the path.

      rclone sync --interactive /home/local/directory remote:directory

      Yandex paths may be as deep as required, e.g. remote:directory/subdirectory.

Modification times and hashes

      Modified times are supported and are stored accurate to 1 ns in custom metadata called rclone_modified in RFC3339 with nanoseconds format.

      The MD5 hash algorithm is natively supported by Yandex Disk.

      Emptying Trash

      If you wish to empty your trash you can use the rclone cleanup remote: command which will permanently delete all your trashed files. This command does not take any path arguments.

      Quota information

      To view your current quota you can use the rclone about remote: command which will display your usage limit (quota) and the current usage.

Restricted filename characters

      The default restricted characters set are replaced.

      Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON strings.

Standard options

      Here are the Standard options specific to yandex (Yandex Disk).

      --yandex-client-id

      OAuth Client Id.

- Type: string
- Required: false

      Advanced options

      +

      Advanced options

      Here are the Advanced options specific to yandex (Yandex Disk).

      --yandex-token

      OAuth Access Token as a JSON blob.

- Type: Encoding
- Default: Slash,Del,Ctl,InvalidUtf8,Dot

      --yandex-spoof-ua

Set the user agent to match an official version of the yandex disk client. May help with upload performance.

Properties:

      --yandex-description

      Description of the remote.

      Properties:

- Type: string
- Required: false

      Limitations

      +

      Limitations

      When uploading very large files (bigger than about 5 GiB) you will need to increase the --timeout parameter. This is because Yandex pauses (perhaps to calculate the MD5SUM for the entire file) before returning confirmation that the file has been uploaded. The default handling of timeouts in rclone is to assume a 5 minute pause is an error and close the connection - you'll see net/http: timeout awaiting response headers errors in the logs if this is happening. Setting the timeout to twice the max size of file in GiB should be enough, so if you want to upload a 30 GiB file set a timeout of 2 * 30 = 60m, that is --timeout 60m.
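For example, to upload a ~30 GiB file with the timeout worked out above (paths are placeholders):

rclone copy --timeout 60m /path/to/big-file remote:backup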

      Having a Yandex Mail account is mandatory to use the Yandex.Disk subscription. Token generation will work without a mail account, but Rclone won't be able to complete any actions.

      [403 - DiskUnsupportedUserAccountTypeError] User account type is not supported.

      Zoho Workdrive

      Zoho WorkDrive is a cloud storage solution created by Zoho.

Configuration

      Here is an example of making a zoho configuration. First run

      rclone config

      This will guide you through an interactive setup process:

Choose a number from below, or type in your own value
 1 / General
   \ "4u2869d2aa6fca04f4f2f896b6539243b85b1"
Enter a Workspace ID> 1
Configuration complete.
Options:
- type: zoho
- token: {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"Zoho-oauthtoken","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","expiry":"2020-10-12T00:54:52.370275223+02:00"}
- root_folder_id: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d>

      Sync /home/local/directory to the remote path, deleting any excess files in the path.

      rclone sync --interactive /home/local/directory remote:directory

Zoho paths may be as deep as required, e.g. remote:directory/subdirectory.

Modification times and hashes

Modified times are currently not supported for Zoho Workdrive.

      No hash algorithms are supported.

      Usage information

      To view your current quota you can use the rclone about remote: command which will display your current usage.

Restricted filename characters

      Only control characters and invalid UTF-8 are replaced. In addition most Unicode full-width characters are not supported at all and will be removed from filenames during upload.

Standard options

      Here are the Standard options specific to zoho (Zoho).

      --zoho-client-id

      OAuth Client Id.

Advanced options

      Here are the Advanced options specific to zoho (Zoho).

      --zoho-token

      OAuth Access Token as a JSON blob.


      Local paths are specified as normal filesystem paths, e.g. /path/to/wherever, so

      rclone sync --interactive /home/source /tmp/destination

      Will sync /home/source to /tmp/destination.

Configuration

For consistency's sake one can also configure a remote of type local in the config file, and access the local filesystem using rclone remote paths, e.g. remote:path/to/wherever, but it is probably easier not to.

      Modification times

Rclone reads and writes the modification times using an accuracy determined by the OS. Typically this is 1 ns on Linux, 10 ns on Windows and 1 second on OS X.


      Invalid UTF-8 bytes will also be replaced, as they can't be converted to UTF-16.

      Paths on Windows

On Windows there are many ways of specifying a path to a file system resource. Local paths can be absolute, like C:\path\to\wherever, or relative, like ..\wherever. Network paths in UNC format, \\server\share, are also supported. Path separator can be either \ (as in C:\path\to\wherever) or / (as in C:/path/to/wherever). Length of these paths are limited to 259 characters for files and 247 characters for directories, but there is an alternative extended-length path format increasing the limit to (approximately) 32,767 characters. This format requires absolute paths and the use of prefix \\?\, e.g. \\?\D:\some\very\long\path. For convenience rclone will automatically convert regular paths into the corresponding extended-length paths, so in most cases you do not have to worry about this (read more below). Using the same prefix \\?\ it is also possible to specify path to volumes identified by their GUID, e.g. \\?\Volume{b75e2c83-0000-0000-0000-602f00000000}\some\path.

      Long paths

      Rclone handles long paths automatically, by converting all paths to extended-length path format, which allows paths up to 32,767 characters.

      This conversion will ensure paths are absolute and prefix them with the \\?\. This is why you will see that your paths, for instance .\files is shown as path \\?\C:\files in the output, and \\server\share as \\?\UNC\server\share.


      NB Rclone (like most unix tools such as du, rsync and tar) treats a bind mount to the same device as being on the same filesystem.

      NB This flag is only available on Unix based systems. On systems where it isn't supported (e.g. Windows) it will be ignored.

Advanced options

      Here are the Advanced options specific to local (Local Disk).

      --local-nounc

      Disable UNC (long path names) conversion on Windows.

- Type: bool
- Default: false

      --local-no-clone

Disable reflink cloning for server-side copies.

Normally, for local-to-local transfers, rclone will "clone" the file when possible, and fall back to "copying" only when cloning is not supported.

Cloning creates a shallow copy (or "reflink") which initially shares blocks with the original file. Unlike a "hardlink", the two files are independent and neither will affect the other if subsequently modified.

Cloning is usually preferable to copying, as it is much faster and is deduplicated by default (i.e. having two identical files does not consume more storage than having just one.) However, for use cases where data redundancy is preferable, --local-no-clone can be used to disable cloning and force "deep" copies.

Currently, cloning is only supported when using APFS on macOS (support for other platforms may be added in the future.)

Properties:
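For example, to force a deep copy on a local-to-local transfer (paths are placeholders):

rclone copy --local-no-clone /Volumes/data/src /Volumes/data/dst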

      --local-no-preallocate

      Disable preallocation of disk space for transferred files.

      Preallocation of disk space helps prevent filesystem fragmentation. However, some virtual filesystem layers (such as Google Drive File Stream) may incorrectly set the actual file size equal to the preallocated space, causing checksum and file size checks to fail. Use this flag to disable preallocation.

- Type: string
- Required: false

      Metadata

      +

      Metadata

      Depending on which OS is in use the local backend may return only some of the system metadata. Setting system metadata is supported on all OSes but setting user metadata is only supported on linux, freebsd, netbsd, macOS and Solaris. It is not supported on Windows yet (see pkg/attrs#47).

      User metadata is stored as extended attributes (which may not be supported by all file systems) under the "user.*" prefix.

      Metadata is supported on files and directories.

- "error": return an error based on option value

Changelog

v1.68.0 - 2024-09-08

See commits

      v1.67.0 - 2024-06-14

      See commits

      Bugs and Limitations

Limitations

      Directory timestamps aren't preserved on some backends

      As of v1.66, rclone supports syncing directory modtimes, if the backend supports it. Some backends do not support it -- see overview for a complete list. Additionally, note that empty directories are not synced by default (this can be enabled with --create-empty-src-dirs.)

      Rclone struggles with millions of files in a directory/bucket


      Rclone is using too much memory or appears to have a memory leak

      Rclone is written in Go which uses a garbage collector. The default settings for the garbage collector mean that it runs when the heap size has doubled.

      However it is possible to tune the garbage collector to use less memory by setting GOGC to a lower value, say export GOGC=20. This will make the garbage collector work harder, reducing memory size at the expense of CPU usage.
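For example (the remote and path are placeholders):

export GOGC=20
rclone sync remote:bucket /local/path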

The most common cause of rclone using lots of memory is a single directory with millions of files in. Rclone has to load this entirely into memory as rclone objects. Each rclone object takes 0.5k-1k of memory. There is a workaround for this which involves a bit of scripting.

      Rclone changes fullwidth Unicode punctuation marks in file names

For example: On a Windows system, you have a file with name Test：1.jpg, where ： is the Unicode fullwidth colon symbol. When using rclone to copy this to your Google Drive, you will notice that the file gets renamed to Test:1.jpg, where : is the regular (halfwidth) colon.

The reason for such renames is the way rclone handles different restricted filenames on different cloud storage systems. It tries to avoid ambiguous file names as much as possible, and to allow moving files between many cloud storage systems transparently, by replacing invalid characters with similar looking Unicode characters when transferring to one storage system, and replacing them back again when transferring to a different storage system where the original characters are supported. When the same Unicode characters are intentionally used in file names, this replacement strategy leads to unwanted renames. Read more here.

- Michał Dzienisiewicz
- Florian Klink
- Bill Fraser
- Thearas
- Filipe Herculano
- Russ Bubley
- Paul Collins
- Tomasz Melcer
- itsHenry
- Ke Wang
- AThePeanut4
- Tobias Markus
- Ernie Hershey
- Will Miles
- David Seifert
- Fornax
- Sam Harrison
- Péter Bozsó
- Georg Welzel
- John Oxley
- Pawel Palucha
- crystalstall
- nipil
- yuval-cloudinary
- Mathieu Moreau
- fsantagostinobietti
- Oleg Kunitsyn

Contact the rclone project

      Forum

      diff --git a/MANUAL.md b/MANUAL.md index 27e5b2153..11e520ba8 100644 --- a/MANUAL.md +++ b/MANUAL.md @@ -1,6 +1,6 @@ % rclone(1) User Manual % Nick Craig-Wood -% Jun 14, 2024 +% Sep 08, 2024 # Rclone syncs your files to cloud storage @@ -119,7 +119,9 @@ WebDAV or S3, that work out of the box.) - Dropbox - Enterprise File Fabric - Fastmail Files +- Files.com - FTP +- Gofile - Google Cloud Storage - Google Drive - Google Photos @@ -158,6 +160,7 @@ WebDAV or S3, that work out of the box.) - pCloud - Petabox - PikPak +- Pixeldrain - premiumize.me - put.io - Proton Drive @@ -653,6 +656,11 @@ which corresponds to `~/go/bin/rclone` by default). go install github.com/rclone/rclone@latest ``` +In some situations, rclone executable size might be too big for deployment +in very restricted environments when all backends with large SDKs are included. +To limit binary size unused backends can be commented out in `backends/all/all.go` +and unused commands in `cmd/all/all.go` before building with `go build` or `make` + ## Ansible installation {#ansible} This can be done with [Stefan Weichinger's ansible @@ -872,12 +880,15 @@ See the following for detailed instructions for * [Digi Storage](https://rclone.org/koofr/#digi-storage) * [Dropbox](https://rclone.org/dropbox/) * [Enterprise File Fabric](https://rclone.org/filefabric/) + * [Files.com](https://rclone.org/filescom/) * [FTP](https://rclone.org/ftp/) + * [Gofile](https://rclone.org/gofile/) * [Google Cloud Storage](https://rclone.org/googlecloudstorage/) * [Google Drive](https://rclone.org/drive/) * [Google Photos](https://rclone.org/googlephotos/) * [Hasher](https://rclone.org/hasher/) - to handle checksums for other remotes * [HDFS](https://rclone.org/hdfs/) + * [Hetzner Storage Box](https://rclone.org/sftp/#hetzner-storage-box) * [HiDrive](https://rclone.org/hidrive/) * [HTTP](https://rclone.org/http/) * [Internet Archive](https://rclone.org/internetarchive/) @@ -895,11 +906,13 @@ See the following for detailed instructions for * [Oracle Object Storage](https://rclone.org/oracleobjectstorage/) * [Pcloud](https://rclone.org/pcloud/) * [PikPak](https://rclone.org/pikpak/) + * [Pixeldrain](https://rclone.org/pixeldrain/) * [premiumize.me](https://rclone.org/premiumizeme/) * [put.io](https://rclone.org/putio/) * [Proton Drive](https://rclone.org/protondrive/) * [QingStor](https://rclone.org/qingstor/) * [Quatrix by Maytech](https://rclone.org/quatrix/) + * [rsync.net](https://rclone.org/sftp/#rsync-net) * [Seafile](https://rclone.org/seafile/) * [SFTP](https://rclone.org/sftp/) * [Sia](https://rclone.org/sia/) @@ -979,10 +992,9 @@ rclone config [flags] -h, --help help for config ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone config create](https://rclone.org/commands/rclone_config_create/) - Create a new remote with name, type and options. @@ -990,6 +1002,7 @@ See the [global flags page](https://rclone.org/flags/) for global options not li * [rclone config disconnect](https://rclone.org/commands/rclone_config_disconnect/) - Disconnects user from remote * [rclone config dump](https://rclone.org/commands/rclone_config_dump/) - Dump the config file as JSON. * [rclone config edit](https://rclone.org/commands/rclone_config_edit/) - Enter an interactive configuration session. 
+* [rclone config encryption](https://rclone.org/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file * [rclone config file](https://rclone.org/commands/rclone_config_file/) - Show path of configuration file in use. * [rclone config password](https://rclone.org/commands/rclone_config_password/) - Update password in an existing remote. * [rclone config paths](https://rclone.org/commands/rclone_config_paths/) - Show paths used for configuration, cache, temp etc. @@ -1007,7 +1020,6 @@ Copy files from source to dest, skipping identical files. ## Synopsis - Copy the source to the destination. Does not transfer files that are identical on source and destination, testing by size and modification time or MD5SUM. Doesn't delete files from the destination. If you @@ -1085,15 +1097,17 @@ rclone copy source:path dest:path [flags] -h, --help help for copy ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -1125,9 +1139,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -1135,9 +1149,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -1164,18 +1178,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1185,7 +1197,6 @@ Make source and dest identical, modifying destination only. ## Synopsis - Sync the source to the destination, changing the destination only. Doesn't transfer files that are identical on source and destination, testing by size and modification time or MD5SUM. 
@@ -1294,15 +1305,17 @@ rclone sync source:path dest:path [flags] -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -1334,9 +1347,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Sync Options +### Sync Options -Flags just used for `rclone sync`. +Flags used for sync commands ``` --backup-dir string Make backups into hierarchy based in DIR @@ -1353,9 +1366,9 @@ Flags just used for `rclone sync`. --track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash") ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -1363,9 +1376,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -1392,18 +1405,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1413,7 +1424,6 @@ Move files from source to dest. ## Synopsis - Moves the contents of the source directory to the destination directory. Rclone will error if the source and destination overlap and the remote does not support a server-side directory move operation. @@ -1465,15 +1475,17 @@ rclone move source:path dest:path [flags] -h, --help help for move ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. 
+Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -1505,9 +1517,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -1515,9 +1527,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -1544,18 +1556,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1565,7 +1575,6 @@ Remove the files in path. ## Synopsis - Remove the files in path. Unlike [purge](https://rclone.org/commands/rclone_purge/) it obeys include/exclude filters so can be used to selectively delete files. @@ -1605,10 +1614,12 @@ rclone delete remote:path [flags] --rmdirs rmdirs removes empty directories but leaves root intact ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -1616,9 +1627,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -1645,18 +1656,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. 
+Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1666,7 +1675,6 @@ Remove the path and all of its contents. ## Synopsis - Remove the path and all of its contents. Note that this does not obey include/exclude filters - everything will be removed. Use the [delete](https://rclone.org/commands/rclone_delete/) command if you want to selectively @@ -1687,10 +1695,12 @@ rclone purge remote:path [flags] -h, --help help for purge ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -1698,9 +1708,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1718,10 +1726,12 @@ rclone mkdir remote:path [flags] -h, --help help for mkdir ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -1729,9 +1739,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1741,7 +1749,6 @@ Remove the empty directory at path. ## Synopsis - This removes empty directory given by path. Will not remove the path if it has any objects in it, not even empty subdirectories. Use command [rmdirs](https://rclone.org/commands/rclone_rmdirs/) (or [delete](https://rclone.org/commands/rclone_delete/) @@ -1760,10 +1767,12 @@ rclone rmdir remote:path [flags] -h, --help help for rmdir ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -1771,9 +1780,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1783,7 +1790,6 @@ Checks the files in the source and destination match. ## Synopsis - Checks the files in the source and destination match. 
It compares sizes and hashes (MD5 or SHA1) and logs a report of files that don't match. It doesn't alter the source or destination. @@ -1847,18 +1853,20 @@ rclone check source:path dest:path [flags] --one-way Check one way only, source files must exist on remote ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Check Options +### Check Options -Flags used for `rclone check`. +Flags used for check commands ``` --max-backlog int Maximum number of objects in sync or check backlog (default 10000) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -1885,18 +1893,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1906,7 +1912,6 @@ List the objects in the path with size and path. ## Synopsis - Lists the objects in the source path to standard output in a human readable format with size and path. Recurses by default. @@ -1952,10 +1957,12 @@ rclone ls remote:path [flags] -h, --help help for ls ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -1982,18 +1989,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2003,7 +2008,6 @@ List all directories/containers/buckets in the path. ## Synopsis - Lists the directories in the source path to standard output. Does not recurse by default. Use the `-R` flag to recurse. @@ -2060,10 +2064,12 @@ rclone lsd remote:path [flags] -R, --recursive Recurse into the listing ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -2090,18 +2096,16 @@ Flags for filtering directory listings. 
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2111,7 +2115,6 @@ List the objects in path with modification time, size and path. ## Synopsis - Lists the objects in the source path to standard output in a human readable format with modification time, size and path. Recurses by default. @@ -2157,10 +2160,12 @@ rclone lsl remote:path [flags] -h, --help help for lsl ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -2187,18 +2192,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2208,7 +2211,6 @@ Produces an md5sum file for all the objects in the path. ## Synopsis - Produces an md5sum file for all the objects in the path. This is in the same format as the standard md5sum tool produces. @@ -2241,10 +2243,12 @@ rclone md5sum remote:path [flags] --output-file string Output hashsums to a file rather than the terminal ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -2271,18 +2275,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2292,7 +2294,6 @@ Produces an sha1sum file for all the objects in the path. ## Synopsis - Produces an sha1sum file for all the objects in the path. 
This is in the same format as the standard sha1sum tool produces. @@ -2328,10 +2329,12 @@ rclone sha1sum remote:path [flags] --output-file string Output hashsums to a file rather than the terminal ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -2358,18 +2361,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2379,7 +2380,6 @@ Prints the total size and number of objects in remote:path. ## Synopsis - Counts objects in the path and calculates the total size. Prints the result to standard output. @@ -2410,10 +2410,12 @@ rclone size remote:path [flags] --json Format output as JSON ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -2440,18 +2442,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2461,7 +2461,6 @@ Show the version number. ## Synopsis - Show the rclone version number, the go version, the build target OS and architecture, the runtime OS and kernel version and bitness, build tags and the type of executable (static or dynamic). @@ -2511,10 +2510,9 @@ rclone version [flags] -h, --help help for version ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2524,7 +2522,6 @@ Clean up the remote if possible. ## Synopsis - Clean up the remote if possible. Empty the trash or delete old file versions. Not supported by all remotes. @@ -2539,10 +2536,12 @@ rclone cleanup remote:path [flags] -h, --help help for cleanup ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. 
-## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -2550,9 +2549,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2562,8 +2559,6 @@ Interactively find duplicate filenames and delete/rename them. ## Synopsis - - By default `dedupe` interactively finds files with duplicate names and offers to delete all but one or rename them to be different. This is known as deduping by name. @@ -2682,10 +2677,12 @@ rclone dedupe [mode] remote:path [flags] -h, --help help for dedupe ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -2693,9 +2690,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2705,8 +2700,7 @@ Get quota information from the remote. ## Synopsis - -`rclone about` prints quota information about a remote to standard +Prints quota information about a remote to standard output. The output is typically used, free, quota and trash contents. E.g. Typical output from `rclone about remote:` is: @@ -2765,10 +2759,9 @@ rclone about remote: [flags] --json Format output as JSON ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2778,7 +2771,6 @@ Remote authorization. ## Synopsis - Remote authorization. Used to authorize a remote or headless rclone from a machine with a browser - use as instructed by rclone config. @@ -2800,10 +2792,9 @@ rclone authorize [flags] --template string The path to a custom Go template for generating HTML responses ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2813,7 +2804,6 @@ Run a backend-specific command. ## Synopsis - This runs a backend-specific command. The commands themselves (except for "help" and "features") are defined by the backends and you should see the backend docs for definitions. @@ -2853,10 +2843,12 @@ rclone backend remote:path [opts] [flags] -o, --option stringArray Option in the form name=value or name ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -2864,9 +2856,7 @@ Important flags useful for most commands. 
-v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2928,15 +2918,17 @@ rclone bisync remote1:path1 remote2:path2 [flags] --workdir string Use custom working dir - useful for testing. (default: {WORKDIR}) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -2968,9 +2960,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -2978,9 +2970,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -3007,9 +2999,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3019,8 +3009,7 @@ Concatenates any files and sends them to stdout. ## Synopsis - -rclone cat sends any files to standard output. +Sends any files to standard output. You can use it like this to output a single file @@ -3068,10 +3057,12 @@ rclone cat remote:path [flags] --tail int Only print the last N characters ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -3098,18 +3089,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. 
+Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3119,7 +3108,6 @@ Checks the files in the destination against a SUM file. ## Synopsis - Checks that hashsums of destination files match the SUM file. It compares hashes (MD5, SHA1, etc) and logs a report of files which don't match. It doesn't alter the file system. @@ -3176,10 +3164,12 @@ rclone checksum sumfile dst:path [flags] --one-way Check one way only, source files must exist on remote ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -3206,18 +3196,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3227,7 +3215,6 @@ Output completion script for a given shell. ## Synopsis - Generates a shell completion script for rclone. Run with `--help` to list the supported shells. @@ -3238,10 +3225,9 @@ Run with `--help` to list the supported shells. -h, --help help for completion ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone completion bash](https://rclone.org/commands/rclone_completion_bash/) - Output bash completion script for rclone. @@ -3255,12 +3241,11 @@ Output bash completion script for rclone. ## Synopsis - Generates a bash shell autocompletion script for rclone. By default, when run without any arguments, - rclone genautocomplete bash + rclone completion bash the generated script will be written to @@ -3295,10 +3280,9 @@ rclone completion bash [output_file] [flags] -h, --help help for bash ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone completion](https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell. @@ -3308,13 +3292,12 @@ Output fish completion script for rclone. ## Synopsis - Generates a fish autocompletion script for rclone. This writes to /etc/fish/completions/rclone.fish by default so will probably need to be run with sudo or as root, e.g. 
- sudo rclone genautocomplete fish + sudo rclone completion fish Logout and login again to use the autocompletion scripts, or source them directly @@ -3337,10 +3320,9 @@ rclone completion fish [output_file] [flags] -h, --help help for fish ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone completion](https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell. @@ -3350,7 +3332,6 @@ Output powershell completion script for rclone. ## Synopsis - Generate the autocompletion script for powershell. To load completions in your current shell session: @@ -3373,10 +3354,9 @@ rclone completion powershell [output_file] [flags] -h, --help help for powershell ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone completion](https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell. @@ -3386,13 +3366,12 @@ Output zsh completion script for rclone. ## Synopsis - Generates a zsh autocompletion script for rclone. This writes to /usr/share/zsh/vendor-completions/_rclone by default so will probably need to be run with sudo or as root, e.g. - sudo rclone genautocomplete zsh + sudo rclone completion zsh Logout and login again to use the autocompletion scripts, or source them directly @@ -3415,10 +3394,9 @@ rclone completion zsh [output_file] [flags] -h, --help help for zsh ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone completion](https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell. @@ -3428,7 +3406,6 @@ Create a new remote with name, type and options. ## Synopsis - Create a new remote of `name` with `type` and options. The options should be passed in pairs of `key` `value` or as `key=value`. @@ -3548,10 +3525,9 @@ rclone config create name type [key value]* [flags] --state string State - use with --continue ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -3569,10 +3545,9 @@ rclone config delete name [flags] -h, --help help for delete ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -3582,7 +3557,6 @@ Disconnects user from remote ## Synopsis - This disconnects the remote: passed in to the cloud storage system. This normally means revoking the oauth token. @@ -3600,10 +3574,9 @@ rclone config disconnect remote: [flags] -h, --help help for disconnect ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -3621,10 +3594,9 @@ rclone config dump [flags] -h, --help help for dump ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. 
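+
+For scripted use the JSON written by `rclone config dump` can be
+post-processed with standard tools; for example, to list just the names
+of the configured remotes (this example assumes the third-party `jq`
+tool is installed):
+
+    rclone config dump | jq 'keys'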
@@ -3649,12 +3621,144 @@ rclone config edit [flags]
   -h, --help   help for edit
```

+See the [global flags page](https://rclone.org/flags/) for global options not listed here.
+
+## See Also
+
+* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.
+
+# rclone config encryption
+
+set, remove and check the encryption for the config file
+
+## Synopsis
+
+This command sets, clears and checks the encryption for the config file using
+the subcommands below.
+
+
+## Options
+
+```
+  -h, --help   help for encryption
+```

See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.
+* [rclone config encryption check](https://rclone.org/commands/rclone_config_encryption_check/) - Check that the config file is encrypted
+* [rclone config encryption remove](https://rclone.org/commands/rclone_config_encryption_remove/) - Remove the config file encryption password
+* [rclone config encryption set](https://rclone.org/commands/rclone_config_encryption_set/) - Set or change the config file encryption password
+
+# rclone config encryption check
+
+Check that the config file is encrypted
+
+## Synopsis
+
+This checks the config file is encrypted and that you can decrypt it.
+
+It will attempt to decrypt the config using the password you supply.
+
+If decryption fails it will return a non-zero exit code if using
+`--password-command`, otherwise it will prompt again for the password.
+
+If the config file is not encrypted it will return a non-zero exit code.
+
+
+```
+rclone config encryption check [flags]
+```
+
+## Options
+
+```
+  -h, --help   help for check
+```
+
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.
+
+## See Also
+
+* [rclone config encryption](https://rclone.org/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file
+
+# rclone config encryption remove
+
+Remove the config file encryption password
+
+## Synopsis
+
+Remove the config file encryption password
+
+This removes the config file encryption, leaving it unencrypted.
+
+If `--password-command` is in use, this will be called to supply the old config
+password.
+
+If the config was not encrypted then no error will be returned and
+this command will do nothing.
+
+
+```
+rclone config encryption remove [flags]
+```
+
+## Options
+
+```
+  -h, --help   help for remove
+```
+
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.
+
+## See Also
+
+* [rclone config encryption](https://rclone.org/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file
+
+# rclone config encryption set
+
+Set or change the config file encryption password
+
+## Synopsis
+
+This command sets or changes the config file encryption password.
+
+If there was no config password set then it sets a new one, otherwise
+it changes the existing config password.
+
+Note that if you are changing an encryption password using
+`--password-command` then this will be called once to decrypt the
+config using the old password and then again to read the new
+password to re-encrypt the config.
+
+When `--password-command` is called to change the password then the
+environment variable `RCLONE_PASSWORD_CHANGE=1` will be set.
So if
+changing passwords programmatically you can use the environment
+variable to distinguish which password you must supply.
+
+Alternatively you can remove the password first (with `rclone config
+encryption remove`), then set it again with this command which may be
+easier if you don't mind the unencrypted config file being on the disk
+briefly.
+
+
+```
+rclone config encryption set [flags]
+```
+
+## Options
+
+```
+  -h, --help   help for set
+```
+
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.
+
+## See Also
+
+* [rclone config encryption](https://rclone.org/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file

# rclone config file

Show path of configuration file in use.

```
rclone config file [flags]
```

## Options

```
  -h, --help   help for file
```

-
See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.

@@ -3683,7 +3786,6 @@ Update password in an existing remote.

## Synopsis

-
Update an existing remote's password. The password should be passed in
pairs of `key` `password` or as `key=password`. The `password` should be
passed in clear (unobscured).

@@ -3707,10 +3809,9 @@ rclone config password name [key value]+ [flags]
  -h, --help   help for password
```

-
See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.

@@ -3728,10 +3829,9 @@ rclone config paths [flags]
  -h, --help   help for paths
```

-
See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.

@@ -3749,10 +3849,9 @@ rclone config providers [flags]
  -h, --help   help for providers
```

-
See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.

@@ -3762,7 +3861,6 @@ Re-authenticates user with remote.

## Synopsis

-
This reconnects remote: passed in to the cloud storage system.

To disconnect the remote use "rclone config disconnect".

@@ -3780,10 +3878,9 @@ rclone config reconnect remote: [flags]
  -h, --help   help for reconnect
```

-
See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.

@@ -3815,10 +3912,9 @@ rclone config redacted [] [flags]
  -h, --help   help for redacted
```

-
See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.

@@ -3836,10 +3932,9 @@ rclone config show [] [flags]
  -h, --help   help for show
```

-
See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session.

@@ -3857,10 +3952,9 @@ rclone config touch [flags]
  -h, --help   help for touch
```

-
See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-# SEE ALSO +## See Also * [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -3870,7 +3964,6 @@ Update options in an existing remote. ## Synopsis - Update an existing remote's options. The options should be passed in pairs of `key` `value` or as `key=value`. @@ -3990,10 +4083,9 @@ rclone config update name [key value]+ [flags] --state string State - use with --continue ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4003,7 +4095,6 @@ Prints info about logged in user of remote. ## Synopsis - This prints the details of the person logged in to the cloud storage system. @@ -4019,10 +4110,9 @@ rclone config userinfo remote: [flags] --json Format output as JSON ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4032,7 +4122,6 @@ Copy files from source to dest, skipping identical files. ## Synopsis - If source:path is a file or directory then it copies it to a file or directory named dest:path. @@ -4072,15 +4161,17 @@ rclone copyto source:path dest:path [flags] -h, --help help for copyto ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -4112,9 +4203,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -4122,9 +4213,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -4151,18 +4242,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. 
+Flags for listing directories

```
      --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
      --fast-list           Use recursive list if available; uses more memory but fewer transactions
```

-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.

@@ -4172,7 +4261,6 @@ Copy the contents of the URL supplied to dest:path.

## Synopsis

-
Download a URL's content and copy it to the destination without saving
it in temporary storage.

@@ -4218,10 +4306,12 @@ rclone copyurl https://example.com dest:path [flags]
      --stdout       Write the output to stdout rather than a file
```

+Options shared with other commands are described next.
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-## Important Options
+### Important Options

-Important flags useful for most commands.
+Important flags useful for most commands

```
  -n, --dry-run         Do a trial run with no permanent changes
  -i, --interactive     Enable interactive mode
  -v, --verbose count   Print lots more stuff (repeat for more)
```

-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.

@@ -4241,10 +4329,9 @@ Cryptcheck checks the integrity of an encrypted remote.

## Synopsis

-
-rclone cryptcheck checks a remote against a [crypted](https://rclone.org/crypt/) remote.
-This is the equivalent of running rclone [check](https://rclone.org/commands/rclone_check/),
-but able to check the checksums of the encrypted remote.
+Checks a remote against a [crypted](https://rclone.org/crypt/) remote. This is the equivalent
+of running rclone [check](https://rclone.org/commands/rclone_check/), but able to check the
+checksums of the encrypted remote.

For it to work the underlying remote of the cryptedremote must support
some kind of checksum.

@@ -4307,18 +4394,20 @@ rclone cryptcheck remote:path cryptedremote:path [flags]
      --one-way         Check one way only, source files must exist on remote
```

+Options shared with other commands are described next.
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-## Check Options
+### Check Options

-Flags used for `rclone check`.
+Flags used for check commands

```
      --max-backlog int   Maximum number of objects in sync or check backlog (default 10000)
```

-## Filter Options
+### Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

```
      --delete-excluded   Delete files on dest excluded from sync
@@ -4345,18 +4434,16 @@ Flags for filtering directory listings.
      --min-size SizeSuffix   Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```

-## Listing Options
+### Listing Options

-Flags for listing directories.
+Flags for listing directories

```
      --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
      --fast-list           Use recursive list if available; uses more memory but fewer transactions
```

-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
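+
+For example, to check a local unencrypted tree against its encrypted
+copy (the remote name `secret:` here is a hypothetical crypt remote):
+
+    rclone cryptcheck /path/to/local secret: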
@@ -4366,9 +4453,8 @@ Cryptdecode returns unencrypted file names. ## Synopsis - -rclone cryptdecode returns unencrypted file names when provided with -a list of encrypted file names. List limit is 10 items. +Returns unencrypted file names when provided with a list of encrypted file +names. List limit is 10 items. If you supply the `--reverse` flag, it will return encrypted file names. @@ -4393,10 +4479,9 @@ rclone cryptdecode encryptedremote: encryptedfilename [flags] --reverse Reverse cryptdecode, encrypts filenames ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -4406,7 +4491,6 @@ Remove a single file from remote. ## Synopsis - Remove a single file from remote. Unlike `delete` it cannot be used to remove a directory and it doesn't obey include/exclude filters - if the specified file exists, it will always be removed. @@ -4422,10 +4506,12 @@ rclone deletefile remote:path [flags] -h, --help help for deletefile ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -4433,168 +4519,16 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. -# rclone genautocomplete - -Output completion script for a given shell. - -# Synopsis - - -Generates a shell completion script for rclone. -Run with `--help` to list the supported shells. - - -# Options - -``` - -h, --help help for genautocomplete -``` - -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO - -* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. -* [rclone genautocomplete bash](https://rclone.org/commands/rclone_genautocomplete_bash/) - Output bash completion script for rclone. -* [rclone genautocomplete fish](https://rclone.org/commands/rclone_genautocomplete_fish/) - Output fish completion script for rclone. -* [rclone genautocomplete zsh](https://rclone.org/commands/rclone_genautocomplete_zsh/) - Output zsh completion script for rclone. - -# rclone genautocomplete bash - -Output bash completion script for rclone. - -# Synopsis - - -Generates a bash shell autocompletion script for rclone. - -This writes to /etc/bash_completion.d/rclone by default so will -probably need to be run with sudo or as root, e.g. - - sudo rclone genautocomplete bash - -Logout and login again to use the autocompletion scripts, or source -them directly - - . /etc/bash_completion - -If you supply a command line argument the script will be written -there. - -If output_file is "-", then the output will be written to stdout. - - -``` -rclone genautocomplete bash [output_file] [flags] -``` - -# Options - -``` - -h, --help help for bash -``` - -See the [global flags page](https://rclone.org/flags/) for global options not listed here. 
- -# SEE ALSO - -* [rclone genautocomplete](https://rclone.org/commands/rclone_genautocomplete/) - Output completion script for a given shell. - -# rclone genautocomplete fish - -Output fish completion script for rclone. - -# Synopsis - - -Generates a fish autocompletion script for rclone. - -This writes to /etc/fish/completions/rclone.fish by default so will -probably need to be run with sudo or as root, e.g. - - sudo rclone genautocomplete fish - -Logout and login again to use the autocompletion scripts, or source -them directly - - . /etc/fish/completions/rclone.fish - -If you supply a command line argument the script will be written -there. - -If output_file is "-", then the output will be written to stdout. - - -``` -rclone genautocomplete fish [output_file] [flags] -``` - -# Options - -``` - -h, --help help for fish -``` - -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO - -* [rclone genautocomplete](https://rclone.org/commands/rclone_genautocomplete/) - Output completion script for a given shell. - -# rclone genautocomplete zsh - -Output zsh completion script for rclone. - -# Synopsis - - -Generates a zsh autocompletion script for rclone. - -This writes to /usr/share/zsh/vendor-completions/_rclone by default so will -probably need to be run with sudo or as root, e.g. - - sudo rclone genautocomplete zsh - -Logout and login again to use the autocompletion scripts, or source -them directly - - autoload -U compinit && compinit - -If you supply a command line argument the script will be written -there. - -If output_file is "-", then the output will be written to stdout. - - -``` -rclone genautocomplete zsh [output_file] [flags] -``` - -# Options - -``` - -h, --help help for zsh -``` - -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO - -* [rclone genautocomplete](https://rclone.org/commands/rclone_genautocomplete/) - Output completion script for a given shell. - # rclone gendocs Output markdown docs for rclone to the directory supplied. ## Synopsis - This produces markdown docs for the rclone commands to the directory supplied. These are in a format suitable for hugo to render into the rclone.org website. @@ -4609,10 +4543,9 @@ rclone gendocs output_directory [flags] -h, --help help for gendocs ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -4703,10 +4636,9 @@ rclone gitannex [flags] -h, --help help for gitannex ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -4716,7 +4648,6 @@ Produces a hashsum file for all the objects in the path. ## Synopsis - Produces a hash file for all the objects in the path using the hash named. The output is in the same format as the standard md5sum/sha1sum tool. @@ -4765,10 +4696,12 @@ rclone hashsum [ remote:path] [flags] --output-file string Output hashsums to a file rather than the terminal ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. 
+Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -4795,18 +4728,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -4816,8 +4747,7 @@ Generate public link to file/folder. ## Synopsis -rclone link will create, retrieve or remove a public link to the given -file or folder. +Create, retrieve or remove a public link to the given file or folder. rclone link remote:path/to/file rclone link remote:path/to/folder/ @@ -4851,10 +4781,9 @@ rclone link remote:path [flags] --unlink Remove existing public link to file/folder ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -4865,26 +4794,40 @@ List all the remotes in the config file and defined in environment variables. ## Synopsis -rclone listremotes lists all the available remotes from the config file. +Lists all the available remotes from the config file, or the remotes matching +an optional filter. -When used with the `--long` flag it lists the types and the descriptions too. +Prints the result in human-readable format by default, and as a simple list of +remote names, or if used with flag `--long` a tabular format including +the remote names, types and descriptions. Using flag `--json` produces +machine-readable output instead, which always includes all attributes - including +the source (file or environment). + +Result can be filtered by a filter argument which applies to all attributes, +and/or filter flags specific for each attribute. The values must be specified +according to regular rclone filtering pattern syntax. ``` -rclone listremotes [flags] +rclone listremotes [] [flags] ``` ## Options ``` - -h, --help help for listremotes - --long Show the type and the description as well as names + --description string Filter remotes by description + -h, --help help for listremotes + --json Format output as JSON + --long Show type and description in addition to name + --name string Filter remotes by name + --order-by string Instructions on how to order the result, e.g. 'type,name=descending' + --source string Filter remotes by source, e.g. 'file' or 'environment' + --type string Filter remotes by type ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -4894,7 +4837,6 @@ List directories and objects in remote:path formatted for parsing. ## Synopsis - List the contents of the source path (directories and objects) to standard output in a form which is easy to parse by scripts. 
By default this will just be the names of the objects and directories, @@ -5047,10 +4989,12 @@ rclone lsf remote:path [flags] -t, --time-format string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -5077,18 +5021,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -5100,7 +5042,7 @@ List directories and objects in the path in JSON format. List directories and objects in the path in JSON format. -The output is an array of Items, where each Item looks like this +The output is an array of Items, where each Item looks like this: { "Hashes" : { @@ -5122,44 +5064,50 @@ The output is an array of Items, where each Item looks like this "Tier" : "hot", } -If `--hash` is not specified, the Hashes property will be omitted. The -types of hash can be specified with the `--hash-type` parameter (which -may be repeated). If `--hash-type` is set then it implies `--hash`. +The exact set of properties included depends on the backend: -If `--no-modtime` is specified then ModTime will be blank. This can -speed things up on remotes where reading the ModTime takes an extra -request (e.g. s3, swift). +- The property IsBucket will only be included for bucket-based remotes, and only + for directories that are buckets. It will always be omitted when value is not true. +- Properties Encrypted and EncryptedPath will only be included for encrypted + remotes, and (as mentioned below) only if the `--encrypted` option is set. -If `--no-mimetype` is specified then MimeType will be blank. This can -speed things up on remotes where reading the MimeType takes an extra -request (e.g. s3, swift). +Different options may also affect which properties are included: -If `--encrypted` is not specified the Encrypted will be omitted. +- If `--hash` is not specified, the Hashes property will be omitted. The + types of hash can be specified with the `--hash-type` parameter (which + may be repeated). If `--hash-type` is set then it implies `--hash`. +- If `--no-modtime` is specified then ModTime will be blank. This can + speed things up on remotes where reading the ModTime takes an extra + request (e.g. s3, swift). +- If `--no-mimetype` is specified then MimeType will be blank. This can + speed things up on remotes where reading the MimeType takes an extra + request (e.g. s3, swift). +- If `--encrypted` is not specified the Encrypted and EncryptedPath + properties will be omitted - even for encrypted remotes. +- If `--metadata` is set then an additional Metadata property will be + returned. 
This will have [metadata](https://rclone.org/docs/#metadata) in rclone standard format
+ as a JSON object.

-If `--dirs-only` is not specified files in addition to directories are
-returned
+The default is to list directories and files/objects, but this can be changed
+with the following options:

-If `--files-only` is not specified directories in addition to the files
-will be returned.
+- If `--dirs-only` is specified then directories will be returned
+  only, no files/objects.
+- If `--files-only` is specified then files will be returned only,
+  no directories.

-If `--metadata` is set then an additional Metadata key will be returned.
-This will have metadata in rclone standard format as a JSON object.
-
-if `--stat` is set then a single JSON blob will be returned about the
-item pointed to. This will return an error if the item isn't found.
-However on bucket based backends (like s3, gcs, b2, azureblob etc) if
-the item isn't found it will return an empty directory as it isn't
-possible to tell empty directories from missing directories there.
+If `--stat` is set then the output is not an array of items,
+but instead a single JSON blob will be returned about the item pointed to.
+This will return an error if the item isn't found, however on bucket based
+backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will
+return an empty directory, as it isn't possible to tell empty directories
+from missing directories there.

The Path field will only show folders below the remote path being listed.
If "remote:path" contains the file "subfolder/file.txt", the Path for
"file.txt" will be "subfolder/file.txt", not
"remote:path/subfolder/file.txt".
When used without `--recursive` the Path will always be the same as Name.

-If the directory is a bucket in a bucket-based backend, then
-"IsBucket" will be set to true. This key won't be present unless it is
-"true".
-
The time is in RFC3339 format with up to nanosecond precision. The
number of decimal digits in the seconds will depend on the precision
that the remote can hold the times, so if times are accurate to the
@@ -5169,7 +5117,8 @@ accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits
will be shown ("2017-05-31T16:15:57+01:00").

The whole output can be processed as a JSON blob, or alternatively it
-can be processed line by line as each item is written one to a line.
+can be processed line by line as each item is written on individual lines
+(except with `--stat`).

Any of the filtering options can be applied to this command.

@@ -5215,10 +5164,12 @@ rclone lsjson remote:path [flags]
      --stat             Just return the info for the pointed to file
```

+Options shared with other commands are described next.
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-## Filter Options
+### Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

```
      --delete-excluded   Delete files on dest excluded from sync
@@ -5245,18 +5196,16 @@ Flags for filtering directory listings.
      --min-size SizeSuffix   Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```

-## Listing Options
+### Listing Options

-Flags for listing directories.
+Flags for listing directories

```
      --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
      --fast-list           Use recursive list if available; uses more memory but fewer transactions
```

-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.

@@ -5960,6 +5909,11 @@ These flags control the chunking:

      --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
      --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+     --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -5975,6 +5929,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
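+
+For example, a mount using these suggested starting values (the remote
+name and mountpoint are placeholders) might be run as:
+
+    rclone mount remote:path /path/to/mountpoint --vfs-read-chunk-size 4M --vfs-read-chunk-streams 16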
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -6102,9 +6080,9 @@ rclone mount remote:path /path/to/mountpoint [flags] --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for mount @@ -6120,7 +6098,7 @@ rclone mount remote:path /path/to/mountpoint [flags] --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -6133,6 +6111,7 @@ rclone mount remote:path /path/to/mountpoint [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -6142,10 +6121,12 @@ rclone mount remote:path /path/to/mountpoint [flags] --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -6172,9 +6153,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. 
- -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -6184,7 +6163,6 @@ Move file or directory from source to dest. ## Synopsis - If source:path is a file or directory then it moves it to a file or directory named dest:path. @@ -6227,15 +6205,17 @@ rclone moveto source:path dest:path [flags] -h, --help help for moveto ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -6267,9 +6247,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -6277,9 +6257,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -6306,18 +6286,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -6327,7 +6305,6 @@ Explore a remote with a text based user interface. ## Synopsis - This displays a text based user interface allowing the navigation of a remote. It is most useful for answering the question - "What is using all my disk space?". @@ -6380,7 +6357,7 @@ These flags have the following meaning: This an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for rclone remotes. It is missing lots of features at the moment -but is useful as it stands. +but is useful as it stands. Unlike ncdu it does not show excluded files. Note that it might take some time to delete big files/directories. The UI won't respond in the meantime since the deletion is done synchronously. @@ -6400,10 +6377,12 @@ rclone ncdu remote:path [flags] -h, --help help for ncdu ``` +Options shared with other commands are described next. 
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-## Filter Options
+### Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

```
      --delete-excluded     Delete files on dest excluded from sync
@@ -6430,18 +6409,16 @@ Flags for filtering directory listings.
      --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```

-## Listing Options
+### Listing Options

-Flags for listing directories.
+Flags for listing directories

```
      --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
      --fast-list           Use recursive list if available; uses more memory but fewer transactions
```

-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -7145,6 +7122,11 @@ These flags control the chunking:

     --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
     --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -7160,6 +7142,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -7288,16 +7294,18 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for nfsmount --max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki) --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset) --network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only) + --nfs-cache-dir string The directory the NFS handle cache will use if set --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000) + --nfs-cache-type memory|disk|symlink Type of NFS handle cache to use (default memory) --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files @@ -7306,9 +7314,9 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] -o, --option stringArray Option for libfuse/WinFsp (repeat if required) --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access - --sudo Use sudo to run the mount command as root. + --sudo Use sudo to run the mount/umount commands as root. 
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -7321,6 +7329,7 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -7330,10 +7339,12 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -7360,9 +7371,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -7406,10 +7415,9 @@ rclone obscure password [flags] -h, --help help for obscure ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -7419,8 +7427,6 @@ Run a command against a running rclone. ## Synopsis - - This runs a command against a running rclone. Use the `--url` flag to specify an non default URL to connect on. This can be either a ":port" which is taken to mean "http://localhost:port" or a @@ -7431,6 +7437,13 @@ A username and password can be passed in with `--user` and `--pass`. Note that `--rc-addr`, `--rc-user`, `--rc-pass` will be read also for `--url`, `--user`, `--pass`. +The `--unix-socket` flag can be used to connect over a unix socket like this + + # start server on /tmp/my.socket + rclone rcd --rc-addr unix:///tmp/my.socket + # Connect to it + rclone rc --unix-socket /tmp/my.socket core/stats + Arguments should be passed in as parameter=value. The result will be returned as a JSON object by default. 
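+For example, to adjust the bandwidth limit on a running rclone via the
+remote control, a parameter can be passed like this (the rate value is
+illustrative):
+
+    rclone rc core/bwlimit rate=1M
+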
@@ -7477,21 +7490,21 @@ rclone rc commands parameter [flags]

## Options

```
- -a, --arg stringArray   Argument placed in the "arg" array
- -h, --help              help for rc
-     --json string       Input JSON - use instead of key=value args
-     --loopback          If set connect to this rclone instance not via HTTP
-     --no-output         If set, don't output the JSON result
- -o, --opt stringArray   Option in the form name=value or name placed in the "opt" array
-     --pass string       Password to use to connect to rclone remote control
-     --url string        URL to connect to rclone remote control (default "http://localhost:5572/")
-     --user string       Username to use to rclone remote control
+ -a, --arg stringArray    Argument placed in the "arg" array
+ -h, --help               help for rc
+     --json string        Input JSON - use instead of key=value args
+     --loopback           If set connect to this rclone instance not via HTTP
+     --no-output          If set, don't output the JSON result
+ -o, --opt stringArray    Option in the form name=value or name placed in the "opt" array
+     --pass string        Password to use to connect to rclone remote control
+     --unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly
+     --url string         URL to connect to rclone remote control (default "http://localhost:5572/")
+     --user string        Username to use to rclone remote control
```

-
 See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -7501,9 +7514,7 @@ Copies standard input to file on remote.

## Synopsis

-
-rclone rcat reads from standard input (stdin) and copies it to a
-single remote file.
+Reads from standard input (stdin) and copies it to a single remote file.

    echo "hello world" | rclone rcat remote:path/to/file
    ffmpeg - | rclone rcat remote:path/to/file
@@ -7544,10 +7555,12 @@ rclone rcat remote:path [flags]

      --size int   File size hint to preallocate (default -1)
```

+Options shared with other commands are described next.
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-## Important Options
+### Important Options

-Important flags useful for most commands.
+Important flags useful for most commands

```
  -n, --dry-run         Do a trial run with no permanent changes
@@ -7555,9 +7568,7 @@ Important flags useful for most commands.
  -v, --verbose count   Print lots more stuff (repeat for more)
```

-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -7592,6 +7603,7 @@ or just by using an absolute path name. Note that unix
sockets bypass the authentication - this is expected to be done with file system permissions.

`--rc-addr` may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to accomplish the same.

`--rc-server-read-timeout` and `--rc-server-write-timeout` can be used to
control the timeouts on the server. Note that this is the total time
@@ -7624,6 +7636,20 @@ certificate authority certificate.

values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").

+## Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed by `--rc-addr`).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.

### Template

`--rc-template` allows a user to specify a custom markup template for HTTP
@@ -7699,19 +7725,21 @@ rclone rcd * [flags]

```
  -h, --help   help for rcd
```

+Options shared with other commands are described next.
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-## RC Options
+### RC Options

-Flags to control the Remote Control API.
+Flags to control the Remote Control API

```
      --rc                                 Enable the remote control server
-     --rc-addr stringArray                IPaddress:Port or :Port to bind server to (default [localhost:5572])
+     --rc-addr stringArray                IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
      --rc-allow-origin string             Origin which cross-domain request (CORS) can be executed from
      --rc-baseurl string                  Prefix for URLs - leave blank for root
      --rc-cert string                     TLS PEM key (concatenation of certificate and CA certificate)
      --rc-client-ca string                Client certificate authority to verify clients with
-     --rc-enable-metrics                  Enable prometheus metrics on /metrics
+     --rc-enable-metrics                  Enable the Prometheus metrics path at the remote control server
      --rc-files string                    Path to local files to serve on the HTTP server
      --rc-htpasswd string                 A htpasswd file - if not provided no authentication is done
      --rc-job-expire-duration Duration    Expire finished async jobs older than this value (default 1m0s)
@@ -7736,9 +7764,7 @@ Flags to control the Remote Control API.
      --rc-web-gui-update                  Check and update to latest version of web gui
```

-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -7748,7 +7774,6 @@ Remove empty directories under the path.

## Synopsis

-
This recursively removes any empty directories (including directories
that only contain empty directories), that it finds under the path.
The root path itself will also be removed if it is empty, unless
@@ -7780,10 +7805,12 @@ rclone rmdirs remote:path [flags]

      --leave-root   Do not remove root directory if empty
```

+Options shared with other commands are described next.
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-## Important Options
+### Important Options

-Important flags useful for most commands.
+Important flags useful for most commands

```
  -n, --dry-run         Do a trial run with no permanent changes
@@ -7791,9 +7818,7 @@ Important flags useful for most commands.
  -v, --verbose count   Print lots more stuff (repeat for more)
```

-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -7868,10 +7893,9 @@ rclone selfupdate [flags]

      --version string   Install the given rclone version (default: latest)
```

-
 See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -7899,10 +7923,9 @@ rclone serve [opts] [flags]

```
  -h, --help   help for serve
```

-
 See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone serve dlna](https://rclone.org/commands/rclone_serve_dlna/) - Serve remote:path over DLNA
@@ -7932,6 +7955,10 @@ based on media formats or file extensions. Additionally, there is no
media transcoding support. This means that some players might show
files that they are not able to play back correctly.

+Rclone will add external subtitle files (.srt) to videos if they have the same
+filename as the video file itself (except the extension), either in the same
+directory as the video, or in a "Subs" subdirectory.
+
 ## Server options

Use `--addr` to specify which IP address and port the server should
@@ -8176,6 +8203,11 @@ These flags control the chunking:

     --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
     --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -8191,6 +8223,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
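+For example, a minimal read-only DLNA server on the default port might
+be started like this (the address shown is the documented default):
+
+    rclone serve dlna remote:path --addr :7879 --read-only
+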
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -8309,8 +8365,8 @@ rclone serve dlna remote:path [flags] --addr string The ip:port or :port to bind the DLNA http server to (default ":7879") --announce-interval Duration The interval between SSDP announcements (default 12m0s) --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for dlna --interface stringArray The interface to use for SSDP (repeat as necessary) @@ -8322,7 +8378,7 @@ rclone serve dlna remote:path [flags] --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -8335,6 +8391,7 @@ rclone serve dlna remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -8342,10 +8399,12 @@ rclone serve dlna remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -8372,9 +8431,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone serve](https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. 
@@ -8655,6 +8712,11 @@ These flags control the chunking:

     --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
     --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -8670,6 +8732,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
+
 ## VFS Performance

These flags may be used to enable/disable features of the VFS for
@@ -8798,9 +8884,9 @@ rclone serve docker [flags]

      --default-permissions                    Makes kernel enforce access control based on the file mode (not supported on Windows)
      --devname string                         Set the device name - default is remote:path
      --dir-cache-time Duration                Time to cache directory entries for (default 5m0s)
-     --dir-perms FileMode                     Directory permissions (default 0777)
+     --dir-perms FileMode                     Directory permissions (default 777)
      --direct-io                              Use Direct IO, disables caching of data
-     --file-perms FileMode                    File permissions (default 0666)
+     --file-perms FileMode                    File permissions (default 666)
      --forget-state                           Skip restoring previous state
      --fuse-flag stringArray                  Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)
      --gid uint32                             Override the gid field set by the filesystem (not supported on Windows) (default 1000)
@@ -8820,7 +8906,7 @@
      --socket-addr string                     Address or absolute path (default: /run/docker/plugins/rclone.sock)
      --socket-gid int                         GID for unix socket (default: current process GID) (default 1000)
      --uid uint32                             Override the uid field set by the filesystem (not supported on Windows) (default 1000)
-     --umask int                              Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+     --umask FileMode                         Override the permission bits set by the filesystem (not supported on Windows) (default 002)
      --vfs-block-norm-dupes                   If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
      --vfs-cache-max-age Duration             Max time since last access of objects in the cache (default 1h0m0s)
      --vfs-cache-max-size SizeSuffix          Max total size of objects in the cache (default off)
@@ -8833,6 +8919,7 @@ rclone serve docker
[flags]

      --vfs-read-ahead SizeSuffix              Extra read ahead over --buffer-size when using cache-mode full
      --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks (default 128Mi)
      --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+     --vfs-read-chunk-streams int             The number of parallel streams to read at once
      --vfs-read-wait Duration                 Time to wait for in-sequence read before seeking (default 20ms)
      --vfs-refresh                            Refreshes the directory cache recursively in the background on start
      --vfs-used-is-size rclone size           Use the rclone size algorithm for Used size
@@ -8842,10 +8929,12 @@ rclone serve docker [flags]

      --write-back-cache                       Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows)
```

+Options shared with other commands are described next.
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-## Filter Options
+### Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

```
      --delete-excluded     Delete files on dest excluded from sync
@@ -8872,9 +8961,7 @@ Flags for filtering directory listings.
      --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```

-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone serve](https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol.
@@ -9136,6 +9223,11 @@ These flags control the chunking:

     --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
     --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -9151,6 +9243,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
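+A sketch of serving a remote over FTP with a static login (the
+address, username and password here are illustrative placeholders):
+
+    rclone serve ftp remote:path --addr :2121 --user myuser --pass mysecret
+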
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -9351,8 +9467,8 @@ rclone serve ftp remote:path [flags] --auth-proxy string A program to use to create the backend from the auth --cert string TLS PEM key (concatenation of certificate and CA certificate) --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for ftp --key string TLS PEM Private key @@ -9365,7 +9481,7 @@ rclone serve ftp remote:path [flags] --public-ip string Public IP address to advertise for passive connections --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication (default "anonymous") --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -9379,6 +9495,7 @@ rclone serve ftp remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -9386,10 +9503,12 @@ rclone serve ftp remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -9416,9 +9535,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone serve](https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. @@ -9455,6 +9572,7 @@ or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions. `--addr` may be repeated to listen on multiple IPs/ports/sockets. +Socket activation, described further below, can also be used to accomplish the same. 
`--server-read-timeout` and `--server-write-timeout` can be used to
control the timeouts on the server. Note that this is the total time
@@ -9487,6 +9605,20 @@ certificate authority certificate.

values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").

+## Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed by `--addr`).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.

### Template

`--template` allows a user to specify a custom markup template for HTTP
@@ -9782,6 +9914,11 @@ These flags control the chunking:

     --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
     --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -9797,6 +9934,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -9993,15 +10154,15 @@ rclone serve http remote:path [flags] ## Options ``` - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for http --htpasswd string A htpasswd file - if not provided no authentication is done @@ -10020,7 +10181,7 @@ rclone serve http remote:path [flags] --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -10034,6 +10195,7 @@ rclone serve http remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -10041,10 +10203,12 @@ rclone serve http remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -10071,9 +10235,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. 
-
-# SEE ALSO
+## See Also

* [rclone serve](https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol.
@@ -10085,34 +10247,69 @@ Serve the remote as an NFS mount

Create an NFS server that serves the given remote over the network.

-The primary purpose for this command is to enable [mount command](https://rclone.org/commands/rclone_mount/) on recent macOS versions where
-installing FUSE is very cumbersome.
+This implements an NFSv3 server to serve any rclone remote via NFS.

-Since this is running on NFSv3, no authentication method is available. Any client
-will be able to access the data. To limit access, you can use serve NFS on loopback address
-and rely on secure tunnels (such as SSH). For this reason, by default, a random TCP port is chosen and loopback interface is used for the listening address;
-meaning that it is only available to the local machine. If you want other machines to access the
-NFS mount over local network, you need to specify the listening address and port using `--addr` flag.
+The primary purpose for this command is to enable the [mount
+command](https://rclone.org/commands/rclone_mount/) on recent macOS versions where
+installing FUSE is very cumbersome.

-Modifying files through NFS protocol requires VFS caching. Usually you will need to specify `--vfs-cache-mode`
-in order to be able to write to the mountpoint (full is recommended). If you don't specify VFS cache mode,
-the mount will be read-only. Note also that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the caching handler.
-This should not be set too low or you may experience errors when trying to access files. The default is `1000000`, but consider lowering this limit if
-the server's system resource usage causes problems.
+This server does not implement any authentication so any client will be
+able to access the data. To limit access, you can use `serve nfs` on
+the loopback address or rely on secure tunnels (such as SSH) or use
+firewalling.
+
+For this reason, by default, a random TCP port is chosen and the
+loopback interface is used for the listening address;
+meaning that it is only available to the local machine. If you want
+other machines to access the NFS mount over local network, you need to
+specify the listening address and port using the `--addr` flag.
+
+Modifying files through the NFS protocol requires VFS caching. Usually
+you will need to specify `--vfs-cache-mode` in order to be able to
+write to the mountpoint (`full` is recommended). If you don't specify
+VFS cache mode, the mount will be read-only.
+
+`--nfs-cache-type` controls the type of the NFS handle cache. By
+default this is `memory` where new handles will be randomly allocated
+when needed. These are stored in memory. If the server is restarted
+the handle cache will be lost and connected NFS clients will get stale
+handle errors.
+
+`--nfs-cache-type disk` uses an on disk NFS handle cache. Rclone
+hashes the path of the object and stores it in a file named after the
+hash. These hashes are stored on disk in the directory controlled by
+`--cache-dir` or the exact directory may be specified with
+`--nfs-cache-dir`. Using this means that the NFS server can be
+restarted at will without affecting the connected clients.
+
+`--nfs-cache-type symlink` is similar to `--nfs-cache-type disk` in
+that it uses an on disk cache, but the cache entries are held as
+symlinks. Rclone will use the handle of the underlying file as the NFS
+handle which improves performance.
This sort of cache can't be backed
+up and restored as the underlying handles will change. This is Linux
+only.
+
+`--nfs-cache-handle-limit` controls the maximum number of cached NFS
+handles stored by the caching handler. This should not be set too low
+or you may experience errors when trying to access files. The default
+is `1000000`, but consider lowering this limit if the server's system
+resource usage causes problems. This is only used by the `memory` type
+cache.

To serve NFS over the network use following command:

    rclone serve nfs remote: --addr 0.0.0.0:$PORT --vfs-cache-mode=full

-We specify a specific port that we can use in the mount command:
-
-To mount the server under Linux/macOS, use the following command:
+This specifies a port that can be used in the mount command. To mount
+the server under Linux/macOS, use the following command:

-    mount -oport=$PORT,mountport=$PORT $HOSTNAME: path/to/mountpoint
+    mount -t nfs -o port=$PORT,mountport=$PORT,tcp $HOSTNAME:/ path/to/mountpoint

-Where `$PORT` is the same port number we used in the serve nfs command.
+Where `$PORT` is the same port number used in the `serve nfs` command
+and `$HOSTNAME` is the network address of the machine that `serve nfs`
+was run on.

-This feature is only available on Unix platforms.
+This command is only available on Unix platforms.

## VFS - Virtual File System
@@ -10346,6 +10543,11 @@ These flags control the chunking:

     --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
     --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -10361,6 +10563,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
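+As a sketch putting the `serve nfs` options above together with these
+VFS flags (the port and values are illustrative, not recommendations):
+
+    rclone serve nfs remote: --addr 127.0.0.1:2049 --nfs-cache-type disk --vfs-cache-mode full
+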
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -10478,18 +10704,20 @@ rclone serve nfs remote:path [flags] ``` --addr string IPaddress:Port or :Port to bind server to --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for nfs + --nfs-cache-dir string The directory the NFS handle cache will use if set --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000) + --nfs-cache-type memory|disk|symlink Type of NFS handle cache to use (default memory) --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -10502,6 +10730,7 @@ rclone serve nfs remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -10509,10 +10738,12 @@ rclone serve nfs remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -10539,9 +10770,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone serve](https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. 
@@ -10648,6 +10877,7 @@ or just by using an absolute path name. Note that unix
sockets bypass the authentication - this is expected to be done with file system permissions.

`--addr` may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to accomplish the same.

`--server-read-timeout` and `--server-write-timeout` can be used to
control the timeouts on the server. Note that this is the total time
@@ -10680,6 +10910,20 @@ certificate authority certificate.

values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").

+## Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed by `--addr`).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.

### Authentication

By default this will serve files without needing a login.
@@ -10716,7 +10960,7 @@ rclone serve restic remote:path [flags]

## Options

```
-     --addr stringArray       IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
+     --addr stringArray       IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
      --allow-origin string    Origin which cross-domain request (CORS) can be executed from
      --append-only            Disallow deletion of repository data
      --baseurl string         Prefix for URLs - leave blank for root
@@ -10738,10 +10982,9 @@ rclone serve restic remote:path [flags]

      --user string            User name for authentication
```

-
 See the [global flags page](https://rclone.org/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone serve](https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol.
@@ -10887,6 +11130,33 @@ metadata which will be set as the modification time of the file.

Other operations will return error `Unimplemented`.

+### Authentication
+
+By default this will serve files without needing a login.
+
+You can either use an htpasswd file which can take lots of users, or
+set a single username and password with the `--user` and `--pass` flags.
+
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
+Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
+in standard apache format and supports MD5, SHA1 and BCrypt for basic
+authentication. Bcrypt is recommended.
+
+To create an htpasswd file:
+
+    touch htpasswd
+    htpasswd -B htpasswd user
+    htpasswd -B htpasswd anotherUser
+
+The password file can be updated while rclone is running.
+
+Use `--realm` to set the authentication realm.
+
+Use `--salt` to change the password hashing salt from the default.
+
 ## Server options

Use `--addr` to specify which IP address and port the server should
@@ -10902,6 +11172,7 @@ or just by using an absolute path name. Note that unix
sockets bypass the authentication - this is expected to be done with file system permissions.

`--addr` may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to accomplish the same.

`--server-read-timeout` and `--server-write-timeout` can be used to
control the timeouts on the server. Note that this is the total time
@@ -10934,6 +11205,20 @@ certificate authority certificate.

values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").

+## Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed by `--addr`).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.

## VFS - Virtual File System

This command uses the VFS layer. This adapts the cloud storage objects
@@ -11166,6 +11451,11 @@ These flags control the chunking:

     --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
     --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -11181,6 +11471,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
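+As a sketch, serving a remote over the S3 protocol with a static key
+pair might look like this (the credentials are placeholders):
+
+    rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
+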
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -11296,19 +11610,21 @@ rclone serve s3 remote:path [flags] ## Options ``` - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --auth-key stringArray Set key pair for v4 authorization: access_key_id,secret_access_key + --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --etag-hash string Which hash to use for the ETag, or auto or blank for off (default "MD5") - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --force-path-style If true use path style access if false use virtual hosted style (default true) (default true) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for s3 + --htpasswd string A htpasswd file - if not provided no authentication is done --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") @@ -11316,12 +11632,16 @@ rclone serve s3 remote:path [flags] --no-cleanup Not to cleanup empty folder after object is deleted --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files + --pass string Password for authentication --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access + --realm string Realm for authentication + --salt string Password hashing salt (default "dlPL2MqE") --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) + --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -11334,6 +11654,7 @@ rclone serve s3 remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit 
is reached ('off' is unlimited) (default off)
+   --vfs-read-chunk-streams int The number of parallel streams to read at once
   --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
   --vfs-refresh Refreshes the directory cache recursively in the background on start
   --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
@@ -11341,10 +11662,12 @@ rclone serve s3 remote:path [flags]
   --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)
```
+Options shared with other commands are described next.
+See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-## Filter Options
+### Filter Options
-Flags for filtering directory listings.
+Flags for filtering directory listings
```
   --delete-excluded Delete files on dest excluded from sync
@@ -11371,9 +11694,7 @@ Flags for filtering directory listings.
   --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also
* [rclone serve](https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol.
@@ -11416,6 +11737,17 @@ directory. By default the server binds to localhost:2022 - if you want it to be reachable externally then supply `--addr :2022` for example.
+This also supports being run with socket activation, in which case it will
+listen on the first passed FD.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command:
+
+    systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/
+
+This will socket-activate rclone on the first connection to port 2222 over TCP.
+
 Note that the default of `--vfs-cache-mode off` is fine for the rclone sftp backend, but it may not be with other SFTP clients.
@@ -11667,6 +11999,11 @@ These flags control the chunking:
    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+   --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0
 Rclone will start reading a chunk of size `--vfs-read-chunk-size`, and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -11682,6 +12019,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`.
In testing with AWS S3 the performance +scaled roughly as the `--vfs-read-chunk-streams` setting. + +Similar settings should work for high latency links, but depending on +the latency they may need more `--vfs-read-chunk-streams` in order to +get the throughput. + ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -11882,8 +12243,8 @@ rclone serve sftp remote:path [flags] --auth-proxy string A program to use to create the backend from the auth --authorized-keys string Authorized keys file (default "~/.ssh/authorized_keys") --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for sftp --key stringArray SSH private host key file (Can be multi-valued, leave blank to auto generate) @@ -11896,7 +12257,7 @@ rclone serve sftp remote:path [flags] --read-only Only allow read-only access --stdio Run an sftp server on stdin/stdout --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -11910,6 +12271,7 @@ rclone serve sftp remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -11917,10 +12279,12 @@ rclone serve sftp remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -11947,9 +12311,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone serve](https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. @@ -12001,6 +12363,19 @@ Create a new DWORD BasicAuthLevel with value 2. 
https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint
+## Serving over a unix socket
+
+You can serve webdav on a unix socket like this:
+
+    rclone serve webdav --addr unix:///tmp/my.socket remote:path
+
+and connect to it like this using rclone and the webdav backend:
+
+    rclone --webdav-unix-socket /tmp/my.socket --webdav-url http://localhost lsf :webdav:
+
+Note that there is no authentication on the http protocol - this is expected to be
+done by the permissions on the socket.
+
 ## Server options Use `--addr` to specify which IP address and port the server should
@@ -12016,6 +12391,7 @@ or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions.
 `--addr` may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to accomplish the same.
 `--server-read-timeout` and `--server-write-timeout` can be used to control the timeouts on the server. Note that this is the total time
@@ -12048,6 +12424,20 @@ certificate authority certificate. values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").
+## Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed with `--addr`).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command:
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.
 ### Template `--template` allows a user to specify a custom markup template for HTTP
@@ -12343,6 +12733,11 @@ These flags control the chunking:
    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+   --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0
 Rclone will start reading a chunk of size `--vfs-read-chunk-size`, and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -12358,6 +12753,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
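+
+For example (a sketch only; `remote:path` is a placeholder):
+
+    rclone serve webdav --vfs-read-chunk-size 4M --vfs-read-chunk-streams 16 remote:path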
+ +Similar settings should work for high latency links, but depending on +the latency they may need more `--vfs-read-chunk-streams` in order to +get the throughput. + ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -12554,17 +12973,17 @@ rclone serve webdav remote:path [flags] ## Options ``` - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --disable-dir-list Disable HTML directory list on GET request for a directory --etag-hash string Which hash to use for the ETag, or auto or blank for off - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for webdav --htpasswd string A htpasswd file - if not provided no authentication is done @@ -12583,7 +13002,7 @@ rclone serve webdav remote:path [flags] --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -12597,6 +13016,7 @@ rclone serve webdav remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -12604,10 +13024,12 @@ rclone serve webdav remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. 
+Flags for filtering directory listings
```
   --delete-excluded Delete files on dest excluded from sync
@@ -12634,9 +13056,7 @@ Flags for filtering directory listings.
   --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also
* [rclone serve](https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol.
@@ -12646,10 +13066,9 @@ Changes storage class/tier of objects in remote. ## Synopsis
-
-rclone settier changes storage tier or class at remote if supported.
-Few cloud storage services provides different storage classes on objects,
-for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive,
+Changes storage tier or class at remote if supported. A few cloud storage
+services provide different storage classes on objects, for example
+AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive,
 Google Cloud Storage, Regional Storage, Nearline, Coldline etc. Note that, certain tier changes make objects not available to access immediately.
@@ -12680,10 +13099,9 @@ rclone settier tier remote:path [flags]
   -h, --help help for settier
```
-
 See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-# SEE ALSO
+## See Also
* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -12711,10 +13129,9 @@ so reading their documentation first is recommended.
   -h, --help help for test
```
-
 See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-# SEE ALSO
+## See Also
* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone test changenotify](https://rclone.org/commands/rclone_test_changenotify/) - Log any change notify requests for the remote passed in.
@@ -12739,10 +13156,9 @@ rclone test changenotify remote: [flags]
   --poll-interval Duration Time to wait between polling for changes (default 10s)
```
-
 See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-# SEE ALSO
+## See Also
* [rclone test](https://rclone.org/commands/rclone_test/) - Run a test command
@@ -12769,10 +13185,9 @@ rclone test histogram [remote:path] [flags]
   -h, --help help for histogram
```
-
 See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-# SEE ALSO
+## See Also
* [rclone test](https://rclone.org/commands/rclone_test/) - Run a test command
@@ -12782,10 +13197,10 @@ Discovers file name or other limitations for paths. ## Synopsis
-rclone info discovers what filenames and upload methods are possible
-to write to the paths passed in and how long they can be. It can take some
-time. It will write test files into the remote:path passed in. It outputs
-a bit of go code for each one.
+Discovers what filenames and upload methods are possible to write to the
+paths passed in and how long they can be. It can take some time. It will
+write test files into the remote:path passed in. It outputs a bit of Go
+code for each one.
 **NB** this can create undeletable files and other hazards - use with care
@@ -12809,10 +13224,9 @@ rclone test info [remote:path]+ [flags]
   --write-json string Write results to file
```
-
 See the [global flags page](https://rclone.org/flags/) for global options not listed here.
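+
+For example, to probe `remote:infotest` (a placeholder path - remember
+the warning above that test files will be written into it) and save the
+results as JSON:
+
+    rclone test info --write-json info.json remote:infotest
+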
-# SEE ALSO +## See Also * [rclone test](https://rclone.org/commands/rclone_test/) - Run a test command @@ -12836,10 +13250,9 @@ rclone test makefile []+ [flags] --zero Fill files with ASCII 0x00 ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone test](https://rclone.org/commands/rclone_test/) - Run a test command @@ -12870,10 +13283,9 @@ rclone test makefiles [flags] --zero Fill files with ASCII 0x00 ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone test](https://rclone.org/commands/rclone_test/) - Run a test command @@ -12891,10 +13303,9 @@ rclone test memory remote:path [flags] -h, --help help for memory ``` - See the [global flags page](https://rclone.org/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone test](https://rclone.org/commands/rclone_test/) - Run a test command @@ -12904,7 +13315,6 @@ Create new file or change file modification time. ## Synopsis - Set the modification time on file(s) as specified by remote:path to have the current time. @@ -12940,10 +13350,12 @@ rclone touch remote:path [flags] -t, --timestamp string Use specified time instead of the current time of day ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -12951,9 +13363,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -12980,18 +13392,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](https://rclone.org/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -13001,9 +13411,7 @@ List the contents of the remote in a tree like fashion. ## Synopsis - -rclone tree lists the contents of a remote in a similar way to the -unix tree command. +Lists the contents of a remote in a similar way to the unix tree command. For example @@ -13058,10 +13466,12 @@ rclone tree remote:path [flags] --version Sort files alphanumerically by version ``` +Options shared with other commands are described next. +See the [global flags page](https://rclone.org/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -13088,18 +13498,16 @@ Flags for filtering directory listings. 
   --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
-## Listing Options
+### Listing Options
-Flags for listing directories.
+Flags for listing directories
```
   --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
   --fast-list Use recursive list if available; uses more memory but fewer transactions
```
-See the [global flags page](https://rclone.org/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also
* [rclone](https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -13664,12 +14072,20 @@ for upload:download, e.g.`10M:1M`. characters. It is optional. - `HH:MM` is an hour from 00:00 to 23:59.
+Entries can be separated by spaces or semicolons.
+
+**Note:** Semicolons can be used as separators instead of spaces to avoid parsing issues in environments like Docker.
+
 An example of a typical timetable to avoid link saturation during daytime working hours could be:
+Using spaces as separators:
`--bwlimit "08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off"`
-In this example, the transfer bandwidth will be set to 512 KiB/s
+Using semicolons as separators:
+`--bwlimit "08:00,512k;12:00,10M;13:00,512k;18:00,30M;23:00,off"`
+
+In these examples, the transfer bandwidth will be set to 512 KiB/s
 at 8am every day. At noon, it will rise to 10 MiB/s, and drop back to 512 KiB/sec at 1pm. At 6pm, the bandwidth limit will be set to 30 MiB/s, and at 11pm it will be completely disabled (full speed).
@@ -13677,8 +14093,12 @@ Anything between 11pm and 8am will remain unlimited. An example of timetable with `WEEKDAY` could be:
+Using spaces as separators:
`--bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"`
+Using semicolons as separators:
+`--bwlimit "Mon-00:00,512;Fri-23:59,10M;Sat-10:00,1M;Sun-20:00,off"`
+
 It means that, the transfer bandwidth will be set to 512 KiB/s on Monday. It will rise to 10 MiB/s before the end of Friday. At 10:00 on Saturday it will be set to 1 MiB/s. From 20:00 on Sunday it will
@@ -14270,11 +14690,12 @@ flag set) such as: - local - ftp - sftp
+- pcloud
 Without `--inplace` (the default) rclone will first upload to a temporary file with an extension like this, where `XXXXXX` represents a
-random string and `.partial` is [--partial-suffix](#partial-suffix) value
-(`.partial` by default).
+hash of the source file's fingerprint and `.partial` is the
+[--partial-suffix](#partial-suffix) value (`.partial` by default).
 original-file-name.XXXXXX.partial
@@ -14846,11 +15267,12 @@ Suffix length limit is 16 characters. The default is `.partial`.
-### --password-command SpaceSepList ###
+### --password-command SpaceSepList {#password-command}
 This flag supplies a program which should supply the config password when run. This is an alternative to rclone prompting for the password
-or setting the `RCLONE_CONFIG_PASS` variable.
+or setting the `RCLONE_CONFIG_PASS` variable. It is also used when
+setting the config password for the first time.
 The argument to this should be a command with a space separated list of arguments. If one of the arguments has a space in then enclose it
@@ -14864,6 +15286,11 @@ Eg --password-command 'echo "hello with space"' --password-command 'echo "hello with ""quotes"" and space"'
+Note that when changing the configuration password the environment
+variable `RCLONE_PASSWORD_CHANGE=1` will be set. This can be used to
+distinguish initial decryption of the config file from the setting of
+a new password.
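+
+As a minimal sketch of using this (the script and password file paths
+here are hypothetical), a `--password-command` program could hand out
+the stored password normally but generate and store a new one when
+`RCLONE_PASSWORD_CHANGE` is set:
+
+```
+#!/bin/sh
+# Supply the rclone config password on stdout.
+# RCLONE_PASSWORD_CHANGE=1 is set by rclone when a new password is
+# being chosen rather than an existing config being decrypted.
+if [ "$RCLONE_PASSWORD_CHANGE" = "1" ]; then
+    # Generate a new password, store it, and print it for rclone.
+    openssl rand -base64 40 | tee "$HOME/.rclone-pass"
+else
+    cat "$HOME/.rclone-pass"
+fi
+```
+
+which would then be used as `--password-command "/path/to/rclone-pass.sh"`.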
+
 See the [Configuration Encryption](#configuration-encryption) for more info. See a [Windows PowerShell example on the Wiki](https://github.com/rclone/rclone/wiki/Windows-Powershell-use-rclone-password-command-for-Config-file-password).
@@ -15467,6 +15894,12 @@ encryption from your configuration. There is no way to recover the configuration if you lose your password.
+You can also use
+
+- [rclone config encryption set](https://rclone.org/commands/rclone_config_encryption_set/) to set the config encryption directly
+- [rclone config encryption remove](https://rclone.org/commands/rclone_config_encryption_remove/) to remove it
+- [rclone config encryption check](https://rclone.org/commands/rclone_config_encryption_check/) to check that it is encrypted properly.
+
 rclone uses [nacl secretbox](https://godoc.org/golang.org/x/crypto/nacl/secretbox) which in turn uses XSalsa20 and Poly1305 to encrypt and authenticate your configuration with secret-key cryptography.
@@ -15499,7 +15932,7 @@ An alternate means of supplying the password is to provide a script which will retrieve the password and print on standard output. This script should have a fully specified path name and not rely on any environment variables. The script is supplied either via
-`--password-command="..."` command line argument or via the
+[`--password-command="..."`](#password-command) command line argument or via the
 `RCLONE_PASSWORD_COMMAND` environment variable. One useful example of this is using the `passwordstore` application
@@ -15536,12 +15969,63 @@ a configuration file, you can avoid it being loaded by overriding the location, e.g. with one of the documented special values for memory-only configuration. Since only backend options can be stored in configuration files, this is normally unnecessary for commands
-that do not operate on backends, e.g. `genautocomplete`. However,
+that do not operate on backends, e.g. `completion`. However,
 it will be relevant for commands that do operate on backends in general, but are used without referencing a stored remote, e.g. listing local filesystem paths, or [connection strings](#connection-strings): `rclone --config="" ls .`
+Configuration Encryption Cheatsheet
+-----------------------------------
+You can quickly apply configuration encryption, without the password
+appearing in plain text at rest or in transit. Detailed instructions
+for popular OSes:
+
+### Mac ###
+
+* Generate and store a password
+
+`security add-generic-password -a rclone -s config -w $(openssl rand -base64 40)`
+
+* Add the retrieval instruction to your .zprofile / .profile
+
+`export RCLONE_PASSWORD_COMMAND="/usr/bin/security find-generic-password -a rclone -s config -w"`
+
+### Linux ###
+
+* Prerequisite
+
+Linux doesn't come with a default password manager. Let's install
+the "pass" utility using a package manager, e.g.
`apt install pass`,
+ `yum install pass`,
+ [etc.](https://www.passwordstore.org/#download); then initialize a
+ password store:
+
+`pass init rclone`
+
+* Generate and store a password
+
+`echo $(openssl rand -base64 40) | pass insert -m rclone/config`
+
+* Add the retrieval instruction
+
+`export RCLONE_PASSWORD_COMMAND="/usr/bin/pass rclone/config"`
+
+### Windows ###
+
+* Generate and store a password
+
+`New-Object -TypeName PSCredential -ArgumentList "rclone", (ConvertTo-SecureString -String ([System.Web.Security.Membership]::GeneratePassword(40, 10)) -AsPlainText -Force) | Export-Clixml -Path "rclone-credential.xml"`
+
+* Add the password retrieval instruction
+
+`[Environment]::SetEnvironmentVariable("RCLONE_PASSWORD_COMMAND", "[System.Runtime.InteropServices.Marshal]::PtrToStringAuto([System.Runtime.InteropServices.Marshal]::SecureStringToBSTR((Import-Clixml -Path "rclone-credential.xml").Password))")`
+
+### Encrypt the config file (all systems) ###
+
+* Execute `rclone config` -> `s`
+
+* Add/update the password from previous steps
+
 Developer options ----------------- @@ -15695,6 +16179,17 @@ Rclone prefixes all log messages with their level in capitals, e.g. INFO which makes it easy to grep the log file for different kinds of information.
+Metrics
+-------
+
+Rclone can publish metrics in the OpenMetrics/Prometheus format.
+
+To enable the metrics endpoint, use the `--metrics-addr` flag. Metrics can also be published on the `--rc-addr` port if the `--rc` and `--rc-enable-metrics` flags are supplied, or when using `rclone rcd --rc-enable-metrics`.
+
+Rclone provides extensive configuration options for the metrics HTTP endpoint. These settings are grouped under the Metrics section and have a prefix `--metrics-*`.
+
+When metrics are enabled with `--rc-enable-metrics`, they will be published on the same port as the rc API. In this case, the `--metrics-*` flags will be ignored, and the HTTP endpoint configuration will be managed by the `--rc-*` parameters.
+
 Exit Code --------- @@ -15860,37 +16355,33 @@ two ways of doing it, described below. ## Configuring using rclone authorize ##
-On the headless box run `rclone` config but answer `N` to the `Use web browser
-to automatically authenticate?` question.
+On the headless box run `rclone config` but answer `N` to the `Use auto config?` question.
```
-...
-Remote config
-Use web browser to automatically authenticate rclone with remote?
- * Say Y if the machine running rclone has a web browser you can use
- * Say N if running rclone on a (remote) machine without web browser access
-If not sure try Y. If Y failed, try N.
+Use auto config?
+ * Say Y if not sure
+ * Say N if you are working on a remote or headless machine
+
y) Yes (default) n) No y/n> n
+
+Option config_token. For this to work, you will need rclone available on a machine that has a web browser available.
-
For more help and alternate methods see: https://rclone.org/remote_setup/
-
Execute the following on the machine with the web browser (same rclone version recommended):
-
-   rclone authorize "dropbox"
-
-Then paste the result below:
-result>
+   rclone authorize "onedrive"
+Then paste the result.
+Enter a value.
+config_token>
```
Then on your main desktop machine
```
-rclone authorize "dropbox"
+rclone authorize "onedrive"
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth Log in and authorize rclone for access Waiting for code...
@@ -15903,7 +16394,7 @@ SECRET_TOKEN Then back to the headless box, paste in the code ```
-result> SECRET_TOKEN
+config_token> SECRET_TOKEN
-------------------- [acd12] client_id =
@@ -15945,16 +16436,13 @@ Linux and MacOS users can utilize SSH Tunnel to redirect the headless box port 5 ``` ssh -L localhost:53682:localhost:53682 username@remote_server ```
-Then on the headless box run `rclone` config and answer `Y` to the `Use web
-browser to automatically authenticate?` question.
+Then on the headless box run `rclone config` and answer `Y` to the `Use auto config?` question.
```
-...
-Remote config
-Use web browser to automatically authenticate rclone with remote?
- * Say Y if the machine running rclone has a web browser you can use
- * Say N if running rclone on a (remote) machine without web browser access
-If not sure try Y. If Y failed, try N.
+Use auto config?
+ * Say Y if not sure
+ * Say N if you are working on a remote or headless machine
+
y) Yes (default) n) No y/n> y
@@ -16630,7 +17118,7 @@ remote or flag value. The fix then is to quote values containing spaces. ### `--min-size` - Don't transfer any file smaller than this Controls the minimum size file within the scope of an rclone command.
-Default units are `KiB` but abbreviations `K`, `M`, `G`, `T` or `P` are valid.
+Default units are `KiB` but abbreviations `B`, `K`, `M`, `G`, `T` or `P` are valid.
E.g. `rclone ls remote: --min-size 50k` lists files on `remote:` of 50 KiB size or larger.
@@ -16640,7 +17128,7 @@ See [the size option docs](https://rclone.org/docs/#size-option) for more info. ### `--max-size` - Don't transfer any file larger than this Controls the maximum size file within the scope of an rclone command.
-Default units are `KiB` but abbreviations `K`, `M`, `G`, `T` or `P` are valid.
+Default units are `KiB` but abbreviations `B`, `K`, `M`, `G`, `T` or `P` are valid.
E.g. `rclone ls remote: --max-size 1G` lists files on `remote:` of 1 GiB size or smaller.
@@ -16960,6 +17448,7 @@ Default Off. ### --rc-enable-metrics Enable OpenMetrics/Prometheus compatible endpoint at `/metrics`.
+If more control over the metrics is desired (for example running it on a different port or with different auth) then the endpoint can be enabled with the `--metrics-*` flags instead.
 Default Off.
@@ -17260,6 +17749,76 @@ call and taken by the [options/set](#options-set) calls as well as the - `BandwidthSpec` - this will be set and returned as a string, eg "1M".
+### Option blocks {#option-blocks}
+
+The calls [options/info](#options-info) (for the main config) and
+[config/providers](#config-providers) (for the backend config) may be
+used to get information on the rclone configuration options. This can
+be used to build user interfaces for displaying and setting any rclone
+option.
+
+These consist of arrays of `Option` blocks. These have the following
+format. Each block describes a single option.
+ +| Field | Type | Optional | Description | +|-------|------|----------|-------------| +| Name | string | N | name of the option in snake_case | +| FieldName | string | N | name of the field used in the rc - if blank use Name | +| Help | string | N | help, started with a single sentence on a single line | +| Groups | string | Y | groups this option belongs to - comma separated string for options classification | +| Provider | string | Y | set to filter on provider | +| Default | any | N | default value, if set (and not to nil or "") then Required does nothing | +| Value | any | N | value to be set by flags | +| Examples | Examples | Y | predefined values that can be selected from list (multiple-choice option) | +| ShortOpt | string | Y | the short command line option for this | +| Hide | Visibility | N | if non zero, this option is hidden from the configurator or the command line | +| Required | bool | N | this option is required, meaning value cannot be empty unless there is a default | +| IsPassword | bool | N | set if the option is a password | +| NoPrefix | bool | N | set if the option for this should not use the backend prefix | +| Advanced | bool | N | set if this is an advanced config option | +| Exclusive | bool | N | set if the answer can only be one of the examples (empty string allowed unless Required or Default is set) | +| Sensitive | bool | N | set if this option should be redacted when using `rclone config redacted` | + +An example of this might be the `--log-level` flag. Note that the +`Name` of the option becomes the command line flag with `_` replaced +with `-`. + +``` +{ + "Advanced": false, + "Default": 5, + "DefaultStr": "NOTICE", + "Examples": [ + { + "Help": "", + "Value": "EMERGENCY" + }, + { + "Help": "", + "Value": "ALERT" + }, + ... + ], + "Exclusive": true, + "FieldName": "LogLevel", + "Groups": "Logging", + "Help": "Log level DEBUG|INFO|NOTICE|ERROR", + "Hide": 0, + "IsPassword": false, + "Name": "log_level", + "NoPrefix": true, + "Required": true, + "Sensitive": false, + "Type": "LogLevel", + "Value": null, + "ValueStr": "NOTICE" +}, +``` + +Note that the `Help` may be multiple lines separated by `\n`. The +first line will always be a short sentence and this is the sentence +shown when running `rclone help flags`. + ## Specifying remotes to work on Remotes are specified with the `fs=`, `srcFs=`, `dstFs=` @@ -17286,7 +17845,7 @@ For example this JSON is equivalent to `remote:/tmp` ``` { "_name": "remote", - "_path": "/tmp" + "_root": "/tmp" } ``` @@ -17296,7 +17855,7 @@ And this is equivalent to `:sftp,host='example.com':/tmp` { "type": "sftp", "host": "example.com", - "_path": "/tmp" + "_root": "/tmp" } ``` @@ -17305,7 +17864,7 @@ And this is equivalent to `/tmp/dir` ``` { type = "local", - _ path = "/tmp/dir" + _root = "/tmp/dir" } ``` @@ -17498,7 +18057,12 @@ See the [config paths](https://rclone.org/commands/rclone_config_paths/) command Returns a JSON object: - providers - array of objects -See the [config providers](https://rclone.org/commands/rclone_config_providers/) command for more information on the above. +See the [config providers](https://rclone.org/commands/rclone_config_providers/) command +for more information on the above. + +Note that the Options blocks are in the same format as returned by +"options/info". They are described in the +[option blocks](#option-blocks) section. 
**Authentication is required for this call.**
@@ -18500,6 +19064,11 @@ Returns: Returns an object where keys are option block names and values are an object with the current option values in.
+Parameters:
+
+- blocks: optional string of comma separated blocks to include
+    - all are included if this is missing or ""
+
 Note that these are the global options which are unaffected by use of the _config and _filter parameters. If you wish to read the parameters set in _config then use options/config and for _filter use options/filter.
@@ -18507,6 +19076,19 @@ set in _config then use options/config and for _filter use options/filter. This shows the internal names of the option within rclone which should map to the external options very easily with a few exceptions.
+### options/info: Get info about all the global options {#options-info}
+
+Returns an object where keys are option block names and values are an
+array of objects with info about each option.
+
+Parameters:
+
+- blocks: optional string of comma separated blocks to include
+    - all are included if this is missing or ""
+
+These objects are in the same format as returned by "config/providers". They are
+described in the [option blocks](#option-blocks) section.
+
 ### options/local: Get the currently active config for this call {#options-local} Returns an object with the keys "config" and "filter".
@@ -18790,6 +19372,73 @@ If poll-interval is updated or disabled temporarily, some changes might not get picked up by the polling function, depending on the used remote.
+This command takes an "fs" parameter. If this parameter is not
+supplied and if there is only one VFS in use then that VFS will be
+used. If there is more than one VFS in use then the "fs" parameter
+must be supplied.
+
+### vfs/queue: Queue info for a VFS. {#vfs-queue}
+
+This returns info about the upload queue for the selected VFS.
+
+This is only useful if `--vfs-cache-mode` > off. If you call it when
+the `--vfs-cache-mode` is off, it will return an empty result.
+
+    {
+        "queued": // an array of files queued for upload
+        [
+            {
+                "name": "file", // string: name (full path) of the file,
+                "id": 123, // integer: id of this item in the queue,
+                "size": 79, // integer: size of the file in bytes
+                "expiry": 1.5, // float: time until file is eligible for transfer, lowest goes first
+                "tries": 1, // integer: number of times we have tried to upload
+                "delay": 5.0, // float: seconds between upload attempts
+                "uploading": false, // boolean: true if item is being uploaded
+            },
+        ],
+    }
+
+The `expiry` time is the time until the file is eligible for being
+uploaded in floating point seconds. This may go negative. As rclone
+only transfers `--transfers` files at once, only the lowest
+`--transfers` expiry times will have `uploading` as `true`. So there
+may be files with negative expiry times for which `uploading` is
+`false`.
+
+This command takes an "fs" parameter. If this parameter is not
+supplied and if there is only one VFS in use then that VFS will be
+used. If there is more than one VFS in use then the "fs" parameter
+must be supplied.
+
+### vfs/queue-set-expiry: Set the expiry time for an item queued for upload. {#vfs-queue-set-expiry}
+
+Use this to adjust the `expiry` time for an item in the upload queue.
+You will need to read the `id` of the item using `vfs/queue` before
+using this call.
+
+You can then set `expiry` to a floating point number of seconds from
+now when the item is eligible for upload.
If you want the item to be
+uploaded as soon as possible then set it to a large negative number (eg
+-1000000000). If you want the upload of the item to be delayed
+for a long time then set it to a large positive number.
+
+Setting the `expiry` of an item which has already started uploading
+will have no effect - the item will carry on being uploaded.
+
+This will return an error if called with `--vfs-cache-mode` off or if
+the `id` passed is not found.
+
+This takes the following parameters:
+
+- `fs` - select the VFS in use (optional)
+- `id` - a numeric ID as returned from `vfs/queue`
+- `expiry` - a new expiry time as floating point seconds
+
+This returns an empty result on success, or an error.
+
 This command takes an "fs" parameter. If this parameter is not supplied and if there is only one VFS in use then that VFS will be used. If there is more than one VFS in use then the "fs" parameter must be supplied.
@@ -19096,7 +19745,9 @@ Here is an overview of the major features of each cloud storage system. | Citrix ShareFile | MD5 | R/W | Yes | No | - | - | | Dropbox | DBHASH ¹ | R | Yes | No | - | - | | Enterprise File Fabric | - | R/W | Yes | No | R/W | - |
+| Files.com | MD5, CRC32 | DR/W | Yes | No | R | - |
 | FTP | - | R/W ¹⁰ | No | No | - | - |
+| Gofile | MD5 | DR/W | No | Yes | R | - |
 | Google Cloud Storage | MD5 | R/W | No | No | R/W | - | | Google Drive | MD5, SHA1, SHA256 | DR/W | No | Yes | R/W | DRWU | | Google Photos | - | - | No | Yes | R | - |
@@ -19118,6 +19769,7 @@ Here is an overview of the major features of each cloud storage system. | Oracle Object Storage | MD5 | R/W | No | No | R/W | - | | pCloud | MD5, SHA1 ⁷ | R | No | No | W | - | | PikPak | MD5 | R | No | No | R | - |
+| Pixeldrain | SHA256 | R/W | No | No | R | RW |
 | premiumize.me | - | - | Yes | No | R | - | | put.io | CRC-32 | R/W | No | Yes | R | - | | Proton Drive | SHA1 | R/W | No | No | R | - |
@@ -19420,8 +20072,8 @@ have a Windows file system with Unicode fullwidth characters remote rather than being translated to regular (halfwidth) `*`, `?` and `:`. The `--backend-encoding` flags allow you to change that. You can
-disable the encoding completely with `--backend-encoding None` or set
-`encoding = None` in the config file.
+disable the encoding completely with `--backend-encoding Raw` or set
+`encoding = Raw` in the config file.
 Encoding takes a comma separated list of encodings. You can see the list of all possible values by passing an invalid value to this
@@ -19440,6 +20092,7 @@ will show you the defaults for the backends. | Dollar | `$` | `$` | | Dot | `.` or `..` as entire string | `.`, `..` | | DoubleQuote | `"` | `"` |
+| Exclamation | `!` | `!` |
 | Hash | `#` | `#` | | InvalidUtf8 | An invalid UTF-8 character (e.g. latin1) | `�` | | LeftCrLfHtVt | CR 0x0D, LF 0x0A, HT 0x09, VT 0x0B on the left of a string | `␍`, `␊`, `␉`, `␋` |
@@ -19447,7 +20100,7 @@ will show you the defaults for the backends. | LeftSpace | SPACE on the left of a string | `␠` | | LeftTilde | `~` on the left of a string | `~` | | LtGt | `<`, `>` | `<`, `>` |
-| None | No characters are encoded | |
+| None ¹ | NUL 0x00 | ␀ |
 | Percent | `%` | `%` | | Pipe | \| | `|` | | Question | `?` | `?` |
@@ -19459,6 +20112,10 @@ will show you the defaults for the backends. | Slash | `/` | `/` | | SquareBracket | `[`, `]` | `[`, `]` |
+¹ Encoding from NUL 0x00 to ␀ is always implicit except when using Raw.
+It was previously incorrectly documented as disabling encoding,
+and to maintain backward compatibility, its behavior has not been changed.
+ ##### Encoding example: FTP To take a specific example, the FTP backend's default encoding is @@ -19502,7 +20159,7 @@ the default value but without `Colon,Question,Asterisk`: --local-encoding "Slash,LtGt,DoubleQuote,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot" ``` -Alternatively, you can disable the conversion of any characters with `--local-encoding None`. +Alternatively, you can disable the conversion of any characters with `--local-encoding Raw`. Instead of using command-line argument `--local-encoding`, you may also set it as [environment variable](https://rclone.org/docs/#environment-variables) `RCLONE_LOCAL_ENCODING`, @@ -19566,14 +20223,16 @@ upon backend-specific capabilities. | Citrix ShareFile | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes | | Dropbox | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes | | Enterprise File Fabric | Yes | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | +| Files.com | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes | | FTP | No | No | Yes | Yes | No | No | Yes | No | No | No | Yes | +| Gofile | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes | | Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No | No | No | No | | Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes | | Google Photos | No | No | No | No | No | No | No | No | No | No | No | | HDFS | Yes | No | Yes | Yes | No | No | Yes | No | No | Yes | Yes | | HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes | | HTTP | No | No | No | No | No | No | No | No | No | No | Yes | -| ImageKit | Yes | Yes | Yes | No | No | No | No | No | No | No | Yes | +| ImageKit | Yes | Yes | Yes | No | No | No | No | No | No | No | Yes | | Internet Archive | No | Yes | No | No | Yes | Yes | No | No | Yes | Yes | No | | Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes | | Koofr | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes | @@ -19588,6 +20247,7 @@ upon backend-specific capabilities. | Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No | | pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes | | PikPak | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes | +| Pixeldrain | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes | | premiumize.me | Yes | No | Yes | Yes | No | No | No | No | Yes | Yes | Yes | | put.io | Yes | No | Yes | Yes | Yes | No | Yes | No | No | Yes | Yes | | Proton Drive | Yes | No | Yes | Yes | Yes | No | No | No | No | Yes | Yes | @@ -19711,12 +20371,12 @@ split into groups. ## Copy -Flags for anything which can Copy a file. +Flags for anything which can copy a file. ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -19751,7 +20411,7 @@ Flags for anything which can Copy a file. 
## Sync -Flags just used for `rclone sync`. +Flags used for sync commands. ``` --backup-dir string Make backups into hierarchy based in DIR @@ -19782,7 +20442,7 @@ Important flags useful for most commands. ## Check -Flags used for `rclone check`. +Flags used for check commands. ``` --max-backlog int Maximum number of objects in sync or check backlog (default 10000) @@ -19791,7 +20451,7 @@ Flags used for `rclone check`. ## Networking -General networking and HTTP stuff. +Flags for general networking and HTTP stuff. ``` --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name @@ -19801,7 +20461,7 @@ General networking and HTTP stuff. --client-cert string Client SSL certificate (PEM) for mutual TLS auth --client-key string Client SSL private key (PEM) for mutual TLS auth --contimeout Duration Connect timeout (default 1m0s) - --disable-http-keep-alives Disable HTTP keep-alives and use each connection once. + --disable-http-keep-alives Disable HTTP keep-alives and use each connection once --disable-http2 Disable HTTP/2 in the global transport --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21 --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s) @@ -19814,7 +20474,7 @@ General networking and HTTP stuff. --tpslimit float Limit HTTP transactions per second to this --tpslimit-burst int Max burst of transactions for --tpslimit (default 1) --use-cookies Enable session cookiejar - --user-agent string Set the user-agent to a specified string (default "rclone/v1.67.0") + --user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0") ``` @@ -19831,7 +20491,7 @@ Flags helpful for increasing performance. ## Config -General configuration of rclone. +Flags for general configuration of rclone. ``` --ask-password Allow prompt for password for encrypted configuration (default true) @@ -19915,7 +20575,7 @@ Flags for listing directories. ## Logging -Logging and statistics. +Flags for logging and statistics. ``` --log-file string Log everything to this file @@ -19934,7 +20594,7 @@ Logging and statistics. --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes") --syslog Use Syslog for logging - --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON") + --syslog-facility string Facility for syslog, e.g. KERN,USER (default "DAEMON") --use-json-log Use json log format -v, --verbose count Print lots more stuff (repeat for more) ``` @@ -19963,12 +20623,12 @@ Flags to control the Remote Control API. 
```
   --rc Enable the remote control server
-   --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
+   --rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
   --rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
   --rc-baseurl string Prefix for URLs - leave blank for root
   --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
   --rc-client-ca string Client certificate authority to verify clients with
-   --rc-enable-metrics Enable prometheus metrics on /metrics
+   --rc-enable-metrics Enable the Prometheus metrics path at the remote control server
   --rc-files string Path to local files to serve on the HTTP server
   --rc-htpasswd string A htpasswd file - if not provided no authentication is done
   --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
@@ -19994,9 +20654,34 @@ Flags to control the Remote Control API.
```
+## Metrics
+
+Flags to control the Metrics HTTP endpoint.
+
+```
+   --metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""])
+   --metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from
+   --metrics-baseurl string Prefix for URLs - leave blank for root
+   --metrics-cert string TLS PEM key (concatenation of certificate and CA certificate)
+   --metrics-client-ca string Client certificate authority to verify clients with
+   --metrics-htpasswd string A htpasswd file - if not provided no authentication is done
+   --metrics-key string TLS PEM Private key
+   --metrics-max-header-bytes int Maximum size of request header (default 4096)
+   --metrics-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
+   --metrics-pass string Password for authentication
+   --metrics-realm string Realm for authentication
+   --metrics-salt string Password hashing salt (default "dlPL2MqE")
+   --metrics-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+   --metrics-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
+   --metrics-template string User-specified template
+   --metrics-user string User name for authentication
+   --rc-enable-metrics Enable the Prometheus metrics path at the remote control server
+```
+
+
 ## Backend
-Backend only flags. These can be set in the config file also.
+Backend-only flags (these can be set in the config file also).
```
   --alias-description string Description of the remote
@@ -20220,6 +20905,12 @@ Backend only flags. These can be set in the config file also.
   --filefabric-token-expiry string Token expiry time
   --filefabric-url string URL of the Enterprise File Fabric to connect to
   --filefabric-version string Version read from the file fabric
+   --filescom-api-key string The API key used to authenticate with Files.com
+   --filescom-description string Description of the remote
+   --filescom-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot)
+   --filescom-password string The password used to authenticate with Files.com (obscured)
+   --filescom-site string Your site subdomain (e.g. mysite) or custom domain (e.g.
myfiles.customdomain.com) + --filescom-username string The username used to authenticate with Files.com --ftp-ask-password Allow asking for FTP password when needed --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited @@ -20263,6 +20954,12 @@ Backend only flags. These can be set in the config file also. --gcs-token string OAuth Access Token as a JSON blob --gcs-token-url string Token server url --gcs-user-project string User project + --gofile-access-token string API Access token + --gofile-account-id string Account ID + --gofile-description string Description of the remote + --gofile-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftPeriod,RightPeriod,InvalidUtf8,Dot,Exclamation) + --gofile-list-chunk int Number of items to list in each call (default 1000) + --gofile-root-folder-id string ID of the root folder --gphotos-auth-url string Auth server URL --gphotos-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) --gphotos-batch-mode string Upload file batching sync|async|off (default "sync") @@ -20354,6 +21051,7 @@ Backend only flags. These can be set in the config file also. --local-description string Description of the remote --local-encoding Encoding The encoding for the backend (default Slash,Dot) --local-no-check-updated Don't check to see if the files change during upload + --local-no-clone Disable reflink cloning for server-side copies --local-no-preallocate Disable preallocation of disk space for transferred files --local-no-set-modtime Disable setting modtime --local-no-sparse Disable sparse files for multi-thread downloads @@ -20470,6 +21168,10 @@ Backend only flags. These can be set in the config file also. --pikpak-upload-concurrency int Concurrency for multipart uploads (default 5) --pikpak-use-trash Send files to the trash instead of deleting permanently (default true) --pikpak-user string Pikpak username + --pixeldrain-api-key string API key for your pixeldrain account + --pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api") + --pixeldrain-description string Description of the remote + --pixeldrain-root-folder-id string Root of the filesystem to use (default "me") --premiumizeme-auth-url string Auth server URL --premiumizeme-client-id string OAuth Client Id --premiumizeme-client-secret string OAuth Client Secret @@ -20544,6 +21246,7 @@ Backend only flags. These can be set in the config file also. --s3-provider string Choose your S3 provider --s3-region string Region to connect to --s3-requester-pays Enables requester pays option when interacting with S3 bucket + --s3-sdk-log-mode Bits Set to debug the SDK (default Off) --s3-secret-access-key string AWS Secret Access Key (password) --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 --s3-session-token string An AWS session token @@ -20554,7 +21257,6 @@ Backend only flags. These can be set in the config file also. 
--s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key --s3-storage-class string The storage class to use when storing new objects in S3 - --s3-sts-endpoint string Endpoint for STS --s3-upload-concurrency int Concurrency for multipart uploads and copies (default 4) --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint @@ -20564,6 +21266,7 @@ Backend only flags. These can be set in the config file also. --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) --s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset) --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads + --s3-use-unsigned-payload Tristate Whether to use an unsigned payload in PutObject (default unset) --s3-v2-auth If true use v2 authentication --s3-version-at Time Show file versions as they were at the specified time (default off) --s3-version-deleted Show deleted file markers when using versions @@ -20672,10 +21375,12 @@ Backend only flags. These can be set in the config file also. --swift-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8) --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") --swift-env-auth Get swift credentials from environment variables in standard OpenStack form + --swift-fetch-until-empty-page When paginating, always fetch unless we received an empty page --swift-key string API key or password (OS_PASSWORD) --swift-leave-parts-on-error If true avoid calling abort upload on a failure --swift-no-chunk Don't chunk files during streaming upload --swift-no-large-objects Disable support for static and dynamic large objects + --swift-partial-page-fetch-threshold int When paginating, fetch if the current page is within this percentage of the limit --swift-region string Region name - optional (OS_REGION_NAME) --swift-storage-policy string The storage policy to use when creating a new container --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) @@ -20713,6 +21418,7 @@ Backend only flags. These can be set in the config file also. --webdav-owncloud-exclude-shares Exclude ownCloud shares --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms) --webdav-pass string Password (obscured) + --webdav-unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly --webdav-url string URL of http host to connect to --webdav-user string User name --webdav-vendor string Name of the WebDAV site/service/software you are using @@ -20722,6 +21428,7 @@ Backend only flags. These can be set in the config file also. --yandex-description string Description of the remote --yandex-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) --yandex-hard-delete Delete files permanently rather than putting them into the trash + --yandex-spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance (default true) --yandex-token string OAuth Access Token as a JSON blob --yandex-token-url string Token server url --zoho-auth-url string Auth server URL @@ -23097,6 +23804,12 @@ about _Unison_ and synchronization in general. 
## Changelog +### `v1.68` +* Fixed an issue affecting backends that round modtimes to a lower precision. + +### `v1.67` +* Added integration tests against all backends. + ### `v1.66` * Copies and deletes are now handled in one operation instead of two * `--track-renames` and `--backup-dir` are now supported @@ -23284,7 +23997,7 @@ $ rclone hashsum sha256 -C SHA256SUMS rclone-v1.63.1-windows-amd64.zip You can verify the signatures and hashes in one command line like this: ``` -$ gpg --decrypt SHA256SUMS | sha256sum -c --ignore-missing +$ h=$(gpg --decrypt SHA256SUMS) && echo "$h" | sha256sum - -c --ignore-missing gpg: Signature made Mon 17 Jul 2023 15:03:17 BST gpg: using DSA key FBF737ECE9F8AB18604BD2AC93935E02FF3B54FA gpg: Good signature from "Nick Craig-Wood " [ultimate] @@ -23339,11 +24052,11 @@ y) Yes n) No y/n> Remote config --------------------- -[remote] -type = fichier -api_key = example_key --------------------- +Configuration complete. +Options: +- type: fichier +- api_key: example_key +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -23557,10 +24270,11 @@ Remote or path to alias. Can be "myremote:path/to/dir", "myremote:bucket", "myremote:" or "/local/path". remote> /mnt/storage/backup Remote config --------------------- -[remote] -remote = /mnt/storage/backup --------------------- +Configuration complete. +Options: +- type: alias +- remote: /mnt/storage/backup +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -23878,20 +24592,20 @@ Choose a number from below, or type in your own value \ "GLACIER_IR" storage_class> 1 Remote config --------------------- -[remote] -type = s3 -provider = AWS -env_auth = false -access_key_id = XXX -secret_access_key = YYY -region = us-east-1 -endpoint = -location_constraint = -acl = private -server_side_encryption = -storage_class = --------------------- +Configuration complete. +Options: +- type: s3 +- provider: AWS +- env_auth: false +- access_key_id: XXX +- secret_access_key: YYY +- region: us-east-1 +- endpoint: +- location_constraint: +- acl: private +- server_side_encryption: +- storage_class: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -24197,15 +24911,21 @@ The different authentication methods are tried in this order: - Session Token: `AWS_SESSION_TOKEN` (optional) - Or, use a [named profile](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html): - Profile files are standard files used by AWS CLI tools - - By default it will use the profile in your home directory (e.g. `~/.aws/credentials` on unix based systems) file and the "default" profile, to change set these environment variables: - - `AWS_SHARED_CREDENTIALS_FILE` to control which file. - - `AWS_PROFILE` to control which profile to use. + - By default it will use the profile in your home directory (e.g. `~/.aws/credentials` on unix based systems) file and the "default" profile, to change set these environment variables or config keys: + - `AWS_SHARED_CREDENTIALS_FILE` to control which file or the `shared_credentials_file` config key. + - `AWS_PROFILE` to control which profile to use or the `profile` config key. - Or, run `rclone` in an ECS task with an IAM role (AWS only). - Or, run `rclone` on an EC2 instance with an IAM role (AWS only). - Or, run `rclone` in an EKS pod with an IAM role that is associated with a service account (AWS only). 
+ - Or, use [process credentials](https://docs.aws.amazon.com/sdkref/latest/guide/feature-process-credentials.html) to read config from an external program. + +With `env_auth = true` rclone (which uses the SDK for Go v2) should support +[all authentication methods](https://docs.aws.amazon.com/sdkref/latest/guide/standardized-credentials.html) +that the `aws` CLI tool does and the other AWS SDKs. If none of these option actually end up providing `rclone` with AWS -credentials then S3 interaction will be non-authenticated (see below). +credentials then S3 interaction will be non-authenticated (see the +[anonymous access](#anonymous-access) section for more info). ### S3 Permissions @@ -25029,6 +25749,10 @@ Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this false - rclone will do this automatically based on the provider setting. +Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, +you'll need to set this to true. + + Properties: - Config: force_path_style @@ -25318,6 +26042,24 @@ Properties: - Type: Tristate - Default: unset +#### --s3-use-unsigned-payload + +Whether to use an unsigned payload in PutObject + +Rclone has to avoid the AWS SDK seeking the body when calling +PutObject. The AWS provider can add checksums in the trailer to avoid +seeking but other providers can't. + +This should be true, false or left unset to use the default for the provider. + + +Properties: + +- Config: use_unsigned_payload +- Env Var: RCLONE_S3_USE_UNSIGNED_PAYLOAD +- Type: Tristate +- Default: unset + #### --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -25475,7 +26217,7 @@ Properties: #### --s3-sts-endpoint -Endpoint for STS. +Endpoint for STS (deprecated). Leave blank if using AWS to use the default endpoint for the region. @@ -25538,6 +26280,33 @@ Properties: - Type: Tristate - Default: unset +#### --s3-sdk-log-mode + +Set to debug the SDK + +This can be set to a comma separated list of the following functions: + +- `Signing` +- `Retries` +- `Request` +- `RequestWithBody` +- `Response` +- `ResponseWithBody` +- `DeprecatedUsage` +- `RequestEventMessage` +- `ResponseEventMessage` + +Use `Off` to disable and `All` to set all log levels. You will need to +use `-vv` to see the debug level logs. + + +Properties: + +- Config: sdk_log_mode +- Env Var: RCLONE_S3_SDK_LOG_MODE +- Type: Bits +- Default: Off + #### --s3-description Description of the remote. @@ -25586,18 +26355,19 @@ These can be run on a running backend using the rc command ### restore -Restore objects from GLACIER to normal storage +Restore objects from GLACIER or INTELLIGENT-TIERING archive tier rclone backend restore remote: [options] [+] -This command can be used to restore one or more objects from GLACIER -to normal storage. +This command can be used to restore one or more objects from GLACIER to normal storage +or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier. Usage Examples: rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS + rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags @@ -25627,17 +26397,17 @@ if not. Options: - "description": The optional description for the job. 
-- "lifetime": Lifetime of the active copy in days +- "lifetime": Lifetime of the active copy in days, ignored for INTELLIGENT-TIERING storage - "priority": Priority of restore: Standard|Expedited|Bulk ### restore-status -Show the restore status for objects being restored from GLACIER to normal storage +Show the restore status for objects being restored from GLACIER or INTELLIGENT-TIERING storage rclone backend restore-status remote: [options] [+] -This command can be used to show the status for objects being restored from GLACIER -to normal storage. +This command can be used to show the status for objects being restored from GLACIER to normal storage +or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier. Usage Examples: @@ -25667,6 +26437,15 @@ It returns a list of status dictionaries. "RestoreExpiryDate": "2023-09-06T12:29:19+01:00" }, "StorageClass": "DEEP_ARCHIVE" + }, + { + "Remote": "test.gz", + "VersionID": null, + "RestoreStatus": { + "IsRestoreInProgress": true, + "RestoreExpiryDate": "null" + }, + "StorageClass": "INTELLIGENT_TIERING" } ] @@ -25795,7 +26574,7 @@ It doesn't return anything. -### Anonymous access to public buckets +### Anonymous access to public buckets {#anonymous-access} If you want to use rclone to access a public bucket, configure with a blank `access_key_id` and `secret_access_key`. Your config should end @@ -25805,15 +26584,6 @@ up looking like this: [anons3] type = s3 provider = AWS -env_auth = false -access_key_id = -secret_access_key = -region = us-east-1 -endpoint = -location_constraint = -acl = private -server_side_encryption = -storage_class = ``` Then use it as normal with the name of the public bucket, e.g. @@ -25822,6 +26592,10 @@ Then use it as normal with the name of the public bucket, e.g. You will be able to list and copy data but not upload it. +You can also do this entirely on the command line + + rclone lsd :s3,provider=AWS:1000genomes + ## Providers ### AWS S3 @@ -26011,7 +26785,14 @@ acl = private Now run `rclone lsf r2:` to see your buckets and `rclone lsf r2:bucket` to look within a bucket. -For R2 tokens with the "Object Read & Write" permission, you may also need to add `no_check_bucket = true` for object uploads to work correctly. +For R2 tokens with the "Object Read & Write" permission, you may also +need to add `no_check_bucket = true` for object uploads to work +correctly. + +Note that Cloudflare decompresses files uploaded with +`Content-Encoding: gzip` by default which is a deviation from what AWS +does. If this is causing a problem then upload the files with +`--header-upload "Cache-Control: no-transform"` ### Dreamhost @@ -28642,6 +29423,28 @@ nodes across the network. For more detailed comparison please check the documentation of the [storj](/storj) backend. +## Memory usage {memory} + +The most common cause of rclone using lots of memory is a single +directory with millions of files in. Despite s3 not really having the +concepts of directories, rclone does the sync on a directory by +directory basis to be compatible with normal filing systems. + +Rclone loads each directory into memory as rclone objects. Each rclone +object takes 0.5k-1k of memory, so approximately 1GB per 1,000,000 +files, and the sync for that directory does not begin until it is +entirely loaded in memory. So the sync can take a long time to start +for large directories. + +To sync a directory with 100,000,000 files in you would need approximately +100 GB of memory. 
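+As a rough pre-flight check you can count the objects before starting
+a big sync and budget about 1GB of RAM per million of them. A minimal
+sketch (the bucket and path are placeholders; note that `rclone size`
+counts recursively, so this is an upper bound for any single
+directory):
+
+```
+# s3:bucket/path is a placeholder remote path
+rclone size --json s3:bucket/path
+```
+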
At some point the amount of memory becomes difficult +to provide so there is +[a workaround for this](https://github.com/rclone/rclone/wiki/Big-syncs-with-millions-of-files) +which involves a bit of scripting. + +At some point rclone will gain a sync mode which is effectively this +workaround but built in to rclone. + ## Limitations `rclone about` is not supported by the S3 backend. Backends without @@ -28652,7 +29455,6 @@ remote. See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/) - ### Synology C2 Object Storage {#synology-c2} [Synology C2 Object Storage](https://c2.synology.com/en-global/object-storage/overview) provides a secure, S3-compatible, and cost-effective cloud storage solution without API request, download fees, and deletion penalty. @@ -28825,12 +29627,13 @@ key> 0123456789abcdef0123456789abcdef0123456789 Endpoint for the service - leave blank normally. endpoint> Remote config --------------------- -[remote] -account = 123456789abc -key = 0123456789abcdef0123456789abcdef0123456789 -endpoint = --------------------- +Configuration complete. +Options: +- type: b2 +- account: 123456789abc +- key: 0123456789abcdef0123456789abcdef0123456789 +- endpoint: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -28946,12 +29749,21 @@ used. ### Versions -When rclone uploads a new version of a file it creates a [new version +The default setting of B2 is to keep old versions of files. This means +when rclone uploads a new version of a file it creates a [new version of it](https://www.backblaze.com/docs/cloud-storage-file-versions). Likewise when you delete a file, the old version will be marked hidden -and still be available. Conversely, you may opt in to a "hard delete" -of files with the `--b2-hard-delete` flag which would permanently remove -the file instead of hiding it. +and still be available. + +Whether B2 keeps old versions of files or not can be adjusted on a per +bucket basis using the "Lifecycle settings" on the B2 control panel or +when creating the bucket using the [--b2-lifecycle](#b2-lifecycle) +flag or after creation using the [rclone backend lifecycle](#lifecycle) +command. + +You may opt in to a "hard delete" of files with the `--b2-hard-delete` +flag which permanently removes files on deletion instead of hiding +them. Old versions of files, where available, are visible using the `--b2-versions` flag. @@ -29616,12 +30428,13 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -client_id = -client_secret = -token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"XXX"} --------------------- +Configuration complete. +Options: +- type: box +- client_id: +- client_secret: +- token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"XXX"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -29704,11 +30517,11 @@ e/n/d/r/c/s/q> e Choose a number from below, or type in an existing value 1 > remote remote> remote --------------------- -[remote] -type = box -token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2017-07-08T23:40:08.059167677+01:00"} --------------------- +Configuration complete. 
+Options: +- type: box +- token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2017-07-08T23:40:08.059167677+01:00"} +Keep this "remote" remote? Edit remote Value "client_id" = "" Edit? (y/n)> @@ -29736,11 +30549,11 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = box -token = {"access_token":"YYY","token_type":"bearer","refresh_token":"YYY","expiry":"2017-07-23T12:22:29.259137901+01:00"} --------------------- +Configuration complete. +Options: +- type: box +- token: {"access_token":"YYY","token_type":"bearer","refresh_token":"YYY","expiry":"2017-07-23T12:22:29.259137901+01:00"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -31322,12 +32135,12 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = sharefile -endpoint = https://XXX.sharefile.com -token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"} --------------------- +Configuration complete. +Options: +- type: sharefile +- endpoint: https://XXX.sharefile.com +- token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -32659,11 +33472,11 @@ Embedded spaces can be added using quotes "dir=remote:path with space" "dir2=remote2:path with space" Enter a fs.SpaceSepList value. upstreams> images=s3:imagesbucket files=drive:important/files --------------------- -[remote] -type = combine -upstreams = images=s3:imagesbucket files=drive:important/files --------------------- +Configuration complete. +Options: +- type: combine +- upstreams: images=s3:imagesbucket files=drive:important/files +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -32792,12 +33605,13 @@ Remote config Please visit: https://www.dropbox.com/1/oauth2/authorize?client_id=XXXXXXXXXXXXXXX&response_type=code Enter the code: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXXXXXXXX --------------------- -[remote] -app_key = -app_secret = -token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX --------------------- +Configuration complete. +Options: +- type: dropbox +- app_key: +- app_secret: +- token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -33160,7 +33974,7 @@ Max number of files in upload batch. This sets the batch size of files to upload. It has to be less than 1000. -By default this is 0 which means rclone which calculate the batch size +By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode. - batch_mode: async - default batch_size is 100 @@ -33350,12 +34164,12 @@ y) Yes n) No (default) y/n> n Remote config --------------------- -[remote] -type = filefabric -url = https://yourfabric.smestorage.com/ -permanent_token = xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx --------------------- +Configuration complete. +Options: +- type: filefabric +- url: https://yourfabric.smestorage.com/ +- permanent_token: xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx +Keep this "remote" remote? 
y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -33562,6 +34376,182 @@ Properties: +# Files.com + +[Files.com](https://www.files.com/) is a cloud storage service that provides a +secure and easy way to store and share files. + +The initial setup for filescom involves authenticating with your Files.com +account. You can do this by providing your site subdomain, username, and +password. Alternatively, you can authenticate using an API Key from +[Files.com](https://www.files.com/docs/sdk-and-apis/api-keys/). +`rclone config` walks you through it. + +## Configuration + +Here is an example of how to make a remote called `remote`. First run: + + rclone config + +This will guide you through an interactive setup process: + + No remotes found, make a new one? + n) New remote + s) Set configuration password + q) Quit config + n/s/q> n + + Enter name for new remote. + name> remote + + Option Storage. + Type of storage to configure. + Choose a number from below, or type in your own value. + [snip] + XX / Files.com + \ "filescom" + [snip] + Storage> filescom + + Option site. + Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com) + Enter a value. Press Enter to leave empty. + site> mysite + + Option username. + The username used to authenticate with Files.com. + Enter a value. Press Enter to leave empty. + username> user + + Option password. + The password used to authenticate with Files.com. + Choose an alternative below. Press Enter for the default (n). + y) Yes, type in my own password + g) Generate random password + n) No, leave this optional password blank (default) + y/g/n> y + Enter the password: + password: + Confirm the password: + password: + + Edit advanced config? + y) Yes + n) No (default) + y/n> n + + Configuration complete. + Options: + - type: filescom + - site: mysite + - username: user + - password: *** ENCRYPTED *** + Keep this "remote" remote? + y) Yes this is OK (default) + e) Edit this remote + d) Delete this remote + y/e/d> y + +Once configured you can use rclone. + +See all files in the top level: + + rclone lsf remote: + +Make a new directory in the root: + + rclone mkdir remote:dir + +Recursively List the contents: + + rclone ls remote: + +Sync `/home/local/directory` to the remote directory, deleting any +excess files in the directory. + + rclone sync --interactive /home/local/directory remote:dir + + +### Standard options + +Here are the Standard options specific to filescom (Files.com). + +#### --filescom-site + +Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com). + +Properties: + +- Config: site +- Env Var: RCLONE_FILESCOM_SITE +- Type: string +- Required: false + +#### --filescom-username + +The username used to authenticate with Files.com. + +Properties: + +- Config: username +- Env Var: RCLONE_FILESCOM_USERNAME +- Type: string +- Required: false + +#### --filescom-password + +The password used to authenticate with Files.com. + +**NB** Input to this must be obscured - see [rclone obscure](https://rclone.org/commands/rclone_obscure/). + +Properties: + +- Config: password +- Env Var: RCLONE_FILESCOM_PASSWORD +- Type: string +- Required: false + +### Advanced options + +Here are the Advanced options specific to filescom (Files.com). + +#### --filescom-api-key + +The API key used to authenticate with Files.com. + +Properties: + +- Config: api_key +- Env Var: RCLONE_FILESCOM_API_KEY +- Type: string +- Required: false + +#### --filescom-encoding + +The encoding for the backend. 
+ +See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info. + +Properties: + +- Config: encoding +- Env Var: RCLONE_FILESCOM_ENCODING +- Type: Encoding +- Default: Slash,BackSlash,Del,Ctl,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot + +#### --filescom-description + +Description of the remote. + +Properties: + +- Config: description +- Env Var: RCLONE_FILESCOM_DESCRIPTION +- Type: string +- Required: false + + + # FTP FTP is the File Transfer Protocol. Rclone FTP support is provided using the @@ -33630,12 +34620,12 @@ Use FTP over TLS (Explicit) Enter a boolean value (true or false). Press Enter for the default ("false"). explicit_tls> Remote config --------------------- -[remote] -type = ftp -host = ftp.example.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: ftp +- host: ftp.example.com +- pass: *** ENCRYPTED *** +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -34075,6 +35065,268 @@ designating the remote time precision expressed as nanoseconds. A value of `1000000000` means that file time precision of 1 second is available. A value of `3153600000000000000` (or another large number) means "unsupported". +# Gofile + +[Gofile](https://gofile.io) is a content storage and distribution +platform. Its aim is to provide as much service as possible for free +or at a very low price. + +The initial setup for Gofile involves logging in to the web interface +and going to the "My Profile" section. Copy the "Account API token" +for use in the config file. + +Note that if you wish to connect rclone to Gofile you will need a +premium account. + +## Configuration + +Here is an example of how to make a remote called `remote`. First run: + + rclone config + +This will guide you through an interactive setup process: + +``` +No remotes found, make a new one? +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n + +Enter name for new remote. +name> remote + +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +XX / Gofile + \ (gofile) +Storage> gofile + +Option access_token. +API Access token +You can get this from the web control panel. +Enter a value. Press Enter to leave empty. +access_token> YOURACCESSTOKEN + +Edit advanced config? +y) Yes +n) No (default) +y/n> n + +Configuration complete. +Options: +- type: gofile +- access_token: YOURACCESSTOKEN +Keep this "remote" remote? +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + +Once configured you can then use `rclone` like this, + +List directories and files in the top level of your Gofile + + rclone lsf remote: + +To copy a local directory to an Gofile directory called backup + + rclone copy /home/source remote:backup + +### Modification times and hashes + +Gofile supports modification times with a resolution of 1 second. + +Gofile supports MD5 hashes, so you can use the `--checksum` flag. + +### Restricted filename characters + +In addition to the [default restricted characters set](https://rclone.org/overview/#restricted-characters) +the following characters are also replaced: + +| Character | Value | Replacement | +| --------- |:-----:|:-----------:| +| ! | 0x21 | ! | +| " | 0x22 | " | +| * | 0x2A | * | +| : | 0x3A | : | +| < | 0x3C | < | +| > | 0x3E | > | +| ? | 0x3F | ? | +| \ | 0x5C | \ | +| \| | 0x7C | | | + + +File names can also not start or end with the following characters. 
+These only get replaced if they are the first or last character in the +name: + +| Character | Value | Replacement | +| --------- |:-----:|:-----------:| +| . | 0x2E | . | + +Invalid UTF-8 bytes will also be [replaced](https://rclone.org/overview/#invalid-utf8), +as they can't be used in JSON strings. + +### Public Links + +Gofile supports `rclone link` to make public links to files or +directories. If you specify a directory it will download as a `zip` +file. You can use the `--expire` flag to specify the time the link +should be valid. Note that `rclone link --unlink` removes all the +public links for a file. + +### Root folder ID + +You can set the `root_folder_id` for rclone. This is the directory +(identified by its `Folder ID`) that rclone considers to be the root +of your Gofile drive. + +Normally you will leave this blank and rclone will determine the +correct root to use itself and fill in the value in the config file. + +However you can set this to restrict rclone to a specific folder +hierarchy. + +In order to do this you will have to find the `Folder ID` of the +directory you wish rclone to display. + +You can do this with rclone + +``` +$ rclone lsf -Fip --dirs-only remote: +d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/ +f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/ +d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/ +``` + +The ID to use is the part before the `;` so you could set + +``` +root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0 +``` + +To restrict rclone to the `Files` directory. + + +### Standard options + +Here are the Standard options specific to gofile (Gofile). + +#### --gofile-access-token + +API Access token + +You can get this from the web control panel. + +Properties: + +- Config: access_token +- Env Var: RCLONE_GOFILE_ACCESS_TOKEN +- Type: string +- Required: false + +### Advanced options + +Here are the Advanced options specific to gofile (Gofile). + +#### --gofile-root-folder-id + +ID of the root folder + +Leave this blank normally, rclone will fill it in automatically. + +If you want rclone to be restricted to a particular folder you can +fill it in - see the docs for more info. + + +Properties: + +- Config: root_folder_id +- Env Var: RCLONE_GOFILE_ROOT_FOLDER_ID +- Type: string +- Required: false + +#### --gofile-account-id + +Account ID + +Leave this blank normally, rclone will fill it in automatically. + + +Properties: + +- Config: account_id +- Env Var: RCLONE_GOFILE_ACCOUNT_ID +- Type: string +- Required: false + +#### --gofile-list-chunk + +Number of items to list in each call + +Properties: + +- Config: list_chunk +- Env Var: RCLONE_GOFILE_LIST_CHUNK +- Type: int +- Default: 1000 + +#### --gofile-encoding + +The encoding for the backend. + +See the [encoding section in the overview](https://rclone.org/overview/#encoding) for more info. + +Properties: + +- Config: encoding +- Env Var: RCLONE_GOFILE_ENCODING +- Type: Encoding +- Default: Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftPeriod,RightPeriod,InvalidUtf8,Dot,Exclamation + +#### --gofile-description + +Description of the remote. + +Properties: + +- Config: description +- Env Var: RCLONE_GOFILE_DESCRIPTION +- Type: string +- Required: false + + + +## Limitations + +Gofile only supports filenames up to 255 characters in length, where a +character is a unicode character. + +Directories should not be cached for more than 24h otherwise files in +the directory may not be downloadable. 
In practice this means when +using a VFS based rclone command such as `rclone mount` you should +make sure `--dir-cache-time` is less than `24h`. + +Note that Gofile is currently limited to a total of 100,000 items. If +you attempt to upload more than that you will get +`error-limit-100000`. This limit may be lifted in the future. + +### Duplicated files + +Gofile is capable of having files with duplicated file names. For +instance two files called `hello.txt` in the same directory. + +Rclone cannot sync that to a normal file system but it can be fixed +with the `rclone dedupe` command. + +Duplicated files cause problems with the syncing and you will see +messages in the log about duplicates. + +Use `rclone dedupe` to fix duplicated files. + # Google Cloud Storage Paths are specified as `remote:bucket` (or `remote:` for the `lsd` @@ -34199,16 +35451,16 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = google cloud storage -client_id = -client_secret = -token = {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null} -project_number = 12345678 -object_acl = private -bucket_acl = private --------------------- +Configuration complete. +Options: +- type: google cloud storage +- client_id: +- client_secret: +- token: {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null} +- project_number: 12345678 +- object_acl: private +- bucket_acl: private +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -34865,15 +36117,16 @@ Configure this as a Shared Drive (Team Drive)? y) Yes n) No y/n> n --------------------- -[remote] -client_id = -client_secret = -scope = drive -root_folder_id = -service_account_file = -token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2014-03-16T13:57:58.955387075Z"} --------------------- +Configuration complete. +Options: +type: drive +- client_id: +- client_secret: +- scope: drive +- root_folder_id: +- service_account_file: +- token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2014-03-16T13:57:58.955387075Z"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -35001,42 +36254,49 @@ credentials file into the rclone config file, you can set `service_account_credentials` with the actual contents of the file instead, or set the equivalent environment variable. -#### Use case - Google Apps/G-suite account and individual Drive +#### Use case - Google Workspace account and individual Drive -Let's say that you are the administrator of a Google Apps (old) or -G-suite account. -The goal is to store data on an individual's Drive account, who IS -a member of the domain. -We'll call the domain **example.com**, and the user -**foo@example.com**. +Let's say that you are the administrator of a Google Workspace. The +goal is to read or write data on an individual's Drive account, who IS +a member of the domain. We'll call the domain **example.com**, and the +user **foo@example.com**. There's a few steps we need to go through to accomplish this: ##### 1. 
Create a service account for example.com + - To create a service account and obtain its credentials, go to the [Google Developer Console](https://console.developers.google.com). - - You must have a project - create one if you don't. + - You must have a project - create one if you don't and make sure you are on the selected project. - Then go to "IAM & admin" -> "Service Accounts". - Use the "Create Service Account" button. Fill in "Service account name" and "Service account ID" with something that identifies your client. - Select "Create And Continue". Step 2 and 3 are optional. - - These credentials are what rclone will use for authentication. + - Click on the newly created service account + - Click "Keys" and then "Add Key" and then "Create new key" + - Choose type "JSON" and click create + - This will download a small JSON file that rclone will use for authentication. + If you ever need to remove access, press the "Delete service account key" button. ##### 2. Allowing API access to example.com Google Drive - - Go to example.com's admin console + + - Go to example.com's [Workspace Admin Console](https://admin.google.com) - Go into "Security" (or use the search bar) - - Select "Show more" and then "Advanced settings" - - Select "Manage API client access" in the "Authentication" section - - In the "Client Name" field enter the service account's + - Select "Access and data control" and then "API controls" + - Click "Manage domain-wide delegation" + - Click "Add new" + - In the "Client ID" field enter the service account's "Client ID" - this can be found in the Developer Console under "IAM & Admin" -> "Service Accounts", then "View Client ID" for the newly created service account. It is a ~21 character numerical string. - - In the next field, "One or More API Scopes", enter + - In the next field, "OAuth Scopes", enter `https://www.googleapis.com/auth/drive` -to grant access to Google Drive specifically. +to grant read/write access to Google Drive specifically. +You can also use `https://www.googleapis.com/auth/drive.readonly` for read only access. + - Click "Authorise" ##### 3. Configure rclone, assuming a new install @@ -35045,17 +36305,18 @@ rclone config n/s/q> n # New name>gdrive # Gdrive is an example name -Storage> # Select the number shown for Google Drive +Storage> # Type drive client_id> # Can be left blank client_secret> # Can be left blank -scope> # Select your scope, 1 for example +scope> # Select the scope use used in step 2 root_folder_id> # Can be left blank -service_account_file> /home/foo/myJSONfile.json # This is where the JSON file goes! +service_account_file> /home/foo/myJSONfile.json # Path to the JSON file you downloaded in step 1. y/n> # Auto config, n ``` ##### 4. Verify that it's working + - `rclone -v --drive-impersonate foo@example.com lsf gdrive:backup` - The arguments do: - `-v` - verbose logging @@ -35066,7 +36327,7 @@ the magic, pretending to be user foo. the folder named backup. 
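Once the listing works, the same flag applies to any other command.
+A minimal sketch of a copy using impersonation (the remote name
+`gdrive:`, the user `foo@example.com` and the local path are just
+placeholders following the examples above):
+
+```
+# /home/foo/docs is a placeholder local path
+rclone copy -v --drive-impersonate foo@example.com /home/foo/docs gdrive:backup
+```
+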
Note: in case you configured a specific root folder on gdrive and rclone is unable to access the contents of that folder when using `--drive-impersonate`, do this instead: - - in the gdrive web interface, share your root folder with the user/email of the new Service Account you created/selected at step #1 + - in the gdrive web interface, share your root folder with the user/email of the new Service Account you created/selected at step 1 - use rclone without specifying the `--drive-impersonate` option, like this: `rclone -v lsf gdrive:backup` @@ -35097,13 +36358,14 @@ Choose a number from below, or type in your own value 3 / Rclone Test 3 \ "zzzzzzzzzzzzzzzzzzzz" Enter a Shared Drive ID> 1 --------------------- -[remote] -client_id = -client_secret = -token = {"AccessToken":"xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null} -team_drive = xxxxxxxxxxxxxxxxxxxx --------------------- +Configuration complete. +Options: +- type: drive +- client_id: +- client_secret: +- token: {"AccessToken":"xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null} +- team_drive: xxxxxxxxxxxxxxxxxxxx +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -36691,11 +37953,11 @@ Got code *** are stored in full resolution at original quality. These uploads *** will count towards storage in your Google Account. --------------------- -[remote] -type = google photos -token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2019-06-28T17:38:04.644930156+01:00"} --------------------- +Configuration complete. +Options: +- type: google photos +- token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2019-06-28T17:38:04.644930156+01:00"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -37019,7 +38281,7 @@ Max number of files in upload batch. This sets the batch size of files to upload. It has to be less than 50. -By default this is 0 which means rclone which calculate the batch size +By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode. - batch_mode: async - default batch_size is 50 @@ -37510,6 +38772,7 @@ The `rclone hashsum` (or `md5sum` or `sha1sum`) command will: ### Other operations +- any time a hash is requested, follow the logic from 1-4 from `hashsum` above - whenever a file is uploaded or downloaded **in full**, capture the stream to calculate all supported hashes on the fly and update database - server-side `move` will update keys of existing cache entries @@ -37581,12 +38844,12 @@ y) Yes n) No (default) y/n> n Remote config --------------------- -[remote] -type = hdfs -namenode = namenode.hadoop:8020 -username = root --------------------- +Configuration complete. +Options: +- type: hdfs +- namenode: namenode.hadoop:8020 +- username: root +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -37828,11 +39091,11 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... 
Got code --------------------- -[remote] -type = hidrive -token = {"access_token":"xxxxxxxxxxxxxxxxxxxx","token_type":"Bearer","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxx","expiry":"xxxxxxxxxxxxxxxxxxxxxxx"} --------------------- +Configuration complete. +Options: +- type: hidrive +- token: {"access_token":"xxxxxxxxxxxxxxxxxxxx","token_type":"Bearer","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxx","expiry":"xxxxxxxxxxxxxxxxxxxxxxx"} +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -38289,10 +39552,11 @@ Choose a number from below, or type in your own value \ "https://example.com" url> https://beta.rclone.org Remote config --------------------- -[remote] -url = https://beta.rclone.org --------------------- +Configuration complete. +Options: +- type: http +- url: https://beta.rclone.org +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -38877,12 +40141,12 @@ Edit advanced config? y) Yes n) No (default) y/n> n --------------------- -[remote] -type = internetarchive -access_key_id = XXXX -secret_access_key = XXXX --------------------- +Configuration complete. +Options: +- type: internetarchive +- access_key_id: XXXX +- secret_access_key: XXXX +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -39205,18 +40469,18 @@ Press Enter for the default (Archive). 2 > Shared 3 > Sync config_mountpoint> 1 --------------------- -[remote] -type = jottacloud -configVersion = 1 -client_id = jottacli -client_secret = -tokenURL = https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token -token = {........} -username = 2940e57271a93d987d6f8a21 -device = Jotta -mountpoint = Archive --------------------- +Configuration complete. +Options: +- type: jottacloud +- configVersion: 1 +- client_id: jottacli +- client_secret: +- tokenURL: https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token +- token: {........} +- username: 2940e57271a93d987d6f8a21 +- device: Jotta +- mountpoint: Archive +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -40066,13 +41330,13 @@ y) Yes n) No y/n> n Remote config --------------------- -[remote] -type = mailru -user = username@mail.ru -pass = *** ENCRYPTED *** -speedup_enable = true --------------------- +Configuration complete. +Options: +- type: mailru +- user: username@mail.ru +- pass: *** ENCRYPTED *** +- speedup_enable: true +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -40459,12 +41723,12 @@ password: Confirm the password: password: Remote config --------------------- -[remote] -type = mega -user = you@example.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: mega +- user: you@example.com +- pass: *** ENCRYPTED *** +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -40747,10 +42011,10 @@ Storage> memory Remote config --------------------- -[remote] -type = memory --------------------- +Configuration complete. +Options: +- type: memory +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -41114,12 +42378,13 @@ key> base64encodedkey== Endpoint for the service - leave blank normally. endpoint> Remote config --------------------- -[remote] -account = account_name -key = base64encodedkey== -endpoint = --------------------- +Configuration complete. 
+Options: +- type: azureblob +- account: account_name +- key: base64encodedkey== +- endpoint: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -41363,6 +42628,13 @@ be explicitly specified using exactly one of the `msi_object_id`, If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is set, this is is equivalent to using `env_auth`. +#### Anonymous {#anonymous} + +If you want to access resources with public anonymous access then set +`account` only. You can do this without making an rclone config: + + rclone lsf :azureblob,account=ACCOUNT:CONTAINER + ### Standard options @@ -42770,13 +44042,13 @@ Is that okay? y) Yes n) No y/n> y --------------------- -[remote] -type = onedrive -token = {"access_token":"youraccesstoken","token_type":"Bearer","refresh_token":"yourrefreshtoken","expiry":"2018-08-26T22:39:52.486512262+08:00"} -drive_id = b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk -drive_type = business --------------------- +Configuration complete. +Options: +- type: onedrive +- token: {"access_token":"youraccesstoken","token_type":"Bearer","refresh_token":"yourrefreshtoken","expiry":"2018-08-26T22:39:52.486512262+08:00"} +- drive_id: b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk +- drive_type: business +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -43839,11 +45111,12 @@ Enter the password: password: Confirm the password: password: --------------------- -[remote] -username = -password = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: opendrive +- username: +- password: *** ENCRYPTED *** +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -44886,15 +46159,16 @@ Number of connection retry. Leave blank will use the default value "3". connection_retries> Remote config --------------------- -[remote] -env_auth = false -access_key_id = access_key -secret_access_key = secret_key -endpoint = -zone = pek3a -connection_retries = --------------------- +Configuration complete. +Options: +- type: qingstor +- env_auth: false +- access_key_id: access_key +- secret_access_key: secret_key +- endpoint: +- zone: pek3a +- connection_retries: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -45198,11 +46472,12 @@ api_key> your_api_key Host name of Quatrix account. host> example.quatrix.it --------------------- -[remote] -api_key = your_api_key -host = example.quatrix.it --------------------- +Configuration complete. +Options: +- type: quatrix +- api_key: your_api_key +- host: example.quatrix.it +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -45248,12 +46523,12 @@ e/n/d/r/c/s/q> e Choose a number from below, or type in an existing value 1 > remote remote> remote --------------------- -[remote] -type = quatrix -host = some_host.quatrix.it -api_key = your_api_key --------------------- +Configuration complete. +Options: +- type: quatrix +- host: some_host.quatrix.it +- api_key: your_api_key +Keep this "remote" remote? Edit remote Option api_key. API key for accessing Quatrix account @@ -45263,12 +46538,12 @@ Option host. Host name of Quatrix account Enter a string value. Press Enter for the default (some_host.quatrix.it). --------------------- -[remote] -type = quatrix -host = some_host.quatrix.it -api_key = your_api_key --------------------- +Configuration complete. 
+Options: +- type: quatrix +- host: some_host.quatrix.it +- api_key: your_api_key +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -46141,6 +47416,48 @@ Properties: - Type: bool - Default: false +#### --swift-fetch-until-empty-page + +When paginating, always fetch unless we received an empty page. + +Consider using this option if rclone listings show fewer objects +than expected, or if repeated syncs copy unchanged objects. + +It is safe to enable this, but rclone may make more API calls than +necessary. + +This is one of a pair of workarounds to handle implementations +of the Swift API that do not implement pagination as expected. See +also "partial_page_fetch_threshold". + +Properties: + +- Config: fetch_until_empty_page +- Env Var: RCLONE_SWIFT_FETCH_UNTIL_EMPTY_PAGE +- Type: bool +- Default: false + +#### --swift-partial-page-fetch-threshold + +When paginating, fetch if the current page is within this percentage of the limit. + +Consider using this option if rclone listings show fewer objects +than expected, or if repeated syncs copy unchanged objects. + +It is safe to enable this, but rclone may make more API calls than +necessary. + +This is one of a pair of workarounds to handle implementations +of the Swift API that do not implement pagination as expected. See +also "fetch_until_empty_page". + +Properties: + +- Config: partial_page_fetch_threshold +- Env Var: RCLONE_SWIFT_PARTIAL_PAGE_FETCH_THRESHOLD +- Type: int +- Default: 0 + #### --swift-chunk-size Above this size files will be chunked. @@ -46363,12 +47680,13 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -client_id = -client_secret = -token = {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"} --------------------- +Configuration complete. +Options: +- type: pcloud +- client_id: +- client_secret: +- token: {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -46980,6 +48298,182 @@ PikPak supports MD5 hash, but sometimes given empty especially for user-uploaded Deleted files will still be visible with `--pikpak-trashed-only` even after the trash emptied. This goes away after few days. +# Pixeldrain + +This is the backend for Pixeldrain's premium filesystem feature. This is not the +same as pixeldrain's free file sharing product. The filesystem requires either a +Pro subscription or the Prepaid plan. [More information on +subscriptions](https://pixeldrain.com/#pro). + +An overview of the filesystem's features and limitations is available in the +[filesystem guide](https://pixeldrain.com/filesystem) on pixeldrain. + +### Usage with account + +To use the personal filesystem you will need a [pixeldrain +account](https://pixeldrain.com/register) and either the Prepaid plan or one of +the Patreon-based subscriptions. After registering and subscribing, your +personal filesystem will be available at this link: https://pixeldrain.com/d/me. + +Go to the [API keys page](https://pixeldrain.com/user/api_keys) on your account +and generate a new API key for rclone. Then run `rclone config` and use the API +key to create a new backend. + +Example: + +``` +No remotes found, make a new one? +n) New remote +d) Delete remote +c) Copy remote +s) Set configuration password +q) Quit config +n/d/c/s/q> n + +Enter name for new remote. 
+name> pixeldrainfs + +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +... +XX / Pixeldrain Filesystem + \ (pixeldrain) +... +Storage> pixeldrain + +Option api_key. +API key for your pixeldrain account. +Found on https://pixeldrain.com/user/api_keys. +Enter a value. Press Enter to leave empty. +api_key> b1bb1e81-9b7b-406b-986a-c9b20be76e15 + +Option directory_id. +Root of the filesystem to use. Set to 'me' to use your personal filesystem. +Set to a shared directory ID to use a shared directory. +Enter a string value. Press Enter for the default (me). +directory_id> + +Edit advanced config? +y) Yes +n) No (default) +y/n> + +Configuration complete. +Options: +- type: pixeldrain +- api_key: b1bb1e81-9b7b-406b-986a-c9b20be76e15 +Keep this "pixeldrainfs" remote? +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> + +Current remotes: + +Name Type +==== ==== +pixeldrainfs pixeldrain + +e) Edit existing remote +n) New remote +d) Delete remote +r) Rename remote +c) Copy remote +s) Set configuration password +q) Quit config +e/n/d/r/c/s/q> q +``` + +### Usage without account + +It is possible to gain read-only access to publicly shared directories through +rclone. For this you only need a directory ID. The directory ID can be found in +the URL of a shared directory, the URL will look like this +`https://pixeldrain.com/d/abcd1234` where `abcd1234` is the directory ID. +Directory IDs in your own filesystem can also be listed with the `lsf` command: + +`rclone lsf Pixeldrain: --dirs-only -Fpi` + +This will print directories in your `Pixeldrain` home directory and their public +IDs. + +Enter this directory ID in the rclone config and you will be able to access the +directory. + + +### Standard options + +Here are the Standard options specific to pixeldrain (Pixeldrain Filesystem). + +#### --pixeldrain-api-key + +API key for your pixeldrain account. +Found on https://pixeldrain.com/user/api_keys. + +Properties: + +- Config: api_key +- Env Var: RCLONE_PIXELDRAIN_API_KEY +- Type: string +- Required: false + +#### --pixeldrain-root-folder-id + +Root of the filesystem to use. + +Set to 'me' to use your personal filesystem. Set to a shared directory ID to use a shared directory. + +Properties: + +- Config: root_folder_id +- Env Var: RCLONE_PIXELDRAIN_ROOT_FOLDER_ID +- Type: string +- Default: "me" + +### Advanced options + +Here are the Advanced options specific to pixeldrain (Pixeldrain Filesystem). + +#### --pixeldrain-api-url + +The API endpoint to connect to. In the vast majority of cases it's fine to leave +this at default. It is only intended to be changed for testing purposes. + +Properties: + +- Config: api_url +- Env Var: RCLONE_PIXELDRAIN_API_URL +- Type: string +- Default: "https://pixeldrain.com/api" + +#### --pixeldrain-description + +Description of the remote. + +Properties: + +- Config: description +- Env Var: RCLONE_PIXELDRAIN_DESCRIPTION +- Type: string +- Required: false + +### Metadata + +Pixeldrain supports file modes and creation times. + +Here are the possible system metadata items for the pixeldrain backend. 
+ +| Name | Help | Type | Example | Read Only | +|------|------|------|---------|-----------| +| btime | Time of file birth (creation) | RFC 3339 | 2006-01-02T15:04:05.999999999Z07:00 | N | +| mode | File mode | octal, unix style | 755 | N | +| mtime | Time of last modification | RFC 3339 | 2006-01-02T15:04:05.999999999Z07:00 | N | + +See the [metadata](https://rclone.org/docs/#metadata) docs for more info. + + + # premiumize.me Paths are specified as `remote:path` @@ -47026,11 +48520,11 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = premiumizeme -token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"} --------------------- +Configuration complete. +Options: +- type: premiumizeme +- token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -47262,12 +48756,12 @@ Option 2fa. Enter a value. Press Enter to leave empty. 2fa> 123456 Remote config --------------------- -[remote] -type = protondrive -user = you@protonmail.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: protondrive +- user: you@protonmail.com +- pass: *** ENCRYPTED *** +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -47843,12 +49337,12 @@ Option 2fa. Enter a value. Press Enter to leave empty. 2fa> 123456 Remote config --------------------- -[remote] -type = protondrive -user = you@protonmail.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: protondrive +- user: you@protonmail.com +- pass: *** ENCRYPTED *** +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -48615,14 +50109,15 @@ y/g/n> n Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent. key_file> Remote config --------------------- -[remote] -host = example.com -user = sftpuser -port = -pass = -key_file = --------------------- +Configuration complete. +Options: +- type: sftp +- host: example.com +- user: sftpuser +- port: +- pass: +- key_file: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -48986,7 +50481,15 @@ Properties: Raw PEM-encoded private key. -If specified, will override key_file parameter. +Note that this should be on a single line with line endings replaced with '\n', eg + + key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY----- + +This will generate the single line correctly: + + awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa + +If specified, it will override the key_file parameter. Properties: @@ -49443,13 +50946,13 @@ Maximum number of SFTP simultaneous connections, 0 for unlimited. Note that setting this is very likely to cause deadlocks so it should be used with care. -If you are doing a sync or copy then make sure concurrency is one more +If you are doing a sync or copy then make sure connections is one more than the sum of `--transfers` and `--checkers`. If you use `--check-first` then it just needs to be one more than the maximum of `--checkers` and `--transfers`. 
-So for `concurrency 3` you'd use `--checkers 2 --transfers 2 +So for `connections 3` you'd use `--checkers 2 --transfers 2 --check-first` or `--checkers 1 --transfers 1`. @@ -50061,11 +51564,11 @@ Access Grant. Enter a string value. Press Enter for the default (""). access_grant> your-access-grant-received-by-someone-else Remote config --------------------- -[remote] -type = storj -access_grant = your-access-grant-received-by-someone-else --------------------- +Configuration complete. +Options: +- type: storj +- access_grant: your-access-grant-received-by-someone-else +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -50116,14 +51619,14 @@ Encryption Passphrase. To access existing objects enter passphrase used for uplo Enter a string value. Press Enter for the default (""). passphrase> your-human-readable-encryption-passphrase Remote config --------------------- -[remote] -type = storj -satellite_address = 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777 -api_key = your-api-key-for-your-storj-project -passphrase = your-human-readable-encryption-passphrase -access_grant = the-access-grant-generated-from-the-api-key-and-passphrase --------------------- +Configuration complete. +Options: +- type: storj +- satellite_address: 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777 +- api_key: your-api-key-for-your-storj-project +- passphrase: your-human-readable-encryption-passphrase +- access_grant: the-access-grant-generated-from-the-api-key-and-passphrase +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -50414,11 +51917,11 @@ Remote config Username (email address)> nick@craig-wood.com Your Sugarsync password is only required during setup and will not be stored. password: --------------------- -[remote] -type = sugarsync -refresh_token = https://api.sugarsync.com/app-authorization/XXXXXXXXXXXXXXXXXX --------------------- +Configuration complete. +Options: +- type: sugarsync +- refresh_token: https://api.sugarsync.com/app-authorization/XXXXXXXXXXXXXXXXXX +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -50646,11 +52149,6 @@ remote. See [List of backends that do not support rclone about](https://rclone.org/overview/#optional-features) and [rclone about](https://rclone.org/commands/rclone_about/) -# Tardigrade - -The Tardigrade backend has been renamed to be the [Storj backend](https://rclone.org/storj/). -Old configuration files will continue to work. - # Uloz.to Paths are specified as `remote:path` @@ -51144,11 +52642,11 @@ Cache time of usage and free space (in seconds). This option is only useful when Enter a signed integer. Press Enter for the default ("120"). cache_time> Remote config --------------------- -[remote] -type = union -upstreams = remote1:dir1 remote2:dir2 remote3:dir3 --------------------- +Configuration complete. +Options: +- type: union +- upstreams: remote1:dir1 remote2:dir2 remote3:dir3 +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -51450,15 +52948,15 @@ password: Bearer token instead of user/pass (e.g. a Macaroon) bearer_token> Remote config --------------------- -[remote] -type = webdav -url = https://example.com/remote.php/webdav/ -vendor = nextcloud -user = user -pass = *** ENCRYPTED *** -bearer_token = --------------------- +Configuration complete. 
+Options:
+- type: webdav
+- url: https://example.com/remote.php/webdav/
+- vendor: nextcloud
+- user: user
+- pass: *** ENCRYPTED ***
+- bearer_token:
+Keep this "remote" remote?
 y) Yes this is OK
 e) Edit this remote
 d) Delete this remote
@@ -51672,6 +53170,17 @@ Properties:
 - Type: bool
 - Default: false
 
+#### --webdav-unix-socket
+
+Path to a unix domain socket to dial to, instead of opening a TCP
+connection directly.
+
+Properties:
+
+- Config: unix_socket
+- Env Var: RCLONE_WEBDAV_UNIX_SOCKET
+- Type: string
+- Required: false
+
 #### --webdav-description
 
 Description of the remote.
@@ -51920,12 +53429,13 @@ If your browser doesn't open automatically go to the following link: http://127.
 Log in and authorize rclone for access
 Waiting for code...
 Got code
--------------------
-[remote]
-client_id =
-client_secret =
-token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"OAuth","expiry":"2016-12-29T12:27:11.362788025Z"}
--------------------
+Configuration complete.
+Options:
+- type: yandex
+- client_id:
+- client_secret:
+- token: {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"OAuth","expiry":"2016-12-29T12:27:11.362788025Z"}
+Keep this "remote" remote?
 y) Yes this is OK
 e) Edit this remote
 d) Delete this remote
@@ -52084,6 +53594,17 @@ Properties:
 - Type: Encoding
 - Default: Slash,Del,Ctl,InvalidUtf8,Dot
 
+#### --yandex-spoof-ua
+
+Set the user agent to match an official version of the Yandex Disk
+client. May help with upload performance.
+
+Properties:
+
+- Config: spoof_ua
+- Env Var: RCLONE_YANDEX_SPOOF_UA
+- Type: bool
+- Default: true
+
 #### --yandex-description
 
 Description of the remote.
@@ -52176,12 +53697,12 @@ Choose a number from below, or type in your own value
 1 / General
   \ "4u2869d2aa6fca04f4f2f896b6539243b85b1"
 Enter a Workspace ID> 1
--------------------
-[remote]
-type = zoho
-token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"Zoho-oauthtoken","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","expiry":"2020-10-12T00:54:52.370275223+02:00"}
-root_folder_id = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
--------------------
+Configuration complete.
+Options:
+- type: zoho
+- token: {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"Zoho-oauthtoken","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","expiry":"2020-10-12T00:54:52.370275223+02:00"}
+- root_folder_id: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+Keep this "remote" remote?
 y) Yes this is OK (default)
 e) Edit this remote
 d) Delete this remote
@@ -52500,12 +54021,8 @@ This format requires absolute paths and the use of prefix `\\?\`,
 e.g. `\\?\D:\some\very\long\path`. For convenience rclone will
 automatically convert regular paths into the corresponding
 extended-length paths, so in most cases you do not have to worry about
 this (read more [below](#long-paths)).
-
-Note that Windows supports using the same prefix `\\?\` to
-specify path to volumes identified by their GUID, e.g.
-`\\?\Volume{b75e2c83-0000-0000-0000-602f00000000}\some\path`.
-This is *not* supported in rclone, due to an [issue](https://github.com/golang/go/issues/39785)
-in go.
+Using the same prefix `\\?\` it is also possible to specify paths to
+volumes identified by their GUID, e.g.
+`\\?\Volume{b75e2c83-0000-0000-0000-602f00000000}\some\path`.
 
 #### Long paths ####
 
@@ -52886,6 +54403,32 @@ Properties:
 - Type: bool
 - Default: false
 
+#### --local-no-clone
+
+Disable reflink cloning for server-side copies.
+ +Normally, for local-to-local transfers, rclone will "clone" the file when +possible, and fall back to "copying" only when cloning is not supported. + +Cloning creates a shallow copy (or "reflink") which initially shares blocks with +the original file. Unlike a "hardlink", the two files are independent and +neither will affect the other if subsequently modified. + +Cloning is usually preferable to copying, as it is much faster and is +deduplicated by default (i.e. having two identical files does not consume more +storage than having just one.) However, for use cases where data redundancy is +preferable, --local-no-clone can be used to disable cloning and force "deep" copies. + +Currently, cloning is only supported when using APFS on macOS (support for other +platforms may be added in the future.) + +Properties: + +- Config: no_clone +- Env Var: RCLONE_LOCAL_NO_CLONE +- Type: bool +- Default: false + #### --local-no-preallocate Disable preallocation of disk space for transferred files. @@ -53059,6 +54602,127 @@ Options: # Changelog +## v1.68.0 - 2024-09-08 + +[See commits](https://github.com/rclone/rclone/compare/v1.67.0...v1.68.0) + +* New backends + * [Files.com](/filescom) (Sam Harrison) + * [Gofile](https://rclone.org/gofile/) (Nick Craig-Wood) + * [Pixeldrain](https://rclone.org/pixeldrain/) (Fornax) +* Changed backends + * [S3](https://rclone.org/s3/) backend updated to use [AWS SDKv2](https://github.com/aws/aws-sdk-go-v2) as v1 is now unsupported. + * The matrix of providers and auth methods is huge and there could be problems with obscure combinations. + * Please report problems in a [new issue](https://github.com/rclone/rclone/issues/new/choose) on Github. +* New commands + * [config encryption](https://rclone.org/commands/rclone_config_encryption/): set, remove and check to manage config file encryption (Nick Craig-Wood) +* New Features + * build + * Update to go1.23 and make go1.21 the minimum required version (Nick Craig-Wood) + * Update all dependencies (Nick Craig-Wood) + * Disable wasm/js build due to [go bug #64856](https://github.com/golang/go/issues/64856) (Nick Craig-Wood) + * Enable custom linting rules with ruleguard via gocritic (albertony) + * Update logging statements to make `--use-json-log` work always (albertony) + * Adding new code quality tests and fixing the fallout (albertony) + * config + * Internal config re-organised to be more consistent and make it available from the rc (Nick Craig-Wood) + * Avoid remotes with empty names from the environment (albertony) + * Make listing of remotes more consistent (albertony) + * Make getting config values more consistent (albertony) + * Use `--password-command` to set config file password if supplied (Nick Craig-Wood) + * doc fixes (albertony, crystalstall, David Seifert, Eng Zer Jun, Ernie Hershey, Florian Klink, John Oxley, kapitainsky, Mathieu Moreau, Nick Craig-Wood, nipil, Pétr Bozsó, Russ Bubley, Sam Harrison, Thearas, URenko, Will Miles, yuval-cloudinary) + * fs: Allow semicolons as well as spaces in `--bwlimit` timetable parsing (Kyle Reynolds) + * help + * Global flags help command now takes glob filter (albertony) + * Make help command output less distracting (albertony) + * lib/encoder: Add Raw encoding for use where no encoding at all is required, eg `--local-encoding Raw` (URenko) + * listremotes: Added options for filtering, ordering and json output (albertony) + * nfsmount + * Make the `--sudo` flag work for umount as well as mount (Nick Craig-Wood) + * Add `-o tcp` option to NFS mount options to fix 
mounting under Linux (Nick Craig-Wood) + * operations: copy: generate stable partial suffix (Georg Welzel) + * rc + * Add [options/info](https://rclone.org/rc/#options-info) call to enumerate options (Nick Craig-Wood) + * Add option blocks parameter to [options/get](https://rclone.org/rc/#options-get) and [options/info](/rc/#options-info) (Nick Craig-Wood) + * Add [vfs/queue](https://rclone.org/rc/#vfs-queue) to show the status of the upload queue (Nick Craig-Wood) + * Add [vfs/queue-set-expiry](https://rclone.org/rc/#vfs-queue-set-expiry) to adjust expiry of items in the VFS queue (Nick Craig-Wood) + * Add `--unix-socket` option to `rc` command (Florian Klink) + * Prevent unmount rc command from sending a `STOPPING=1` sd-notify message (AThePeanut4) + * rcserver: Implement [prometheus metrics](https://rclone.org/docs/#metrics) on a dedicated port (Oleg Kunitsyn) + * serve dlna + * Also look at "Subs" subdirectory (Florian Klink) + * Don't swallow `video.{idx,sub}` (Florian Klink) + * Set more correct mime type (Florian Klink) + * serve nfs + * Implement on disk cache for file handles selected with `--nfs-cache-type` (Nick Craig-Wood) + * Add tracing to filesystem calls (Nick Craig-Wood) + * Mask unimplemented error from chmod (Nick Craig-Wood) + * Unify the nfs library logging with rclone's logging better (Nick Craig-Wood) + * Fix incorrect user id and group id exported to NFS (Nick Craig-Wood) + * serve s3 + * Implement `--auth-proxy` (Sawjan Gurung) + * Update to AWS SDKv2 by updating `github.com/rclone/gofakes3` (Nick Craig-Wood) +* Bug Fixes + * bisync: Fix sync time problems with backends that round time (eg Dropbox) (nielash) + * serve dlna: Fix panic: invalid argument to Int63n (Nick Craig-Wood) +* VFS + * Add [--vfs-read-chunk-streams](https://rclone.org/commands/rclone_mount/#vfs-read-chunk-streams-0-1) to parallel read chunks from files (Nick Craig-Wood) + * This can increase mount performance on high bandwidth or large latency links + * Fix cache encoding with special characters (URenko) +* Local + * Fix encoding of root path fix (URenko) + * Add server-side copy (using clone) with xattrs on macOS (nielash) + * `--local-no-clone` flag to disable cloning for server-side copies (nielash) + * Support setting custom `--metadata` during server-side Copy (nielash) +* Azure Blob + * Allow anonymous access for public resources (Nick Craig-Wood) +* B2 + * Include custom upload headers in large file info (Pat Patterson) +* Drive + * Fix copying Google Docs to a backend which only supports SHA1 (Nick Craig-Wood) +* Fichier + * Fix detection of Flood Detected error (Nick Craig-Wood) + * Fix server side move (Nick Craig-Wood) +* HTTP + * Reload client certificates on expiry (Saleh Dindar) + * Support listening on passed FDs (Florian Klink) +* Jottacloud + * Fix setting of metadata on server side move (albertony) +* Onedrive + * Fix nil pointer error when uploading small files (Nick Craig-Wood) +* Pcloud + * Implement `SetModTime` (Georg Welzel) + * Implement `OpenWriterAt` feature to enable multipart uploads (Georg Welzel) +* Pikpak + * Improve data consistency by ensuring async tasks complete (wiserain) + * Implement custom hash to replace wrong sha1 (wiserain) + * Fix error with `copyto` command (wiserain) + * Optimize file move by removing unnecessary `readMetaData()` call (wiserain) + * Non-buffered hash calculation for local source files (wiserain) + * Optimize upload by pre-fetching gcid from API (wiserain) + * Correct file transfer progress for uploads by hash (wiserain) + * Update 
to using AWS SDK v2 (wiserain)
+* S3
+    * Update to using AWS SDK v2 (Nick Craig-Wood)
+    * Add `--s3-sdk-log-mode` to control SDKv2 debugging (Nick Craig-Wood)
+    * Fix incorrect region for Magalu provider (Filipe Herculano)
+    * Allow restoring from intelligent-tiering storage class (Pawel Palucha)
+* SFTP
+    * Use `uint32` for mtime to save memory (Tomasz Melcer)
+    * Ignore useless errors when closing the connection pool (Nick Craig-Wood)
+    * Support listening on passed FDs (Florian Klink)
+* Swift
+    * Add workarounds for bad listings in Ceph RGW (Paul Collins)
+    * Add total/free space info in `about` command (fsantagostinobietti)
+* Ulozto
+    * Fix upload of > 2GB files on 32 bit platforms (Tobias Markus)
+* WebDAV
+    * Add `--webdav-unix-socket` to connect to a unix socket (Florian Klink)
+* Yandex
+    * Implement custom user agent to help with upload speeds (Sebastian Bünger)
+* Zoho
+    * Fix inefficiencies uploading with new API to avoid throttling (Nick Craig-Wood)
+
 ## v1.67.0 - 2024-06-14
 
 [See commits](https://github.com/rclone/rclone/compare/v1.66.0...v1.67.0)
 
@@ -58627,9 +60291,11 @@ value, say `export GOGC=20`. This will make the garbage collector
 work harder, reducing memory size at the expense of CPU usage.
 
 The most common cause of rclone using lots of memory is a single
-directory with thousands or millions of files in. Rclone has to load
-this entirely into memory as rclone objects. Each rclone object takes
-0.5k-1k of memory.
+directory with millions of files in it. Rclone has to load this entirely
+into memory as rclone objects. Each rclone object takes 0.5k-1k of
+memory. There is
+[a workaround for this](https://github.com/rclone/rclone/wiki/Big-syncs-with-millions-of-files)
+which involves a bit of scripting.
 
 ### Rclone changes fullwidth Unicode punctuation marks in file names
 
@@ -59538,6 +61204,30 @@ put them back in again.` >}}
 * Michał Dzienisiewicz
 * Florian Klink
 * Bill Fraser
+ * Thearas
+ * Filipe Herculano
+ * Russ Bubley
+ * Paul Collins
+ * Tomasz Melcer
+ * itsHenry <2671230065@qq.com>
+ * Ke Wang
+ * AThePeanut4 <49614525+AThePeanut4@users.noreply.github.com>
+ * Tobias Markus
+ * Ernie Hershey
+ * Will Miles
+ * David Seifert <16636962+SoapGentoo@users.noreply.github.com>
+ * Fornax
+ * Sam Harrison
+ * Péter Bozsó <3806723+peterbozso@users.noreply.github.com>
+ * Georg Welzel
+ * John Oxley
+ * Pawel Palucha
+ * crystalstall
+ * nipil
+ * yuval-cloudinary <46710068+yuval-cloudinary@users.noreply.github.com>
+ * Mathieu Moreau
+ * fsantagostinobietti <6057026+fsantagostinobietti@users.noreply.github.com>
+ * Oleg Kunitsyn <114359669+hiddenmarten@users.noreply.github.com>
 
 # Contact the rclone project
 
diff --git a/MANUAL.txt b/MANUAL.txt
index a401417af..0c2d0d066 100644
--- a/MANUAL.txt
+++ b/MANUAL.txt
@@ -1,6 +1,6 @@
 rclone(1) User Manual
 Nick Craig-Wood
-Jun 14, 2024
+Sep 08, 2024
 
 Rclone syncs your files to cloud storage
 
@@ -108,7 +109,9 @@ S3, that work out of the box.)
 - Dropbox
 - Enterprise File Fabric
 - Fastmail Files
+- Files.com
 - FTP
+- Gofile
 - Google Cloud Storage
 - Google Drive
 - Google Photos
@@ -147,6 +149,7 @@ S3, that work out of the box.)
 - pCloud
 - Petabox
 - PikPak
+- Pixeldrain
 - premiumize.me
 - put.io
 - Proton Drive
@@ -617,6 +620,12 @@ resulting executable will be in your GOPATH bin folder
 
     go install github.com/rclone/rclone@latest
 
+In some situations the rclone executable can be too big for
+deployment in very restricted environments, as it includes all
+backends, some with large SDKs.
To limit binary size unused backends can be commented +out in backends/all/all.go and unused commands in cmd/all/all.go before +building with go build or make + Ansible installation This can be done with Stefan Weichinger's ansible role. @@ -835,12 +844,15 @@ See the following for detailed instructions for - Digi Storage - Dropbox - Enterprise File Fabric +- Files.com - FTP +- Gofile - Google Cloud Storage - Google Drive - Google Photos - Hasher - to handle checksums for other remotes - HDFS +- Hetzner Storage Box - HiDrive - HTTP - Internet Archive @@ -859,11 +871,13 @@ See the following for detailed instructions for - Oracle Object Storage - Pcloud - PikPak +- Pixeldrain - premiumize.me - put.io - Proton Drive - QingStor - Quatrix by Maytech +- rsync.net - Seafile - SFTP - Sia @@ -937,7 +951,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. - rclone config create - Create a new remote with name, type and @@ -946,6 +960,8 @@ SEE ALSO - rclone config disconnect - Disconnects user from remote - rclone config dump - Dump the config file as JSON. - rclone config edit - Enter an interactive configuration session. +- rclone config encryption - set, remove and check the encryption for + the config file - rclone config file - Show path of configuration file in use. - rclone config password - Update password in an existing remote. - rclone config paths - Show paths used for configuration, cache, temp @@ -1037,13 +1053,16 @@ Options --create-empty-src-dirs Create empty source dirs on destination after copy -h, --help help for copy +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -1076,7 +1095,7 @@ Flags for anything which can Copy a file. Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode @@ -1084,7 +1103,7 @@ Important flags useful for most commands. Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -1111,14 +1130,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. 
- -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1237,13 +1254,16 @@ Options -s, --separator string Separator for the items in the format (default ";") -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05) +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -1276,7 +1296,7 @@ Flags for anything which can Copy a file. Sync Options -Flags just used for rclone sync. +Flags used for sync commands --backup-dir string Make backups into hierarchy based in DIR --delete-after When synchronizing, delete files on destination after transferring (default) @@ -1293,7 +1313,7 @@ Flags just used for rclone sync. Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode @@ -1301,7 +1321,7 @@ Important flags useful for most commands. Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -1328,14 +1348,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1387,13 +1405,16 @@ Options --delete-empty-src-dirs Delete empty source dirs after move -h, --help help for move +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). 
- --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -1426,7 +1447,7 @@ Flags for anything which can Copy a file. Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode @@ -1434,7 +1455,7 @@ Important flags useful for most commands. Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -1461,14 +1482,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1512,9 +1531,12 @@ Options -h, --help help for delete --rmdirs rmdirs removes empty directories but leaves root intact +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode @@ -1522,7 +1544,7 @@ Important flags useful for most commands. Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -1549,14 +1571,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1580,17 +1600,18 @@ Options -h, --help help for purge +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode -v, --verbose count Print lots more stuff (repeat for more) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1604,17 +1625,18 @@ Options -h, --help help for mkdir +Options shared with other commands are described next. See the global +flags page for global options not listed here. 
+ Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode -v, --verbose count Print lots more stuff (repeat for more) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1636,17 +1658,18 @@ Options -h, --help help for rmdir +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode -v, --verbose count Print lots more stuff (repeat for more) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1718,15 +1741,18 @@ Options --missing-on-src string Report all files missing from the source to this file --one-way Check one way only, source files must exist on remote +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Check Options -Flags used for rclone check. +Flags used for check commands --max-backlog int Maximum number of objects in sync or check backlog (default 10000) Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -1753,14 +1779,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1810,9 +1834,12 @@ Options -h, --help help for ls +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -1839,14 +1866,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1907,9 +1932,12 @@ Options -h, --help help for lsd -R, --recursive Recurse into the listing +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -1936,14 +1964,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. 
+Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -1994,9 +2020,12 @@ Options -h, --help help for lsl +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -2023,14 +2052,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2067,9 +2094,12 @@ Options -h, --help help for md5sum --output-file string Output hashsums to a file rather than the terminal +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -2096,14 +2126,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2143,9 +2171,12 @@ Options -h, --help help for sha1sum --output-file string Output hashsums to a file rather than the terminal +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -2172,14 +2203,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2211,9 +2240,12 @@ Options -h, --help help for size --json Format output as JSON +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. 
+Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -2240,14 +2272,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2302,7 +2332,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2321,17 +2351,18 @@ Options -h, --help help for cleanup +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode -v, --verbose count Print lots more stuff (repeat for more) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2465,17 +2496,18 @@ Options --dedupe-mode string Dedupe mode interactive|skip|first|newest|oldest|largest|smallest|rename (default "interactive") -h, --help help for dedupe +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode -v, --verbose count Print lots more stuff (repeat for more) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2485,8 +2517,8 @@ Get quota information from the remote. Synopsis -rclone about prints quota information about a remote to standard output. -The output is typically used, free, quota and trash contents. +Prints quota information about a remote to standard output. The output +is typically used, free, quota and trash contents. E.g. Typical output from rclone about remote: is: @@ -2541,7 +2573,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2571,7 +2603,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2615,17 +2647,18 @@ Options --json Always output in JSON format -o, --option stringArray Option in the form name=value or name +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode -v, --verbose count Print lots more stuff (repeat for more) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. 
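
A minimal usage sketch for the backend command documented above (an
editorial example, not upstream manual text; remote: and s3:bucket/path are
placeholder names, and the restore example assumes the S3 backend's
documented restore command):

    # List the commands a backend implements
    rclone backend help remote:

    # Show the features of a remote as JSON
    rclone backend features remote:

    # Pass backend-specific options with -o name=value
    rclone backend restore s3:bucket/path -o priority=Standard
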
@@ -2680,13 +2713,16 @@ Options --slow-hash-sync-only Ignore slow checksums for listings and deltas, but still consider them during sync calls. --workdir string Use custom working dir - useful for testing. (default: {WORKDIR}) +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -2719,7 +2755,7 @@ Flags for anything which can Copy a file. Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode @@ -2727,7 +2763,7 @@ Important flags useful for most commands. Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -2752,9 +2788,7 @@ Flags for filtering directory listings. --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2764,7 +2798,7 @@ Concatenates any files and sends them to stdout. Synopsis -rclone cat sends any files to standard output. +Sends any files to standard output. You can use it like this to output a single file @@ -2807,9 +2841,12 @@ Options --separator string Separator to use between objects when printing multiple files --tail int Only print the last N characters +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -2836,14 +2873,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2910,9 +2945,12 @@ Options --missing-on-src string Report all files missing from the source to this file --one-way Check one way only, source files must exist on remote +Options shared with other commands are described next. 
See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -2939,14 +2977,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -2965,7 +3001,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. - rclone completion bash - Output bash completion script for rclone. @@ -2984,7 +3020,7 @@ Generates a bash shell autocompletion script for rclone. By default, when run without any arguments, - rclone genautocomplete bash + rclone completion bash the generated script will be written to @@ -3016,7 +3052,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone completion - Output completion script for a given shell. @@ -3031,7 +3067,7 @@ Generates a fish autocompletion script for rclone. This writes to /etc/fish/completions/rclone.fish by default so will probably need to be run with sudo or as root, e.g. - sudo rclone genautocomplete fish + sudo rclone completion fish Logout and login again to use the autocompletion scripts, or source them directly @@ -3050,7 +3086,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone completion - Output completion script for a given shell. @@ -3080,7 +3116,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone completion - Output completion script for a given shell. @@ -3095,7 +3131,7 @@ Generates a zsh autocompletion script for rclone. This writes to /usr/share/zsh/vendor-completions/_rclone by default so will probably need to be run with sudo or as root, e.g. - sudo rclone genautocomplete zsh + sudo rclone completion zsh Logout and login again to use the autocompletion scripts, or source them directly @@ -3114,7 +3150,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone completion - Output completion script for a given shell. @@ -3239,7 +3275,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3255,7 +3291,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3279,7 +3315,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3295,7 +3331,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3317,10 +3353,131 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. 
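
As a brief sketch of driving the config subcommands above non-interactively
(the remote name mywebdav and all values are illustrative only):

    # Create a remote without entering the interactive session
    rclone config create mywebdav webdav url=https://example.com/remote.php/webdav/ vendor=nextcloud

    # Inspect the stored configuration
    rclone config show mywebdav
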
+rclone config encryption
+
+set, remove and check the encryption for the config file
+
+Synopsis
+
+This command sets, clears and checks the encryption for the config file
+using the subcommands below.
+
+Options
+
+  -h, --help   help for encryption
+
+See the global flags page for global options not listed here.
+
+See Also
+
+- rclone config - Enter an interactive configuration session.
+- rclone config encryption check - Check that the config file is
+  encrypted
+- rclone config encryption remove - Remove the config file encryption
+  password
+- rclone config encryption set - Set or change the config file
+  encryption password
+
+rclone config encryption check
+
+Check that the config file is encrypted
+
+Synopsis
+
+This checks that the config file is encrypted and that you can decrypt
+it.
+
+It will attempt to decrypt the config using the password you supply.
+
+If decryption fails it will return a non-zero exit code if using
+--password-command, otherwise it will prompt again for the password.
+
+If the config file is not encrypted it will return a non-zero exit code.
+
+    rclone config encryption check [flags]
+
+Options
+
+  -h, --help   help for check
+
+See the global flags page for global options not listed here.
+
+See Also
+
+- rclone config encryption - set, remove and check the encryption for
+  the config file
+
+rclone config encryption remove
+
+Remove the config file encryption password
+
+Synopsis
+
+Remove the config file encryption password
+
+This removes the config file encryption, returning it to unencrypted.
+
+If --password-command is in use, this will be called to supply the old
+config password.
+
+If the config was not encrypted then no error will be returned and this
+command will do nothing.
+
+    rclone config encryption remove [flags]
+
+Options
+
+  -h, --help   help for remove
+
+See the global flags page for global options not listed here.
+
+See Also
+
+- rclone config encryption - set, remove and check the encryption for
+  the config file
+
+rclone config encryption set
+
+Set or change the config file encryption password
+
+Synopsis
+
+This command sets or changes the config file encryption password.
+
+If there was no config password set then it sets a new one, otherwise it
+changes the existing config password.
+
+Note that if you are changing an encryption password using
+--password-command then this will be called once to decrypt the config
+using the old password and then again to read the new password to
+re-encrypt the config.
+
+When --password-command is called to change the password then the
+environment variable RCLONE_PASSWORD_CHANGE=1 will be set. So if
+changing passwords programmatically you can use the environment variable
+to distinguish which password you must supply.
+
+Alternatively you can remove the password first (with
+rclone config encryption remove), then set it again with this command,
+which may be easier if you don't mind the unencrypted config file being
+on the disk briefly.
+
+    rclone config encryption set [flags]
+
+Options
+
+  -h, --help   help for set
+
+See the global flags page for global options not listed here.
+
+See Also
+
+- rclone config encryption - set, remove and check the encryption for
+  the config file
+
 rclone config file
 
 Show path of configuration file in use.
@@ -3333,7 +3490,7 @@ Options
 
 See the global flags page for global options not listed here.
 
-SEE ALSO
+See Also
 
 - rclone config - Enter an interactive configuration session.
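
A short workflow sketch for the encryption subcommands above (the password
manager invocation "pass show rclone" is purely illustrative):

    # Encrypt the config file, prompting for a new password
    rclone config encryption set

    # Verify the password still decrypts the config; with
    # --password-command this exits non-zero on failure
    rclone --password-command "pass show rclone" config encryption check

    # Return the config file to plain text
    rclone config encryption remove
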
@@ -3363,7 +3520,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3379,7 +3536,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3395,7 +3552,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3419,7 +3576,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3449,7 +3606,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3465,7 +3622,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3481,7 +3638,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3606,7 +3763,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3628,7 +3785,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone config - Enter an interactive configuration session. @@ -3672,13 +3829,16 @@ Options -h, --help help for copyto +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -3711,7 +3871,7 @@ Flags for anything which can Copy a file. Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode @@ -3719,7 +3879,7 @@ Important flags useful for most commands. Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -3746,14 +3906,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. 
- -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -3804,17 +3962,18 @@ Options -p, --print-filename Print the resulting name from --auto-filename --stdout Write the output to stdout rather than a file +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode -v, --verbose count Print lots more stuff (repeat for more) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -3824,9 +3983,9 @@ Cryptcheck checks the integrity of an encrypted remote. Synopsis -rclone cryptcheck checks a remote against a crypted remote. This is the -equivalent of running rclone check, but able to check the checksums of -the encrypted remote. +Checks a remote against a crypted remote. This is the equivalent of +running rclone check, but able to check the checksums of the encrypted +remote. For it to work the underlying remote of the cryptedremote must support some kind of checksum. @@ -3889,15 +4048,18 @@ Options --missing-on-src string Report all files missing from the source to this file --one-way Check one way only, source files must exist on remote +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Check Options -Flags used for rclone check. +Flags used for check commands --max-backlog int Maximum number of objects in sync or check backlog (default 10000) Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -3924,14 +4086,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -3941,8 +4101,8 @@ Cryptdecode returns unencrypted file names. Synopsis -rclone cryptdecode returns unencrypted file names when provided with a -list of encrypted file names. List limit is 10 items. +Returns unencrypted file names when provided with a list of encrypted +file names. List limit is 10 items. If you supply the --reverse flag, it will return encrypted file names. @@ -3965,7 +4125,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -3985,147 +4145,21 @@ Options -h, --help help for deletefile +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode -v, --verbose count Print lots more stuff (repeat for more) -See the global flags page for global options not listed here. 
- -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. -rclone genautocomplete - -Output completion script for a given shell. - -Synopsis - -Generates a shell completion script for rclone. Run with --help to list -the supported shells. - -Options - - -h, --help help for genautocomplete - -See the global flags page for global options not listed here. - -SEE ALSO - -- rclone - Show help for rclone commands, flags and backends. -- rclone genautocomplete bash - Output bash completion script for - rclone. -- rclone genautocomplete fish - Output fish completion script for - rclone. -- rclone genautocomplete zsh - Output zsh completion script for - rclone. - -rclone genautocomplete bash - -Output bash completion script for rclone. - -Synopsis - -Generates a bash shell autocompletion script for rclone. - -This writes to /etc/bash_completion.d/rclone by default so will probably -need to be run with sudo or as root, e.g. - - sudo rclone genautocomplete bash - -Logout and login again to use the autocompletion scripts, or source them -directly - - . /etc/bash_completion - -If you supply a command line argument the script will be written there. - -If output_file is "-", then the output will be written to stdout. - - rclone genautocomplete bash [output_file] [flags] - -Options - - -h, --help help for bash - -See the global flags page for global options not listed here. - -SEE ALSO - -- rclone genautocomplete - Output completion script for a given shell. - -rclone genautocomplete fish - -Output fish completion script for rclone. - -Synopsis - -Generates a fish autocompletion script for rclone. - -This writes to /etc/fish/completions/rclone.fish by default so will -probably need to be run with sudo or as root, e.g. - - sudo rclone genautocomplete fish - -Logout and login again to use the autocompletion scripts, or source them -directly - - . /etc/fish/completions/rclone.fish - -If you supply a command line argument the script will be written there. - -If output_file is "-", then the output will be written to stdout. - - rclone genautocomplete fish [output_file] [flags] - -Options - - -h, --help help for fish - -See the global flags page for global options not listed here. - -SEE ALSO - -- rclone genautocomplete - Output completion script for a given shell. - -rclone genautocomplete zsh - -Output zsh completion script for rclone. - -Synopsis - -Generates a zsh autocompletion script for rclone. - -This writes to /usr/share/zsh/vendor-completions/_rclone by default so -will probably need to be run with sudo or as root, e.g. - - sudo rclone genautocomplete zsh - -Logout and login again to use the autocompletion scripts, or source them -directly - - autoload -U compinit && compinit - -If you supply a command line argument the script will be written there. - -If output_file is "-", then the output will be written to stdout. - - rclone genautocomplete zsh [output_file] [flags] - -Options - - -h, --help help for zsh - -See the global flags page for global options not listed here. - -SEE ALSO - -- rclone genautocomplete - Output completion script for a given shell. - rclone gendocs Output markdown docs for rclone to the directory supplied. @@ -4144,7 +4178,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -4217,7 +4251,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. 
@@ -4271,9 +4305,12 @@ Options

    -h, --help help for hashsum
    --output-file string Output hashsums to a file rather than the terminal

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -4300,14 +4337,12 @@ Flags for filtering directory listings.

Listing Options

-Flags for listing directories.
+Flags for listing directories

    --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
    --fast-list Use recursive list if available; uses more memory but fewer transactions

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -4317,8 +4352,7 @@

Generate public link to file/folder.

Synopsis

-rclone link will create, retrieve or remove a public link to the given
-file or folder.
+Create, retrieve or remove a public link to the given file or folder.

    rclone link remote:path/to/file
    rclone link remote:path/to/folder/
@@ -4349,7 +4383,7 @@ Options

See the global flags page for global options not listed here.

-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -4360,21 +4394,35 @@ variables.

Synopsis

-rclone listremotes lists all the available remotes from the config file.
+Lists all the available remotes from the config file, or the remotes
+matching an optional filter.

-When used with the --long flag it lists the types and the descriptions
-too.
+By default the result is printed in human-readable format, as a simple
+list of remote names, or with flag --long as a table that also includes
+the types and descriptions. Using flag --json produces machine-readable
+output instead, which always includes all attributes - including the
+source (file or environment).

- rclone listremotes [flags]
+The result can be filtered with a filter argument, which applies to all
+attributes, and/or with filter flags specific to each attribute. The
+values must be specified according to regular rclone filtering pattern
+syntax.
+
+ rclone listremotes [<filter>] [flags]

Options

- -h, --help help for listremotes
- --long Show the type and the description as well as names
+ --description string Filter remotes by description
+ -h, --help help for listremotes
+ --json Format output as JSON
+ --long Show type and description in addition to name
+ --name string Filter remotes by name
+ --order-by string Instructions on how to order the result, e.g. 'type,name=descending'
+ --source string Filter remotes by source, e.g. 'file' or 'environment'
+ --type string Filter remotes by type

See the global flags page for global options not listed here.

-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -4530,9 +4578,12 @@ Options

    -s, --separator string Separator for the items in the format (default ";")
    -t, --time-format string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -4559,14 +4610,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -4578,7 +4627,7 @@ Synopsis List directories and objects in the path in JSON format. -The output is an array of Items, where each Item looks like this +The output is an array of Items, where each Item looks like this: { "Hashes" : { @@ -4600,34 +4649,46 @@ The output is an array of Items, where each Item looks like this "Tier" : "hot", } -If --hash is not specified, the Hashes property will be omitted. The -types of hash can be specified with the --hash-type parameter (which may -be repeated). If --hash-type is set then it implies --hash. +The exact set of properties included depends on the backend: -If --no-modtime is specified then ModTime will be blank. This can speed -things up on remotes where reading the ModTime takes an extra request -(e.g. s3, swift). +- The property IsBucket will only be included for bucket-based + remotes, and only for directories that are buckets. It will always + be omitted when value is not true. +- Properties Encrypted and EncryptedPath will only be included for + encrypted remotes, and (as mentioned below) only if the --encrypted + option is set. -If --no-mimetype is specified then MimeType will be blank. This can -speed things up on remotes where reading the MimeType takes an extra -request (e.g. s3, swift). +Different options may also affect which properties are included: -If --encrypted is not specified the Encrypted will be omitted. +- If --hash is not specified, the Hashes property will be omitted. The + types of hash can be specified with the --hash-type parameter (which + may be repeated). If --hash-type is set then it implies --hash. +- If --no-modtime is specified then ModTime will be blank. This can + speed things up on remotes where reading the ModTime takes an extra + request (e.g. s3, swift). +- If --no-mimetype is specified then MimeType will be blank. This can + speed things up on remotes where reading the MimeType takes an extra + request (e.g. s3, swift). +- If --encrypted is not specified the Encrypted and EncryptedPath + properties will be omitted - even for encrypted remotes. +- If --metadata is set then an additional Metadata property will be + returned. This will have metadata in rclone standard format as a + JSON object. -If --dirs-only is not specified files in addition to directories are -returned +The default is to list directories and files/objects, but this can be +changed with the following options: -If --files-only is not specified directories in addition to the files -will be returned. +- If --dirs-only is specified then directories will be returned only, + no files/objects. +- If --files-only is specified then files will be returned only, no + directories. -If --metadata is set then an additional Metadata key will be returned. -This will have metadata in rclone standard format as a JSON object. - -if --stat is set then a single JSON blob will be returned about the item -pointed to. 
This will return an error if the item isn't found. However
-on bucket based backends (like s3, gcs, b2, azureblob etc) if the item
-isn't found it will return an empty directory as it isn't possible to
-tell empty directories from missing directories there.
+If --stat is set then the output is not an array of items, but
+instead a single JSON blob will be returned about the item pointed to.
+This will return an error if the item isn't found, however on bucket
+based backends (like s3, gcs, b2, azureblob etc) if the item isn't found
+it will return an empty directory, as it isn't possible to tell empty
+directories from missing directories there.

The Path field will only show folders below the remote path being
listed. If "remote:path" contains the file "subfolder/file.txt", the
@@ -4635,9 +4696,6 @@
Path for "file.txt" will be "subfolder/file.txt", not
"remote:path/subfolder/file.txt". When used without --recursive the Path
will always be the same as Name.

-If the directory is a bucket in a bucket-based backend, then "IsBucket"
-will be set to true. This key won't be present unless it is "true".
-
The time is in RFC3339 format with up to nanosecond precision. The
number of decimal digits in the seconds will depend on the precision
that the remote can hold the times, so if times are accurate to the
@@ -4647,7 +4705,8 @@
accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits
will be shown ("2017-05-31T16:15:57+01:00").

The whole output can be processed as a JSON blob, or alternatively it
-can be processed line by line as each item is written one to a line.
+can be processed line by line as each item is written on individual
+lines (except with --stat).

Any of the filtering options can be applied to this command.

@@ -4689,9 +4748,12 @@ Options

    -R, --recursive Recurse into the listing
    --stat Just return the info for the pointed to file

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -4718,14 +4780,12 @@ Flags for filtering directory listings.

Listing Options

-Flags for listing directories.
+Flags for listing directories

    --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
    --fast-list Use recursive list if available; uses more memory but fewer transactions

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -5425,6 +5485,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

Rclone will start reading a chunk of size --vfs-read-chunk-size, and
then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -5441,6 +5507,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
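+
+As an illustration only (the remote name and mount point below are
+placeholders), the sequence shown above is what a mount with
+--vfs-read-chunk-size 100M and --vfs-read-chunk-size-limit 500M would
+produce:
+
+    rclone mount remote: /path/to/mountpoint --vfs-read-chunk-size 100M --vfs-read-chunk-size-limit 500M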
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or
+very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M.
+In testing with AWS S3 the performance scaled roughly as the
+--vfs-read-chunk-streams setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more --vfs-read-chunk-streams in order to get
+the throughput.
+
VFS Performance

These flags may be used to enable/disable features of the VFS for
@@ -5568,9 +5658,9 @@ Options

    --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows)
    --devname string Set the device name - default is remote:path
    --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
- --dir-perms FileMode Directory permissions (default 0777)
+ --dir-perms FileMode Directory permissions (default 777)
    --direct-io Use Direct IO, disables caching of data
- --file-perms FileMode File permissions (default 0666)
+ --file-perms FileMode File permissions (default 666)
    --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required)
    --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
    -h, --help help for mount
@@ -5586,7 +5676,7 @@ Options
    --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
    --read-only Only allow read-only access
    --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
- --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002)
    --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
    --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
    --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
@@ -5599,6 +5689,7 @@ Options
    --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
    --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
    --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
    --vfs-refresh Refreshes the directory cache recursively in the background on start
    --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
@@ -5607,9 +5698,12 @@ Options
    --volname string Set the volume name (supported on Windows and OSX only)
    --write-back-cache Makes kernel buffer writes before
sending them to rclone (without this, writethrough caching is used) (not supported on Windows)

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -5634,9 +5728,7 @@ Flags for filtering directory listings.

    --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
    --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -5683,13 +5775,16 @@ Options

    -h, --help help for moveto

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Copy Options

-Flags for anything which can Copy a file.
+Flags for anything which can copy a file

    --check-first Do all the checks before starting transfers
- -c, --checksum Check for changes with size & checksum (if available, or fallback to size only).
- --compare-dest stringArray Include additional comma separated server-side paths during comparison
+ -c, --checksum Check for changes with size & checksum (if available, or fallback to size only)
+ --compare-dest stringArray Include additional server-side paths during comparison
    --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination
    --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD)
    --ignore-case-sync Ignore case when synchronizing
@@ -5722,7 +5817,7 @@ Flags for anything which can Copy a file.

Important Options

-Important flags useful for most commands.
+Important flags useful for most commands

    -n, --dry-run Do a trial run with no permanent changes
    -i, --interactive Enable interactive mode
@@ -5730,7 +5825,7 @@ Important flags useful for most commands.

Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -5757,14 +5852,12 @@ Flags for filtering directory listings.

Listing Options

-Flags for listing directories.
+Flags for listing directories

    --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
    --fast-list Use recursive list if available; uses more memory but fewer transactions

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -5823,7 +5916,8 @@ flags have the following meaning:

    ! means an error occurred while reading this directory

This is an homage to the ncdu tool but for rclone remotes. It is missing
-lots of features at the moment but is useful as it stands.
+lots of features at the moment but is useful as it stands. Unlike ncdu
+it does not show excluded files.

Note that it might take some time to delete big files/directories. The
UI won't respond in the meantime since the deletion is done
@@ -5838,9 +5932,12 @@ Options

    -h, --help help for ncdu

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -5867,14 +5964,12 @@ Flags for filtering directory listings.

Listing Options

-Flags for listing directories.
+Flags for listing directories

    --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
    --fast-list Use recursive list if available; uses more memory but fewer transactions

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -6574,6 +6669,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

Rclone will start reading a chunk of size --vfs-read-chunk-size, and
then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -6590,6 +6691,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or
+very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M.
+In testing with AWS S3 the performance scaled roughly as the
+--vfs-read-chunk-streams setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more --vfs-read-chunk-streams in order to get
+the throughput.
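+
+For example (an illustrative starting point using the suggested values
+above, not a tuned recommendation; the remote name and mount point are
+placeholders), such a mount might look like:
+
+    rclone nfsmount remote: /path/to/mountpoint --vfs-read-chunk-size 4M --vfs-read-chunk-streams 16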
+ VFS Performance These flags may be used to enable/disable features of the VFS for @@ -6718,16 +6843,18 @@ Options --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for nfsmount --max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki) --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset) --network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only) + --nfs-cache-dir string The directory the NFS handle cache will use if set --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000) + --nfs-cache-type memory|disk|symlink Type of NFS handle cache to use (default memory) --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files @@ -6736,9 +6863,9 @@ Options -o, --option stringArray Option for libfuse/WinFsp (repeat if required) --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access - --sudo Use sudo to run the mount command as root. + --sudo Use sudo to run the mount/umount commands as root. 
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -6751,6 +6878,7 @@ Options --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -6759,9 +6887,12 @@ Options --volname string Set the volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -6786,9 +6917,7 @@ Flags for filtering directory listings. --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -6828,7 +6957,7 @@ Options See the global flags page for global options not listed here. -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -6848,6 +6977,14 @@ A username and password can be passed in with --user and --pass. Note that --rc-addr, --rc-user, --rc-pass will be read also for --url, --user, --pass. +The --unix-socket flag can be used to connect over a unix socket like +this + + # start server on /tmp/my.socket + rclone rcd --rc-addr unix:///tmp/my.socket + # Connect to it + rclone rc --unix-socket /tmp/my.socket core/stats + Arguments should be passed in as parameter=value. The result will be returned as a JSON object by default. @@ -6890,19 +7027,20 @@ Use rclone rc to see a list of all possible commands. 
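+
+For example, using the built-in rc/noop command, which echoes its input
+parameters back, the two styles of passing parameters look like this:
+
+    rclone rc rc/noop param1=one param2=two
+    rclone rc --json '{"param1": "one", "param2": "two"}' rc/noop
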
Options

- -a, --arg stringArray Argument placed in the "arg" array
- -h, --help help for rc
- --json string Input JSON - use instead of key=value args
- --loopback If set connect to this rclone instance not via HTTP
- --no-output If set, don't output the JSON result
- -o, --opt stringArray Option in the form name=value or name placed in the "opt" array
- --pass string Password to use to connect to rclone remote control
- --url string URL to connect to rclone remote control (default "http://localhost:5572/")
- --user string Username to use to rclone remote control
+ -a, --arg stringArray Argument placed in the "arg" array
+ -h, --help help for rc
+ --json string Input JSON - use instead of key=value args
+ --loopback If set connect to this rclone instance not via HTTP
+ --no-output If set, don't output the JSON result
+ -o, --opt stringArray Option in the form name=value or name placed in the "opt" array
+ --pass string Password to use to connect to rclone remote control
+ --unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly
+ --url string URL to connect to rclone remote control (default "http://localhost:5572/")
+ --user string Username to use to rclone remote control

See the global flags page for global options not listed here.

-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -6912,8 +7050,7 @@

Copies standard input to file on remote.

Synopsis

-rclone rcat reads from standard input (stdin) and copies it to a single
-remote file.
+Reads from standard input (stdin) and copies it to a single remote file.

    echo "hello world" | rclone rcat remote:path/to/file
    ffmpeg - | rclone rcat remote:path/to/file
@@ -6950,17 +7087,18 @@ Options

    -h, --help help for rcat
    --size int File size hint to preallocate (default -1)

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Important Options

-Important flags useful for most commands.
+Important flags useful for most commands

    -n, --dry-run Do a trial run with no permanent changes
    -i, --interactive Enable interactive mode
    -v, --verbose count Print lots more stuff (repeat for more)

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -6996,6 +7134,8 @@
the authentication - this is expected to be done with file system
permissions.

--rc-addr may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to
+accomplish the same.

--rc-server-read-timeout and --rc-server-write-timeout can be used to
control the timeouts on the server. Note that this is the total time for
@@ -7027,7 +7167,23 @@ client certificate authority certificate.

--rc-min-tls-version is minimum TLS version that is acceptable. Valid
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").

-Template
+Socket activation
+
+Instead of the listening addresses specified above, rclone will listen
+to all FDs passed by the service manager, if any (and ignore any
+arguments passed with --rc-addr).
+
+This allows rclone to be a socket-activated service.
It can be
+configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the
+systemd-socket-activate command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000
+over TCP.
+
+Template

--rc-template allows a user to specify a custom markup template for HTTP
and WebDAV serve functions. The server exports the following markup to
@@ -7134,17 +7290,20 @@ Options

    -h, --help help for rcd

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
RC Options

-Flags to control the Remote Control API.
+Flags to control the Remote Control API

    --rc Enable the remote control server
- --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
+ --rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
    --rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
    --rc-baseurl string Prefix for URLs - leave blank for root
    --rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
    --rc-client-ca string Client certificate authority to verify clients with
- --rc-enable-metrics Enable prometheus metrics on /metrics
+ --rc-enable-metrics Enable the Prometheus metrics path at the remote control server
    --rc-files string Path to local files to serve on the HTTP server
    --rc-htpasswd string A htpasswd file - if not provided no authentication is done
    --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
@@ -7168,9 +7327,7 @@ Flags to control the Remote Control API.
    --rc-web-gui-no-open-browser Don't open the browser automatically
    --rc-web-gui-update Check and update to latest version of web gui

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -7204,17 +7361,18 @@ Options

    -h, --help help for rmdirs
    --leave-root Do not remove root directory if empty

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Important Options

-Important flags useful for most commands.
+Important flags useful for most commands

    -n, --dry-run Do a trial run with no permanent changes
    -i, --interactive Enable interactive mode
    -v, --verbose count Print lots more stuff (repeat for more)

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -7287,7 +7445,7 @@ Options

See the global flags page for global options not listed here.

-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.

@@ -7312,7 +7470,7 @@ Options

See the global flags page for global options not listed here.

-SEE ALSO
+See Also

- rclone - Show help for rclone commands, flags and backends.
- rclone serve dlna - Serve remote:path over DLNA

@@ -7343,6 +7501,10 @@ based on media formats or file extensions. Additionally, there is no
media transcoding support. This means that some players might show files
that they are not able to play back correctly.

+Rclone will add external subtitle files (.srt) to videos if they have
+the same filename as the video file itself (except the extension),
+either in the same directory as the video, or in a "Subs" subdirectory.
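+
+For example, with a layout like the following (the names are
+illustrative), the subtitles would be picked up from either location:
+
+    movies/MyFilm.mp4
+    movies/MyFilm.srt
+    movies/Subs/MyFilm.srt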
+
Server options

Use --addr to specify which IP address and port the server should listen
@@ -7580,6 +7742,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

Rclone will start reading a chunk of size --vfs-read-chunk-size, and
then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -7596,6 +7764,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or
+very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M.
+In testing with AWS S3 the performance scaled roughly as the
+--vfs-read-chunk-streams setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more --vfs-read-chunk-streams in order to get
+the throughput.
+
VFS Performance

These flags may be used to enable/disable features of the VFS for
@@ -7714,8 +7906,8 @@ Options

    --addr string The ip:port or :port to bind the DLNA http server to (default ":7879")
    --announce-interval Duration The interval between SSDP announcements (default 12m0s)
    --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
- --dir-perms FileMode Directory permissions (default 0777)
- --file-perms FileMode File permissions (default 0666)
+ --dir-perms FileMode Directory permissions (default 777)
+ --file-perms FileMode File permissions (default 666)
    --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
    -h, --help help for dlna
    --interface stringArray The interface to use for SSDP (repeat as necessary)
@@ -7727,7 +7919,7 @@ Options
    --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
    --read-only Only allow read-only access
    --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
- --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002)
    --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
    --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
    --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
@@ -7740,15 +7932,19 @@ Options

    --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using
cache-mode full
    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
    --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
    --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
    --vfs-refresh Refreshes the directory cache recursively in the background on start
    --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
    --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
    --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -7773,9 +7969,7 @@ Flags for filtering directory listings.

    --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
    --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone serve - Serve a remote over a protocol.

@@ -8049,6 +8243,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

Rclone will start reading a chunk of size --vfs-read-chunk-size, and
then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -8065,6 +8265,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or
+very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M.
+In testing with AWS S3 the performance scaled roughly as the
+--vfs-read-chunk-streams setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more --vfs-read-chunk-streams in order to get
+the throughput.
+ VFS Performance These flags may be used to enable/disable features of the VFS for @@ -8193,9 +8417,9 @@ Options --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --forget-state Skip restoring previous state --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -8215,7 +8439,7 @@ Options --socket-addr string Address or absolute path (default: /run/docker/plugins/rclone.sock) --socket-gid int GID for unix socket (default: current process GID) (default 1000) --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -8228,6 +8452,7 @@ Options --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -8236,9 +8461,12 @@ Options --volname string Set the volume name (supported on Windows and OSX only) --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -8263,9 +8491,7 @@ Flags for filtering directory listings. --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone serve - Serve a remote over a protocol. 
@@ -8522,6 +8748,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

Rclone will start reading a chunk of size --vfs-read-chunk-size, and
then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -8538,6 +8770,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or
+very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M.
+In testing with AWS S3 the performance scaled roughly as the
+--vfs-read-chunk-streams setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more --vfs-read-chunk-streams in order to get
+the throughput.
+
VFS Performance

These flags may be used to enable/disable features of the VFS for
@@ -8728,8 +8984,8 @@ Options

    --auth-proxy string A program to use to create the backend from the auth
    --cert string TLS PEM key (concatenation of certificate and CA certificate)
    --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
- --dir-perms FileMode Directory permissions (default 0777)
- --file-perms FileMode File permissions (default 0666)
+ --dir-perms FileMode Directory permissions (default 777)
+ --file-perms FileMode File permissions (default 666)
    --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
    -h, --help help for ftp
    --key string TLS PEM Private key
@@ -8742,7 +8998,7 @@ Options
    --public-ip string Public IP address to advertise for passive connections
    --read-only Only allow read-only access
    --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
- --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002)
    --user string User name for authentication (default "anonymous")
    --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
    --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
@@ -8756,15 +9012,19 @@ Options

    --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
    --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is 
unlimited) (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
    --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
    --vfs-refresh Refreshes the directory cache recursively in the background on start
    --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
    --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
    --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -8789,9 +9049,7 @@ Flags for filtering directory listings.

    --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
    --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone serve - Serve a remote over a protocol.

@@ -8827,7 +9085,9 @@
or just by using an absolute path name. Note that unix sockets bypass the
authentication - this is expected to be done with file system
permissions.

---addr may be repeated to listen on multiple IPs/ports/sockets.
+--addr may be repeated to listen on multiple IPs/ports/sockets. Socket
+activation, described further below, can also be used to accomplish the
+same.

--server-read-timeout and --server-write-timeout can be used to control
the timeouts on the server. Note that this is the total time for a
@@ -8859,7 +9119,23 @@ authority certificate.

--min-tls-version is minimum TLS version that is acceptable. Valid
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").

-Template
+Socket activation
+
+Instead of the listening addresses specified above, rclone will listen
+to all FDs passed by the service manager, if any (and ignore any
+arguments passed with --addr).
+
+This allows rclone to be a socket-activated service. It can be
+configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the
+systemd-socket-activate command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000
+over TCP.
+
+Template

--template allows a user to specify a custom markup template for HTTP
and WebDAV serve functions. The server exports the following markup to
@@ -9186,6 +9462,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

Rclone will start reading a chunk of size --vfs-read-chunk-size, and
then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -9202,6 +9484,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or
+very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M.
+In testing with AWS S3 the performance scaled roughly as the
+--vfs-read-chunk-streams setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more --vfs-read-chunk-streams in order to get
+the throughput.
+
VFS Performance

These flags may be used to enable/disable features of the VFS for
@@ -9388,15 +9694,15 @@ that rclone supports.

Options

- --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
+ --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
    --allow-origin string Origin which cross-domain request (CORS) can be executed from
    --auth-proxy string A program to use to create the backend from the auth
    --baseurl string Prefix for URLs - leave blank for root
    --cert string TLS PEM key (concatenation of certificate and CA certificate)
    --client-ca string Client certificate authority to verify clients with
    --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
- --dir-perms FileMode Directory permissions (default 0777)
- --file-perms FileMode File permissions (default 0666)
+ --dir-perms FileMode Directory permissions (default 777)
+ --file-perms FileMode File permissions (default 666)
    --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
    -h, --help help for http
    --htpasswd string A htpasswd file - if not provided no authentication is done
@@ -9415,7 +9721,7 @@ Options
    --server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
    --template string User-specified template
    --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
- --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+ --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002)
    --user string User name for authentication
    --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
    --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
@@ -9429,15 +9735,19 @@ Options

    --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
    --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
    --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
    --vfs-refresh Refreshes the directory cache recursively in the background on start
--vfs-used-is-size rclone size Use the rclone size algorithm for Used size
    --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
    --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -9462,9 +9772,7 @@ Flags for filtering directory listings.

    --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
    --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

- rclone serve - Serve a remote over a protocol.

@@ -9476,41 +9784,66 @@

Synopsis

Create an NFS server that serves the given remote over the network.

-The primary purpose for this command is to enable mount command on
+This implements an NFSv3 server to serve any rclone remote via NFS.
+
+The primary purpose for this command is to enable the mount command on
recent macOS versions where installing FUSE is very cumbersome.

-Since this is running on NFSv3, no authentication method is available.
-Any client will be able to access the data. To limit access, you can use
-serve NFS on loopback address and rely on secure tunnels (such as SSH).
-For this reason, by default, a random TCP port is chosen and loopback
-interface is used for the listening address; meaning that it is only
-available to the local machine. If you want other machines to access the
-NFS mount over local network, you need to specify the listening address
-and port using --addr flag.
+This server does not implement any authentication so any client will be
+able to access the data. To limit access, you can use serve nfs on the
+loopback address or rely on secure tunnels (such as SSH) or use
+firewalling.

-Modifying files through NFS protocol requires VFS caching. Usually you
-will need to specify --vfs-cache-mode in order to be able to write to
-the mountpoint (full is recommended). If you don't specify VFS cache
-mode, the mount will be read-only. Note also that
---nfs-cache-handle-limit controls the maximum number of cached file
+For this reason, by default, a random TCP port is chosen and the
+loopback interface is used for the listening address; meaning
+that it is only available to the local machine. If you want other
+machines to access the NFS mount over local network, you need to specify
+the listening address and port using the --addr flag.
+
+Modifying files through the NFS protocol requires VFS caching. Usually
+you will need to specify --vfs-cache-mode in order to be able to write
+to the mountpoint (full is recommended). If you don't specify VFS cache
+mode, the mount will be read-only.
+
+--nfs-cache-type controls the type of the NFS handle cache. By default
+this is memory where new handles will be randomly allocated when needed.
+These are stored in memory. If the server is restarted the handle cache
+will be lost and connected NFS clients will get stale handle errors.
+
+--nfs-cache-type disk uses an on disk NFS handle cache. Rclone hashes
+the path of the object and stores it in a file named after the hash.
+These hashes are stored on disk in the directory controlled by
+--cache-dir or the exact directory may be specified with
+--nfs-cache-dir. Using this means that the NFS server can be restarted
+at will without affecting the connected clients.
+
+--nfs-cache-type symlink is similar to --nfs-cache-type disk in that it
+uses an on disk cache, but the cache entries are held as symlinks.
+Rclone will use the handle of the underlying file as the NFS handle
+which improves performance. This sort of cache can't be backed up and
+restored as the underlying handles will change. This is Linux only.
+
+--nfs-cache-handle-limit controls the maximum number of cached NFS
 handles stored by the caching handler. This should not be set too low
 or you may experience errors when trying to access files. The default
 is 1000000, but consider lowering this limit if the server's system
-resource usage causes problems.
+resource usage causes problems. This is only used by the memory type
+cache.

 To serve NFS over the network use the following command:

     rclone serve nfs remote: --addr 0.0.0.0:$PORT --vfs-cache-mode=full

-We specify a specific port that we can use in the mount command:
+This specifies a port that can be used in the mount command. To mount
+the server under Linux/macOS, use the following command:

-To mount the server under Linux/macOS, use the following command:
+    mount -t nfs -o port=$PORT,mountport=$PORT,tcp $HOSTNAME:/ path/to/mountpoint

-    mount -oport=$PORT,mountport=$PORT $HOSTNAME: path/to/mountpoint
+Where $PORT is the same port number used in the serve nfs command and
+$HOSTNAME is the network address of the machine that serve nfs was run
+on.

-Where $PORT is the same port number we used in the serve nfs command.
-
-This feature is only available on Unix platforms.
+This command is only available on Unix platforms.

 VFS - Virtual File System

@@ -9738,6 +10071,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+   --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

 Rclone will start reading a chunk of size --vfs-read-chunk-size, and
 then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -9754,6 +10093,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

 Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or very high
+bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M.
+In testing with AWS S3 the performance scaled roughly as the
+--vfs-read-chunk-streams setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more --vfs-read-chunk-streams in order to get
+the throughput.
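+
+As a concrete sketch of the tuning advice above (the values are
+illustrative starting points rather than recommendations, and the same
+flags apply to rclone mount and the other serve commands):
+
+    # Serve with 16 parallel 4M read streams - benchmark against your
+    # own backend and link latency before settling on values.
+    rclone serve nfs remote: --addr 127.0.0.1:$PORT \
+        --vfs-cache-mode full \
+        --vfs-read-chunk-size 4M \
+        --vfs-read-chunk-streams 16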
+ VFS Performance These flags may be used to enable/disable features of the VFS for @@ -9871,18 +10234,20 @@ Options --addr string IPaddress:Port or :Port to bind server to --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for nfs + --nfs-cache-dir string The directory the NFS handle cache will use if set --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000) + --nfs-cache-type memory|disk|symlink Type of NFS handle cache to use (default memory) --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -9895,15 +10260,19 @@ Options --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -9928,9 +10297,7 @@ Flags for filtering directory listings. --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone serve - Serve a remote over a protocol. 
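+
+Tying the serve nfs handle cache options above together, a sketch of a
+server that can be restarted without handing out stale handles (the
+cache directory path is illustrative):
+
+    # Handles are persisted under --nfs-cache-dir, so a restart does
+    # not invalidate handles held by connected clients.
+    rclone serve nfs remote: --addr :2049 \
+        --vfs-cache-mode full \
+        --nfs-cache-type disk \
+        --nfs-cache-dir /var/cache/rclone-nfs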
@@ -10034,7 +10401,9 @@ or just by using an absolute path name. Note that unix sockets bypass the
 authentication - this is expected to be done with file system
 permissions.

--addr may be repeated to listen on multiple IPs/ports/sockets.
+--addr may be repeated to listen on multiple IPs/ports/sockets. Socket
+activation, described further below, can also be used to accomplish the
+same.

 --server-read-timeout and --server-write-timeout can be used to control
 the timeouts on the server. Note that this is the total time for a
@@ -10066,7 +10435,23 @@ authority certificate.

 --min-tls-version is minimum TLS version that is acceptable. Valid
 values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").

-Authentication
+Socket activation
+
+Instead of the listening addresses specified above, rclone will listen
+to all FDs passed by the service manager, if any (and ignore any
+arguments passed via --addr).
+
+This allows rclone to be a socket-activated service. It can be
+configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the systemd-socket-activate
+command:
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000
+over TCP.
+
+Authentication

 By default this will serve files without needing a login.

@@ -10098,7 +10483,7 @@ Use --salt to change the password hashing salt from the default.

 Options

-   --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
+   --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
    --allow-origin string Origin which cross-domain request (CORS) can be executed from
    --append-only Disallow deletion of repository data
    --baseurl string Prefix for URLs - leave blank for root
    --cert string TLS PEM key (concatenation of certificate and CA certificate)
    --client-ca string Client certificate authority to verify clients with
@@ -10121,7 +10506,7 @@ Options

 See the global flags page for global options not listed here.

-SEE ALSO
+See Also

 - rclone serve - Serve a remote over a protocol.

@@ -10253,6 +10638,34 @@ serve s3 currently supports the following operations.

 Other operations will return error Unimplemented.

+Authentication
+
+By default this will serve files without needing a login.
+
+You can either use an htpasswd file which can take lots of users, or set
+a single username and password with the --user and --pass flags.
+
+If no static users are configured by either of the above methods, and
+client certificates are required by the --client-ca flag passed to the
+server, the client certificate common name will be considered as the
+username.
+
+Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is in
+standard apache format and supports MD5, SHA1 and BCrypt for basic
+authentication. Bcrypt is recommended.
+
+To create an htpasswd file:
+
+    touch htpasswd
+    htpasswd -B htpasswd user
+    htpasswd -B htpasswd anotherUser
+
+The password file can be updated while rclone is running.
+
+Use --realm to set the authentication realm.
+
+Use --salt to change the password hashing salt from the default.
+
 Server options

 Use --addr to specify which IP address and port the server should listen
@@ -10268,7 +10681,9 @@ or just by using an absolute path name. Note that unix sockets bypass the
 authentication - this is expected to be done with file system
 permissions.

--addr may be repeated to listen on multiple IPs/ports/sockets.
+--addr may be repeated to listen on multiple IPs/ports/sockets. Socket
+activation, described further below, can also be used to accomplish the
+same.

 --server-read-timeout and --server-write-timeout can be used to control
 the timeouts on the server. Note that this is the total time for a
@@ -10300,7 +10715,23 @@ authority certificate.

 --min-tls-version is minimum TLS version that is acceptable. Valid
 values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").

-VFS - Virtual File System
+Socket activation
+
+Instead of the listening addresses specified above, rclone will listen
+to all FDs passed by the service manager, if any (and ignore any
+arguments passed via --addr).
+
+This allows rclone to be a socket-activated service. It can be
+configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the systemd-socket-activate
+command:
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000
+over TCP.
+
+VFS - Virtual File System

 This command uses the VFS layer. This adapts the cloud storage objects
 that rclone uses into something which looks much more like a disk filing
@@ -10526,6 +10957,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+   --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

 Rclone will start reading a chunk of size --vfs-read-chunk-size, and
 then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -10542,6 +10979,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

 Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or very high
+bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M.
+In testing with AWS S3 the performance scaled roughly as the
+--vfs-read-chunk-streams setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more --vfs-read-chunk-streams in order to get
+the throughput.
+
 VFS Performance

 These flags may be used to enable/disable features of the VFS for
@@ -10657,19 +11118,21 @@ only with caching.
 Options

-   --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
+   --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
    --allow-origin string Origin which cross-domain request (CORS) can be executed from
    --auth-key stringArray Set key pair for v4 authorization: access_key_id,secret_access_key
+   --auth-proxy string A program to use to create the backend from the auth
    --baseurl string Prefix for URLs - leave blank for root
    --cert string TLS PEM key (concatenation of certificate and CA certificate)
    --client-ca string Client certificate authority to verify clients with
    --dir-cache-time Duration Time to cache directory entries for (default 5m0s)
-   --dir-perms FileMode Directory permissions (default 0777)
+   --dir-perms FileMode Directory permissions (default 777)
    --etag-hash string Which hash to use for the ETag, or auto or blank for off (default "MD5")
-   --file-perms FileMode File permissions (default 0666)
+   --file-perms FileMode File permissions (default 666)
    --force-path-style If true use path style access if false use virtual hosted style (default true)
    --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000)
    -h, --help help for s3
+   --htpasswd string A htpasswd file - if not provided no authentication is done
    --key string TLS PEM Private key
    --max-header-bytes int Maximum size of request header (default 4096)
    --min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
@@ -10677,12 +11140,16 @@ Options
    --no-cleanup Not to cleanup empty folder after object is deleted
    --no-modtime Don't read/write the modification time (can speed things up)
    --no-seek Don't allow seeking in files
+   --pass string Password for authentication
    --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
    --read-only Only allow read-only access
+   --realm string Realm for authentication
+   --salt string Password hashing salt (default "dlPL2MqE")
    --server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
    --server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
    --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000)
-   --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+   --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002)
+   --user string User name for authentication
    --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
    --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s)
    --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off)
@@ -10695,15 +11162,19 @@ Options
    --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full
    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi)
    --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off)
+   --vfs-read-chunk-streams int The number of parallel streams to read at once
    --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms)
    --vfs-refresh Refreshes the directory cache recursively in the background on start
    --vfs-used-is-size rclone size Use the rclone size algorithm for Used size
    --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s)
    --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s)

+Options shared with other commands are described next. See the global
+flags page for global options not listed here.
+
 Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -10728,9 +11199,7 @@ Flags for filtering directory listings.
    --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
    --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

 - rclone serve - Serve a remote over a protocol.

@@ -10772,6 +11241,19 @@ directory.

 By default the server binds to localhost:2022 - if you want it to be
 reachable externally then supply --addr :2022 for example.

+This also supports being run with socket activation, in which case it
+will listen on the first passed FD. It can be configured with .socket
+and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the systemd-socket-activate
+command:
+
+    systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/
+
+This will socket-activate rclone on the first connection to port 2222
+over TCP.
+
 Note that the default of --vfs-cache-mode off is fine for the rclone
 sftp backend, but it may not be with other SFTP clients.

@@ -11018,6 +11500,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+   --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

 Rclone will start reading a chunk of size --vfs-read-chunk-size, and
 then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -11034,6 +11522,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

 Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or very high
+bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M.
+In testing with AWS S3 the performance scaled roughly as the
+--vfs-read-chunk-streams setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more --vfs-read-chunk-streams in order to get
+the throughput.
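+
+To make the socket activation support described earlier in this section
+permanent, it can be wired into systemd with a pair of unit files. A
+minimal sketch, assuming rclone is installed at /usr/bin/rclone and a
+remote called remote: is configured (unit names, port and paths are
+illustrative):
+
+    # /etc/systemd/system/rclone-sftp.socket
+    [Socket]
+    ListenStream=2022
+
+    [Install]
+    WantedBy=sockets.target
+
+    # /etc/systemd/system/rclone-sftp.service
+    # Started by rclone-sftp.socket on the first connection; rclone
+    # serves on the FD passed in by systemd rather than --addr.
+    [Service]
+    ExecStart=/usr/bin/rclone serve sftp remote:path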
+ VFS Performance These flags may be used to enable/disable features of the VFS for @@ -11224,8 +11736,8 @@ Options --auth-proxy string A program to use to create the backend from the auth --authorized-keys string Authorized keys file (default "~/.ssh/authorized_keys") --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for sftp --key stringArray SSH private host key file (Can be multi-valued, leave blank to auto generate) @@ -11238,7 +11750,7 @@ Options --read-only Only allow read-only access --stdio Run an sftp server on stdin/stdout --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -11252,15 +11764,19 @@ Options --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -11285,9 +11801,7 @@ Flags for filtering directory listings. --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone serve - Serve a remote over a protocol. 
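+
+As a quick end-to-end sketch of serving and connecting (user name,
+password and port are illustrative, and --user/--pass are assumed from
+the serve sftp authentication flags):
+
+    # Serve a remote over SFTP with single-user authentication
+    rclone serve sftp remote:path --addr :2022 --user demo --pass secret
+
+    # Connect from another terminal with a standard OpenSSH client
+    sftp -P 2022 demo@localhost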
@@ -11338,6 +11852,19 @@ Basic authentication enabled for SSL and for non-SSL connections

 https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint

+Serving over a unix socket
+
+You can serve webdav on a unix socket like this:
+
+    rclone serve webdav --addr unix:///tmp/my.socket remote:path
+
+and connect to it like this using rclone and the webdav backend:
+
+    rclone --webdav-unix-socket /tmp/my.socket --webdav-url http://localhost lsf :webdav:
+
+Note that there is no authentication on the http protocol - this is
+expected to be done by the permissions on the socket.
+
 Server options

 Use --addr to specify which IP address and port the server should listen
@@ -11353,7 +11880,9 @@ or just by using an absolute path name. Note that unix sockets bypass the
 authentication - this is expected to be done with file system
 permissions.

--addr may be repeated to listen on multiple IPs/ports/sockets.
+--addr may be repeated to listen on multiple IPs/ports/sockets. Socket
+activation, described further below, can also be used to accomplish the
+same.

 --server-read-timeout and --server-write-timeout can be used to control
 the timeouts on the server. Note that this is the total time for a
@@ -11385,7 +11914,23 @@ authority certificate.

 --min-tls-version is minimum TLS version that is acceptable. Valid
 values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0").

-Template
+Socket activation
+
+Instead of the listening addresses specified above, rclone will listen
+to all FDs passed by the service manager, if any (and ignore any
+arguments passed via --addr).
+
+This allows rclone to be a socket-activated service. It can be
+configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the systemd-socket-activate
+command:
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000
+over TCP.
+
+Template

 --template allows a user to specify a custom markup template for HTTP
 and WebDAV serve functions. The server exports the following markup to
@@ -11712,6 +12257,12 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+   --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the
+--vfs-read-chunk-streams parameter.
+
+--vfs-read-chunk-streams == 0

 Rclone will start reading a chunk of size --vfs-read-chunk-size, and
 then double the size for each read. When --vfs-read-chunk-size-limit is
@@ -11728,6 +12279,30 @@ the result would be 0-100M, 100M-300M, 300M-700M, 700M-1200M,

 Setting --vfs-read-chunk-size to 0 or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+--vfs-read-chunk-streams > 0
+
+Rclone reads --vfs-read-chunk-streams chunks of size
+--vfs-read-chunk-size concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links or very high
+bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+--vfs-read-chunk-size and --vfs-read-chunk-streams as these will depend
+on the backend in use and the latency to the backend.
+ +For high performance object stores (eg AWS S3) a reasonable place to +start might be --vfs-read-chunk-streams 16 and --vfs-read-chunk-size 4M. +In testing with AWS S3 the performance scaled roughly as the +--vfs-read-chunk-streams setting. + +Similar settings should work for high latency links, but depending on +the latency they may need more --vfs-read-chunk-streams in order to get +the throughput. + VFS Performance These flags may be used to enable/disable features of the VFS for @@ -11914,17 +12489,17 @@ that rclone supports. Options - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --disable-dir-list Disable HTML directory list on GET request for a directory --etag-hash string Which hash to use for the ETag, or auto or blank for off - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for webdav --htpasswd string A htpasswd file - if not provided no authentication is done @@ -11943,7 +12518,7 @@ Options --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -11957,15 +12532,19 @@ Options --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size --vfs-write-back Duration Time to writeback files after last use when using cache (default 5s) --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) +Options shared with other commands are described next. 
See the global
+flags page for global options not listed here.
+
 Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

    --delete-excluded Delete files on dest excluded from sync
    --exclude stringArray Exclude files matching pattern
@@ -11990,9 +12569,7 @@ Flags for filtering directory listings.
    --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off)
    --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)

-See the global flags page for global options not listed here.
-
-SEE ALSO
+See Also

 - rclone serve - Serve a remote over a protocol.

@@ -12002,10 +12579,10 @@ Changes storage class/tier of objects in remote.

 Synopsis

-rclone settier changes storage tier or class at remote if supported. Few
-cloud storage services provides different storage classes on objects,
-for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and
-Archive, Google Cloud Storage, Regional Storage, Nearline, Coldline etc.
+Changes storage tier or class at remote if supported. A few cloud
+storage services provide different storage classes on objects, for
+example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive,
+Google Cloud Storage, Regional Storage, Nearline, Coldline etc.

 Note that, certain tier changes make objects not available to access
 immediately. For example tiering to archive in azure blob storage makes
@@ -12033,7 +12610,7 @@ Options

 See the global flags page for global options not listed here.

-SEE ALSO
+See Also

 - rclone - Show help for rclone commands, flags and backends.

@@ -12060,7 +12637,7 @@ Options

 See the global flags page for global options not listed here.

-SEE ALSO
+See Also

 - rclone - Show help for rclone commands, flags and backends.
 - rclone test changenotify - Log any change notify requests for the
@@ -12087,7 +12664,7 @@ Options

 See the global flags page for global options not listed here.

-SEE ALSO
+See Also

 - rclone test - Run a test command

@@ -12111,7 +12688,7 @@ Options

 See the global flags page for global options not listed here.

-SEE ALSO
+See Also

 - rclone test - Run a test command

@@ -12121,10 +12698,10 @@ Discovers file name or other limitations for paths.

 Synopsis

-rclone info discovers what filenames and upload methods are possible to
-write to the paths passed in and how long they can be. It can take some
-time. It will write test files into the remote:path passed in. It
-outputs a bit of go code for each one.
+Discovers what filenames and upload methods are possible to write to the
+paths passed in and how long they can be. It can take some time. It will
+write test files into the remote:path passed in. It outputs a bit of go
+code for each one.

 NB this can create undeletable files and other hazards - use with care

@@ -12145,7 +12722,7 @@ Options

 See the global flags page for global options not listed here.

-SEE ALSO
+See Also

 - rclone test - Run a test command

@@ -12167,7 +12744,7 @@ Options

 See the global flags page for global options not listed here.

-SEE ALSO
+See Also

 - rclone test - Run a test command

@@ -12196,7 +12773,7 @@ Options

 See the global flags page for global options not listed here.

-SEE ALSO
+See Also

 - rclone test - Run a test command

@@ -12212,7 +12789,7 @@ Options

 See the global flags page for global options not listed here.
-SEE ALSO +See Also - rclone test - Run a test command @@ -12252,9 +12829,12 @@ Options -R, --recursive Recursively touch all files -t, --timestamp string Use specified time instead of the current time of day +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Important Options -Important flags useful for most commands. +Important flags useful for most commands -n, --dry-run Do a trial run with no permanent changes -i, --interactive Enable interactive mode @@ -12262,7 +12842,7 @@ Important flags useful for most commands. Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -12289,14 +12869,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -12306,8 +12884,8 @@ List the contents of the remote in a tree like fashion. Synopsis -rclone tree lists the contents of a remote in a similar way to the unix -tree command. +Lists the contents of a remote in a similar way to the unix tree +command. For example @@ -12356,9 +12934,12 @@ Options -U, --unsorted Leave files unsorted --version Sort files alphanumerically by version +Options shared with other commands are described next. See the global +flags page for global options not listed here. + Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings --delete-excluded Delete files on dest excluded from sync --exclude stringArray Exclude files matching pattern @@ -12385,14 +12966,12 @@ Flags for filtering directory listings. Listing Options -Flags for listing directories. +Flags for listing directories --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions -See the global flags page for global options not listed here. - -SEE ALSO +See Also - rclone - Show help for rclone commands, flags and backends. @@ -12971,21 +13550,34 @@ optional element. characters. It is optional. - HH:MM is an hour from 00:00 to 23:59. +Entries can be separated by spaces or semicolons. + +Note: Semicolons can be used as separators instead of spaces to avoid +parsing issues in environments like Docker. + An example of a typical timetable to avoid link saturation during daytime working hours could be: +Using spaces as separators: --bwlimit "08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off" -In this example, the transfer bandwidth will be set to 512 KiB/s at 8am -every day. At noon, it will rise to 10 MiB/s, and drop back to 512 +Using semicolons as separators: +--bwlimit "08:00,512k;12:00,10M;13:00,512k;18:00,30M;23:00,off" + +In these examples, the transfer bandwidth will be set to 512 KiB/s at +8am every day. At noon, it will rise to 10 MiB/s, and drop back to 512 KiB/sec at 1pm. At 6pm, the bandwidth limit will be set to 30 MiB/s, and at 11pm it will be completely disabled (full speed). Anything between 11pm and 8am will remain unlimited. 
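+
+As a sketch of where the semicolon form helps (file and remote names are
+illustrative): an env-file entry for a Docker container can carry the
+whole timetable through the RCLONE_BWLIMIT environment variable with no
+embedded spaces and therefore no quoting concerns:
+
+    # rclone.env - passed to the container via --env-file
+    RCLONE_BWLIMIT=08:00,512k;12:00,10M;23:00,off
+
+    docker run --rm --env-file rclone.env rclone/rclone sync src: dst: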
 An example of a timetable with WEEKDAY could be:

+Using spaces as separators:
 --bwlimit "Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off"

+Using semicolons as separators:
+--bwlimit "Mon-00:00,512;Fri-23:59,10M;Sat-10:00,1M;Sun-20:00,off"
+
 It means that the transfer bandwidth will be set to 512 KiB/s on
 Monday. It will rise to 10 MiB/s before the end of Friday. At 10:00 on
 Saturday it will be set to 1 MiB/s. From 20:00 on Sunday it will be
@@ -13573,10 +14165,12 @@ such as:

 - local
 - ftp
 - sftp
+- pcloud

 Without --inplace (the default) rclone will first upload to a temporary
-file with an extension like this, where XXXXXX represents a random
-string and .partial is --partial-suffix value (.partial by default).
+file with an extension like this, where XXXXXX represents a hash of the
+source file's fingerprint and .partial is the --partial-suffix value
+(.partial by default).

     original-file-name.XXXXXX.partial

@@ -14138,7 +14732,8 @@ The default is .partial.

 This flag supplies a program which should supply the config password
 when run. This is an alternative to rclone prompting for the password or
-setting the RCLONE_CONFIG_PASS variable.
+setting the RCLONE_CONFIG_PASS variable. It is also used when setting
+the config password for the first time.

 The argument to this should be a command with a space separated list of
 arguments. If one of the arguments has a space in then enclose it in ",
@@ -14151,6 +14746,10 @@ Eg

     --password-command 'echo "hello with space"'
     --password-command 'echo "hello with ""quotes"" and space"'

+Note that when changing the configuration password the environment
+variable RCLONE_PASSWORD_CHANGE=1 will be set. This can be used to
+distinguish initial decryption of the config file from the new password.
+
 See the Configuration Encryption for more info.

 See a Windows PowerShell example on the Wiki.

@@ -14752,6 +15351,13 @@ your configuration.

 There is no way to recover the configuration if you lose your password.

+You can also use
+
+- rclone config encryption set to set the config encryption directly
+- rclone config encryption remove to remove it
+- rclone config encryption check to check that it is encrypted
+  properly.
+
 rclone uses nacl secretbox which in turn uses XSalsa20 and Poly1305 to
 encrypt and authenticate your configuration with secret-key
 cryptography. The password is SHA-256 hashed, which produces the key for
@@ -14814,11 +15420,60 @@ actually using anything from such a configuration file, you can avoid it
 being loaded by overriding the location, e.g. with one of the documented
 special values for memory-only configuration. Since only backend options
 can be stored in configuration files, this is normally unnecessary for
-commands that do not operate on backends, e.g. genautocomplete. However,
-it will be relevant for commands that do operate on backends in general,
+commands that do not operate on backends, e.g. completion. However, it
+will be relevant for commands that do operate on backends in general,
 but are used without referencing a stored remote, e.g. listing local
 filesystem paths, or connection strings: rclone --config="" ls .

+Configuration Encryption Cheatsheet
+
+You can quickly apply configuration encryption without the password
+being stored or transferred in plain text.
+Detailed instructions for popular OSes:
+
+Mac
+
+- Generate and store a password
+
+    security add-generic-password -a rclone -s config -w $(openssl rand -base64 40)
+
+- Add the retrieval instruction to your .zprofile / .profile
+
+    export RCLONE_PASSWORD_COMMAND="/usr/bin/security find-generic-password -a rclone -s config -w"
+
+Linux
+
+- Prerequisite
+
+Linux doesn't come with a default password manager. Let's install the
+"pass" utility using a package manager, e.g. apt install pass,
+yum install pass, etc.; then initialize a password store:
+
+    pass init rclone
+
+- Generate and store a password
+
+    echo $(openssl rand -base64 40) | pass insert -m rclone/config
+
+- Add the retrieval instruction
+
+    export RCLONE_PASSWORD_COMMAND="/usr/bin/pass rclone/config"
+
+Windows
+
+- Generate and store a password
+
+    New-Object -TypeName PSCredential -ArgumentList "rclone", (ConvertTo-SecureString -String ([System.Web.Security.Membership]::GeneratePassword(40, 10)) -AsPlainText -Force) | Export-Clixml -Path "rclone-credential.xml"
+
+- Add the password retrieval instruction
+
+    [Environment]::SetEnvironmentVariable("RCLONE_PASSWORD_COMMAND", "[System.Runtime.InteropServices.Marshal]::PtrToStringAuto([System.Runtime.InteropServices.Marshal]::SecureStringToBSTR((Import-Clixml -Path "rclone-credential.xml").Password))")
+
+Encrypt the config file (all systems)
+
+- Execute rclone config -> s
+
+- Add/update the password from the previous steps
+
 Developer options

 These options are useful when developing or debugging rclone. There are
@@ -14966,6 +15621,24 @@ Rclone prefixes all log messages with their level in capitals, e.g. INFO
 which makes it easy to grep the log file for different kinds of
 information.

+Metrics
+
+Rclone can publish metrics in the OpenMetrics/Prometheus format.
+
+To enable the metrics endpoint, use the --metrics-addr flag. Metrics can
+also be published on the --rc-addr port if the --rc and
+--rc-enable-metrics flags are supplied, or when using rclone rcd
+--rc-enable-metrics.
+
+Rclone provides extensive configuration options for the metrics HTTP
+endpoint. These settings are grouped under the Metrics section and have
+the prefix --metrics-*.
+
+When metrics are enabled with --rc-enable-metrics, they will be
+published on the same port as the rc API. In this case, the --metrics-*
+flags will be ignored, and the HTTP endpoint configuration will be
+managed by the --rc-* parameters.
+
 Exit Code

 If any errors occur during the command execution, rclone will exit with
@@ -15139,33 +15812,30 @@ two ways of doing it, described below.

 Configuring using rclone authorize

 On the headless box run rclone config but answer N to the
-Use web browser to automatically authenticate? question.
+Use auto config? question.
+
+    Use auto config?
+     * Say Y if not sure
+     * Say N if you are working on a remote or headless machine

-    ...
-    Remote config
-    Use web browser to automatically authenticate rclone with remote?
-     * Say Y if the machine running rclone has a web browser you can use
-     * Say N if running rclone on a (remote) machine without web browser access
-    If not sure try Y. If Y failed, try N.
     y) Yes (default)
     n) No
     y/n> n
+
+    Option config_token.
+    For this to work, you will need rclone available on a machine that has
+    a web browser available.
-    For more help and alternate methods see: https://rclone.org/remote_setup/
-    Execute the following on the machine with the web browser (same rclone
-    version recommended):
-
-        rclone authorize "dropbox"
-
-    Then paste the result below:
-    result>
+        rclone authorize "onedrive"
+    Then paste the result.
+    Enter a value.
+    config_token>

 Then on your main desktop machine

-    rclone authorize "dropbox"
+    rclone authorize "onedrive"
     If your browser doesn't open automatically go to the following link:
     http://127.0.0.1:53682/auth
     Log in and authorize rclone for access
     Waiting for code...
@@ -15176,7 +15846,7 @@ Then on your main desktop machine

 Then back to the headless box, paste in the code

-    result> SECRET_TOKEN
+    config_token> SECRET_TOKEN
     --------------------
     [acd12]
     client_id =
@@ -15217,14 +15887,12 @@ box port 53682 to local machine by using the following command:

     ssh -L localhost:53682:localhost:53682 username@remote_server

 Then on the headless box run rclone config and answer Y to the
-Use web browser to automatically authenticate? question.
+Use auto config? question.
+
+    Use auto config?
+     * Say Y if not sure
+     * Say N if you are working on a remote or headless machine

-    ...
-    Remote config
-    Use web browser to automatically authenticate rclone with remote?
-     * Say Y if the machine running rclone has a web browser you can use
-     * Say N if running rclone on a (remote) machine without web browser access
-    If not sure try Y. If Y failed, try N.
     y) Yes (default)
     n) No
     y/n> y
@@ -15896,7 +16564,7 @@ Other filters

 --min-size - Don't transfer any file smaller than this

 Controls the minimum size file within the scope of an rclone command.
-Default units are KiB but abbreviations K, M, G, T or P are valid.
+Default units are KiB but abbreviations B, K, M, G, T or P are valid.

 E.g. rclone ls remote: --min-size 50k lists files on remote: of 50 KiB
 size or larger.

@@ -15906,7 +16574,7 @@ See the size option docs for more info.

 --max-size - Don't transfer any file larger than this

 Controls the maximum size file within the scope of an rclone command.
-Default units are KiB but abbreviations K, M, G, T or P are valid.
+Default units are KiB but abbreviations B, K, M, G, T or P are valid.

 E.g. rclone ls remote: --max-size 1G lists files on remote: of 1 GiB
 size or smaller.

@@ -16231,7 +16899,10 @@ Default Off.

 --rc-enable-metrics

-Enable OpenMetrics/Prometheus compatible endpoint at /metrics.
+Enable OpenMetrics/Prometheus compatible endpoint at /metrics. If more
+control over the metrics is desired (for example running it on a
+different port or with different auth) then the endpoint can be enabled
+with the --metrics-* flags instead.

 Default Off.

@@ -16520,6 +17191,119 @@ parameters.

 "HARD" for CutoffMode or DEBUG for LogLevel.
 - BandwidthSpec - this will be set and returned as a string, eg "1M".

+Option blocks
+
+The calls options/info (for the main config) and config/providers (for
+the backend config) may be used to get information on the rclone
+configuration options. This can be used to build user interfaces for
+displaying and setting any rclone option.
+
+These consist of arrays of Option blocks. These have the following
+format. Each block describes a single option.
+ + ------------------------------------------------------------------------ + Field Type Optional Description + ------------- ------------ ------------------- ------------------------- + Name string N name of the option in + snake_case + + FieldName string N name of the field used in + the rc - if blank use + Name + + Help string N help, started with a + single sentence on a + single line + + Groups string Y groups this option + belongs to - comma + separated string for + options classification + + Provider string Y set to filter on provider + + Default any N default value, if set + (and not to nil or "") + then Required does + nothing + + Value any N value to be set by flags + + Examples Examples Y predefined values that + can be selected from list + (multiple-choice option) + + ShortOpt string Y the short command line + option for this + + Hide Visibility N if non zero, this option + is hidden from the + configurator or the + command line + + Required bool N this option is required, + meaning value cannot be + empty unless there is a + default + + IsPassword bool N set if the option is a + password + + NoPrefix bool N set if the option for + this should not use the + backend prefix + + Advanced bool N set if this is an + advanced config option + + Exclusive bool N set if the answer can + only be one of the + examples (empty string + allowed unless Required + or Default is set) + + Sensitive bool N set if this option should + be redacted when using + rclone config redacted + ------------------------------------------------------------------------ + +An example of this might be the --log-level flag. Note that the Name of +the option becomes the command line flag with _ replaced with -. + + { + "Advanced": false, + "Default": 5, + "DefaultStr": "NOTICE", + "Examples": [ + { + "Help": "", + "Value": "EMERGENCY" + }, + { + "Help": "", + "Value": "ALERT" + }, + ... + ], + "Exclusive": true, + "FieldName": "LogLevel", + "Groups": "Logging", + "Help": "Log level DEBUG|INFO|NOTICE|ERROR", + "Hide": 0, + "IsPassword": false, + "Name": "log_level", + "NoPrefix": true, + "Required": true, + "Sensitive": false, + "Type": "LogLevel", + "Value": null, + "ValueStr": "NOTICE" + }, + +Note that the Help may be multiple lines separated by \n. The first line +will always be a short sentence and this is the sentence shown when +running rclone help flags. + Specifying remotes to work on Remotes are specified with the fs=, srcFs=, dstFs= parameters depending @@ -16545,7 +17329,7 @@ For example this JSON is equivalent to remote:/tmp { "_name": "remote", - "_path": "/tmp" + "_root": "/tmp" } And this is equivalent to :sftp,host='example.com':/tmp @@ -16553,14 +17337,14 @@ And this is equivalent to :sftp,host='example.com':/tmp { "type": "sftp", "host": "example.com", - "_path": "/tmp" + "_root": "/tmp" } And this is equivalent to /tmp/dir { type = "local", - _ path = "/tmp/dir" + _root = "/tmp/dir" } Supported commands @@ -16745,6 +17529,9 @@ Returns a JSON object: - providers - array of objects See the config providers command for more information on the above. +Note that the Options blocks are in the same format as returned by +"options/info". They are described in the option blocks section. + Authentication is required for this call. config/setpath: Set the path of the config file @@ -17777,6 +18564,11 @@ options/get: Get all the global options Returns an object where keys are option block names and values are an object with the current option values in. 
+Parameters:
+
+- blocks: optional string of comma separated blocks to include
+    - all are included if this is missing or ""
+
 Note that these are the global options which are unaffected by use of
 the _config and _filter parameters. If you wish to read the parameters
 set in _config then use options/config and for _filter use
 options/filter.
@@ -17785,6 +18577,19 @@ This shows the internal names of the option within rclone which should
 map to the external options very easily with a few exceptions.

+options/info: Get info about all the global options
+
+Returns an object where keys are option block names and values are an
+array of objects with info about each option.
+
+Parameters:
+
+- blocks: optional string of comma separated blocks to include
+    - all are included if this is missing or ""
+
+These objects are in the same format as returned by "config/providers".
+They are described in the option blocks section.
+
 options/local: Get the currently active config for this call

 Returns an object with the keys "config" and "filter". The "config" key
@@ -18080,6 +18885,68 @@ This command takes an "fs" parameter. If this parameter is not supplied
 and if there is only one VFS in use then that VFS will be used. If there
 is more than one VFS in use then the "fs" parameter must be supplied.

+vfs/queue: Queue info for a VFS.
+
+This returns info about the upload queue for the selected VFS.
+
+This is only useful if --vfs-cache-mode > off. If you call it when the
+--vfs-cache-mode is off, it will return an empty result.
+
+    {
+        "queued": // an array of files queued for upload
+        [
+            {
+                "name": "file",     // string: name (full path) of the file,
+                "id": 123,          // integer: id of this item in the queue,
+                "size": 79,         // integer: size of the file in bytes
+                "expiry": 1.5,      // float: time until file is eligible for transfer, lowest goes first
+                "tries": 1,         // integer: number of times we have tried to upload
+                "delay": 5.0,       // float: seconds between upload attempts
+                "uploading": false, // boolean: true if item is being uploaded
+            },
+        ],
+    }
+
+The expiry time is the time until the file is eligible for being
+uploaded in floating point seconds. This may go negative. As rclone only
+transfers --transfers files at once, only the lowest --transfers expiry
+times will have uploading as true. So there may be files with negative
+expiry times for which uploading is false.
+
+This command takes an "fs" parameter. If this parameter is not supplied
+and if there is only one VFS in use then that VFS will be used. If there
+is more than one VFS in use then the "fs" parameter must be supplied.
+
+vfs/queue-set-expiry: Set the expiry time for an item queued for upload.
+
+Use this to adjust the expiry time for an item in the upload queue. You
+will need to read the id of the item using vfs/queue before using this
+call.
+
+You can then set expiry to a floating point number of seconds from now
+when the item is eligible for upload. If you want the item to be
+uploaded as soon as possible then set it to a large negative number (eg
+-1000000000). If you want the upload of the item to be delayed for a
+long time then set it to a large positive number.
+
+Setting the expiry of an item which has already started uploading will
+have no effect - the item will carry on being uploaded.
+
+This will return an error if called with --vfs-cache-mode off or if the
+id passed is not found.
+ +This takes the following parameters + +- fs - select the VFS in use (optional) +- id - a numeric ID as returned from vfs/queue +- expiry - a new expiry time as floating point seconds + +This returns an empty result on success, or an error. + +This command takes an "fs" parameter. If this parameter is not supplied +and if there is only one VFS in use then that VFS will be used. If there +is more than one VFS in use then the "fs" parameter must be supplied. + vfs/refresh: Refresh the directory cache. This reads the directories for the specified paths and freshens the @@ -18351,7 +19218,9 @@ Here is an overview of the major features of each cloud storage system. Citrix ShareFile MD5 R/W Yes No - - Dropbox DBHASH ¹ R Yes No - - Enterprise File Fabric - R/W Yes No R/W - + Files.com MD5, CRC32 DR/W Yes No R - FTP - R/W ¹⁰ No No - - + Gofile MD5 DR/W No Yes R - Google Cloud Storage MD5 R/W No No R/W - Google Drive MD5, SHA1, SHA256 DR/W No Yes R/W DRWU Google Photos - - No Yes R - @@ -18373,6 +19242,7 @@ Here is an overview of the major features of each cloud storage system. Oracle Object Storage MD5 R/W No No R/W - pCloud MD5, SHA1 ⁷ R No No W - PikPak MD5 R No No R - + Pixeldrain SHA256 R/W No No R RW premiumize.me - - Yes No R - put.io CRC-32 R/W No Yes R - Proton Drive SHA1 R/W No No R - @@ -18681,8 +19551,8 @@ that you want to remain as those characters on the remote rather than being translated to regular (halfwidth) *, ? and :. The --backend-encoding flags allow you to change that. You can disable -the encoding completely with --backend-encoding None or set -encoding = None in the config file. +the encoding completely with --backend-encoding Raw or set +encoding = Raw in the config file. Encoding takes a comma separated list of encodings. You can see the list of all possible values by passing an invalid value to this flag, e.g. @@ -18713,6 +19583,8 @@ show you the defaults for the backends. DoubleQuote " " + Exclamation ! ! + Hash # # InvalidUtf8 An invalid UTF-8 � @@ -18733,8 +19605,7 @@ show you the defaults for the backends. LtGt <, > <, > - None No characters are - encoded + None ¹ NUL 0x00 ␀ Percent % % @@ -18761,6 +19632,10 @@ show you the defaults for the backends. SquareBracket [, ] [, ] ---------------------------------------------------------------------------------- +¹ Encoding from NUL 0x00 to ␀ is always implicit except when using Raw. +It was previously incorrectly documented as disabling encoding, and to +maintain backward compatibility, its behavior has not been changed. + Encoding example: FTP To take a specific example, the FTP backend's default encoding is @@ -18802,7 +19677,7 @@ same as the default value but without Colon,Question,Asterisk: --local-encoding "Slash,LtGt,DoubleQuote,Pipe,BackSlash,Ctl,RightSpace,RightPeriod,InvalidUtf8,Dot" Alternatively, you can disable the conversion of any characters with ---local-encoding None. +--local-encoding Raw. Instead of using command-line argument --local-encoding, you may also set it as environment variable RCLONE_LOCAL_ENCODING, or configure a @@ -18887,8 +19762,12 @@ upon backend-specific capabilities. Enterprise File Yes Yes Yes Yes Yes No No No No No Yes Fabric + Files.com Yes Yes Yes Yes No No Yes No Yes No Yes + FTP No No Yes Yes No No Yes No No No Yes + Gofile Yes Yes Yes Yes No No Yes No Yes Yes Yes + Google Cloud Yes Yes No No No Yes Yes No No No No Storage @@ -18937,6 +19816,8 @@ upon backend-specific capabilities. 
PikPak Yes Yes Yes Yes Yes No No No Yes Yes Yes + Pixeldrain Yes No Yes Yes No No Yes No Yes Yes Yes + premiumize.me Yes No Yes Yes No No No No Yes Yes Yes put.io Yes No Yes Yes Yes No Yes No No Yes Yes @@ -19076,11 +19957,11 @@ into groups. Copy -Flags for anything which can Copy a file. +Flags for anything which can copy a file. --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -19113,7 +19994,7 @@ Flags for anything which can Copy a file. Sync -Flags just used for rclone sync. +Flags used for sync commands. --backup-dir string Make backups into hierarchy based in DIR --delete-after When synchronizing, delete files on destination after transferring (default) @@ -19138,13 +20019,13 @@ Important flags useful for most commands. Check -Flags used for rclone check. +Flags used for check commands. --max-backlog int Maximum number of objects in sync or check backlog (default 10000) Networking -General networking and HTTP stuff. +Flags for general networking and HTTP stuff. --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable @@ -19153,7 +20034,7 @@ General networking and HTTP stuff. --client-cert string Client SSL certificate (PEM) for mutual TLS auth --client-key string Client SSL private key (PEM) for mutual TLS auth --contimeout Duration Connect timeout (default 1m0s) - --disable-http-keep-alives Disable HTTP keep-alives and use each connection once. + --disable-http-keep-alives Disable HTTP keep-alives and use each connection once --disable-http2 Disable HTTP/2 in the global transport --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21 --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s) @@ -19166,7 +20047,7 @@ General networking and HTTP stuff. --tpslimit float Limit HTTP transactions per second to this --tpslimit-burst int Max burst of transactions for --tpslimit (default 1) --use-cookies Enable session cookiejar - --user-agent string Set the user-agent to a specified string (default "rclone/v1.67.0") + --user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0") Performance @@ -19178,7 +20059,7 @@ Flags helpful for increasing performance. Config -General configuration of rclone. +Flags for general configuration of rclone. --ask-password Allow prompt for password for encrypted configuration (default true) --auto-confirm If enabled, do not request console confirmation @@ -19250,7 +20131,7 @@ Flags for listing directories. Logging -Logging and statistics. +Flags for logging and statistics. --log-file string Log everything to this file --log-format string Comma separated list of log format options (default "date,time") @@ -19268,7 +20149,7 @@ Logging and statistics. 
--stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format --stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes") --syslog Use Syslog for logging - --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON") + --syslog-facility string Facility for syslog, e.g. KERN,USER (default "DAEMON") --use-json-log Use json log format -v, --verbose count Print lots more stuff (repeat for more) @@ -19291,12 +20172,12 @@ RC Flags to control the Remote Control API. --rc Enable the remote control server - --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572]) + --rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"]) --rc-allow-origin string Origin which cross-domain request (CORS) can be executed from --rc-baseurl string Prefix for URLs - leave blank for root --rc-cert string TLS PEM key (concatenation of certificate and CA certificate) --rc-client-ca string Client certificate authority to verify clients with - --rc-enable-metrics Enable prometheus metrics on /metrics + --rc-enable-metrics Enable the Prometheus metrics path at the remote control server --rc-files string Path to local files to serve on the HTTP server --rc-htpasswd string A htpasswd file - if not provided no authentication is done --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s) @@ -19320,9 +20201,31 @@ Flags to control the Remote Control API. --rc-web-gui-no-open-browser Don't open the browser automatically --rc-web-gui-update Check and update to latest version of web gui +Metrics + +Flags to control the Metrics HTTP endpoint.. + + --metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""]) + --metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from + --metrics-baseurl string Prefix for URLs - leave blank for root + --metrics-cert string TLS PEM key (concatenation of certificate and CA certificate) + --metrics-client-ca string Client certificate authority to verify clients with + --metrics-htpasswd string A htpasswd file - if not provided no authentication is done + --metrics-key string TLS PEM Private key + --metrics-max-header-bytes int Maximum size of request header (default 4096) + --metrics-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") + --metrics-pass string Password for authentication + --metrics-realm string Realm for authentication + --metrics-salt string Password hashing salt (default "dlPL2MqE") + --metrics-server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --metrics-server-write-timeout Duration Timeout for server writing data (default 1h0m0s) + --metrics-template string User-specified template + --metrics-user string User name for authentication + --rc-enable-metrics Enable the Prometheus metrics path at the remote control server + Backend -Backend only flags. These can be set in the config file also. +Backend-only flags (these can be set in the config file also). --alias-description string Description of the remote --alias-remote string Remote or path to alias @@ -19545,6 +20448,12 @@ Backend only flags. These can be set in the config file also. 
--filefabric-token-expiry string Token expiry time --filefabric-url string URL of the Enterprise File Fabric to connect to --filefabric-version string Version read from the file fabric + --filescom-api-key string The API key used to authenticate with Files.com + --filescom-description string Description of the remote + --filescom-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) + --filescom-password string The password used to authenticate with Files.com (obscured) + --filescom-site string Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com) + --filescom-username string The username used to authenticate with Files.com --ftp-ask-password Allow asking for FTP password when needed --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited @@ -19588,6 +20497,12 @@ Backend only flags. These can be set in the config file also. --gcs-token string OAuth Access Token as a JSON blob --gcs-token-url string Token server url --gcs-user-project string User project + --gofile-access-token string API Access token + --gofile-account-id string Account ID + --gofile-description string Description of the remote + --gofile-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftPeriod,RightPeriod,InvalidUtf8,Dot,Exclamation) + --gofile-list-chunk int Number of items to list in each call (default 1000) + --gofile-root-folder-id string ID of the root folder --gphotos-auth-url string Auth server URL --gphotos-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) --gphotos-batch-mode string Upload file batching sync|async|off (default "sync") @@ -19679,6 +20594,7 @@ Backend only flags. These can be set in the config file also. --local-description string Description of the remote --local-encoding Encoding The encoding for the backend (default Slash,Dot) --local-no-check-updated Don't check to see if the files change during upload + --local-no-clone Disable reflink cloning for server-side copies --local-no-preallocate Disable preallocation of disk space for transferred files --local-no-set-modtime Disable setting modtime --local-no-sparse Disable sparse files for multi-thread downloads @@ -19795,6 +20711,10 @@ Backend only flags. These can be set in the config file also. --pikpak-upload-concurrency int Concurrency for multipart uploads (default 5) --pikpak-use-trash Send files to the trash instead of deleting permanently (default true) --pikpak-user string Pikpak username + --pixeldrain-api-key string API key for your pixeldrain account + --pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api") + --pixeldrain-description string Description of the remote + --pixeldrain-root-folder-id string Root of the filesystem to use (default "me") --premiumizeme-auth-url string Auth server URL --premiumizeme-client-id string OAuth Client Id --premiumizeme-client-secret string OAuth Client Secret @@ -19869,6 +20789,7 @@ Backend only flags. These can be set in the config file also. 
--s3-provider string Choose your S3 provider --s3-region string Region to connect to --s3-requester-pays Enables requester pays option when interacting with S3 bucket + --s3-sdk-log-mode Bits Set to debug the SDK (default Off) --s3-secret-access-key string AWS Secret Access Key (password) --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 --s3-session-token string An AWS session token @@ -19879,7 +20800,6 @@ Backend only flags. These can be set in the config file also. --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key --s3-storage-class string The storage class to use when storing new objects in S3 - --s3-sts-endpoint string Endpoint for STS --s3-upload-concurrency int Concurrency for multipart uploads and copies (default 4) --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint @@ -19889,6 +20809,7 @@ Backend only flags. These can be set in the config file also. --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) --s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset) --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads + --s3-use-unsigned-payload Tristate Whether to use an unsigned payload in PutObject (default unset) --s3-v2-auth If true use v2 authentication --s3-version-at Time Show file versions as they were at the specified time (default off) --s3-version-deleted Show deleted file markers when using versions @@ -19997,10 +20918,12 @@ Backend only flags. These can be set in the config file also. --swift-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8) --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") --swift-env-auth Get swift credentials from environment variables in standard OpenStack form + --swift-fetch-until-empty-page When paginating, always fetch unless we received an empty page --swift-key string API key or password (OS_PASSWORD) --swift-leave-parts-on-error If true avoid calling abort upload on a failure --swift-no-chunk Don't chunk files during streaming upload --swift-no-large-objects Disable support for static and dynamic large objects + --swift-partial-page-fetch-threshold int When paginating, fetch if the current page is within this percentage of the limit --swift-region string Region name - optional (OS_REGION_NAME) --swift-storage-policy string The storage policy to use when creating a new container --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) @@ -20038,6 +20961,7 @@ Backend only flags. These can be set in the config file also. --webdav-owncloud-exclude-shares Exclude ownCloud shares --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms) --webdav-pass string Password (obscured) + --webdav-unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly --webdav-url string URL of http host to connect to --webdav-user string User name --webdav-vendor string Name of the WebDAV site/service/software you are using @@ -20047,6 +20971,7 @@ Backend only flags. These can be set in the config file also. 
--yandex-description string Description of the remote --yandex-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) --yandex-hard-delete Delete files permanently rather than putting them into the trash + --yandex-spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance (default true) --yandex-token string OAuth Access Token as a JSON blob --yandex-token-url string Token server url --zoho-auth-url string Auth server URL @@ -22405,6 +23330,15 @@ Unison and synchronization in general. Changelog +v1.68 + +- Fixed an issue affecting backends that round modtimes to a lower + precision. + +v1.67 + +- Added integration tests against all backends. + v1.66 - Copies and deletes are now handled in one operation instead of two @@ -22606,7 +23540,7 @@ Verify signatures and hashes together You can verify the signatures and hashes in one command line like this: - $ gpg --decrypt SHA256SUMS | sha256sum -c --ignore-missing + $ h=$(gpg --decrypt SHA256SUMS) && echo "$h" | sha256sum - -c --ignore-missing gpg: Signature made Mon 17 Jul 2023 15:03:17 BST gpg: using DSA key FBF737ECE9F8AB18604BD2AC93935E02FF3B54FA gpg: Good signature from "Nick Craig-Wood " [ultimate] @@ -22658,11 +23592,11 @@ This will guide you through an interactive setup process: n) No y/n> Remote config - -------------------- - [remote] - type = fichier - api_key = example_key - -------------------- + Configuration complete. + Options: + - type: fichier + - api_key: example_key + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -22874,10 +23808,11 @@ This will guide you through an interactive setup process: Can be "myremote:path/to/dir", "myremote:bucket", "myremote:" or "/local/path". remote> /mnt/storage/backup Remote config - -------------------- - [remote] - remote = /mnt/storage/backup - -------------------- + Configuration complete. + Options: + - type: alias + - remote: /mnt/storage/backup + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -23192,20 +24127,20 @@ This will guide you through an interactive setup process. \ "GLACIER_IR" storage_class> 1 Remote config - -------------------- - [remote] - type = s3 - provider = AWS - env_auth = false - access_key_id = XXX - secret_access_key = YYY - region = us-east-1 - endpoint = - location_constraint = - acl = private - server_side_encryption = - storage_class = - -------------------- + Configuration complete. + Options: + - type: s3 + - provider: AWS + - env_auth: false + - access_key_id: XXX + - secret_access_key: YYY + - region: us-east-1 + - endpoint: + - location_constraint: + - acl: private + - server_side_encryption: + - storage_class: + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -23502,17 +24437,26 @@ The different authentication methods are tried in this order: - Profile files are standard files used by AWS CLI tools - By default it will use the profile in your home directory (e.g. ~/.aws/credentials on unix based systems) file and the - "default" profile, to change set these environment - variables: - - AWS_SHARED_CREDENTIALS_FILE to control which file. - - AWS_PROFILE to control which profile to use. + "default" profile, to change set these environment variables + or config keys: + - AWS_SHARED_CREDENTIALS_FILE to control which file or the + shared_credentials_file config key. + - AWS_PROFILE to control which profile to use or the + profile config key. 
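+        - For example, a minimal sketch (the profile name "work" and
+          the bucket name are illustrative only):
+
+              AWS_PROFILE=work rclone lsd :s3,provider=AWS,env_auth=true:mybucket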
- Or, run rclone in an ECS task with an IAM role (AWS only). - Or, run rclone on an EC2 instance with an IAM role (AWS only). - Or, run rclone in an EKS pod with an IAM role that is associated with a service account (AWS only). + - Or, use process credentials to read config from an external + program. + +With env_auth = true rclone (which uses the SDK for Go v2) should +support all authentication methods that the aws CLI tool does and the +other AWS SDKs. If none of these option actually end up providing rclone with AWS -credentials then S3 interaction will be non-authenticated (see below). +credentials then S3 interaction will be non-authenticated (see the +anonymous access section for more info). S3 Permissions @@ -24357,6 +25301,9 @@ Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. +Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in, +you'll need to set this to true. + Properties: - Config: force_path_style @@ -24644,6 +25591,24 @@ Properties: - Type: Tristate - Default: unset +--s3-use-unsigned-payload + +Whether to use an unsigned payload in PutObject + +Rclone has to avoid the AWS SDK seeking the body when calling PutObject. +The AWS provider can add checksums in the trailer to avoid seeking but +other providers can't. + +This should be true, false or left unset to use the default for the +provider. + +Properties: + +- Config: use_unsigned_payload +- Env Var: RCLONE_S3_USE_UNSIGNED_PAYLOAD +- Type: Tristate +- Default: unset + --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads @@ -24797,7 +25762,7 @@ Properties: --s3-sts-endpoint -Endpoint for STS. +Endpoint for STS (deprecated). Leave blank if using AWS to use the default endpoint for the region. @@ -24858,6 +25823,32 @@ Properties: - Type: Tristate - Default: unset +--s3-sdk-log-mode + +Set to debug the SDK + +This can be set to a comma separated list of the following functions: + +- Signing +- Retries +- Request +- RequestWithBody +- Response +- ResponseWithBody +- DeprecatedUsage +- RequestEventMessage +- ResponseEventMessage + +Use Off to disable and All to set all log levels. You will need to use +-vv to see the debug level logs. + +Properties: + +- Config: sdk_log_mode +- Env Var: RCLONE_S3_SDK_LOG_MODE +- Type: Bits +- Default: Off + --s3-description Description of the remote. @@ -24923,18 +25914,20 @@ backend/command. restore -Restore objects from GLACIER to normal storage +Restore objects from GLACIER or INTELLIGENT-TIERING archive tier rclone backend restore remote: [options] [+] This command can be used to restore one or more objects from GLACIER to -normal storage. +normal storage or from INTELLIGENT-TIERING Archive Access / Deep Archive +Access tier to the Frequent Access tier. Usage Examples: rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS + rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags @@ -24962,18 +25955,20 @@ The Status will be OK if it was successful or an error message if not. Options: - "description": The optional description for the job. 
-- "lifetime": Lifetime of the active copy in days +- "lifetime": Lifetime of the active copy in days, ignored for + INTELLIGENT-TIERING storage - "priority": Priority of restore: Standard|Expedited|Bulk restore-status -Show the restore status for objects being restored from GLACIER to -normal storage +Show the restore status for objects being restored from GLACIER or +INTELLIGENT-TIERING storage rclone backend restore-status remote: [options] [+] This command can be used to show the status for objects being restored -from GLACIER to normal storage. +from GLACIER to normal storage or from INTELLIGENT-TIERING Archive +Access / Deep Archive Access tier to the Frequent Access tier. Usage Examples: @@ -25003,6 +25998,15 @@ It returns a list of status dictionaries. "RestoreExpiryDate": "2023-09-06T12:29:19+01:00" }, "StorageClass": "DEEP_ARCHIVE" + }, + { + "Remote": "test.gz", + "VersionID": null, + "RestoreStatus": { + "IsRestoreInProgress": true, + "RestoreExpiryDate": "null" + }, + "StorageClass": "INTELLIGENT_TIERING" } ] @@ -25132,15 +26136,6 @@ looking like this: [anons3] type = s3 provider = AWS - env_auth = false - access_key_id = - secret_access_key = - region = us-east-1 - endpoint = - location_constraint = - acl = private - server_side_encryption = - storage_class = Then use it as normal with the name of the public bucket, e.g. @@ -25148,6 +26143,10 @@ Then use it as normal with the name of the public bucket, e.g. You will be able to list and copy data but not upload it. +You can also do this entirely on the command line + + rclone lsd :s3,provider=AWS:1000genomes + Providers AWS S3 @@ -25329,6 +26328,11 @@ look within a bucket. For R2 tokens with the "Object Read & Write" permission, you may also need to add no_check_bucket = true for object uploads to work correctly. +Note that Cloudflare decompresses files uploaded with +Content-Encoding: gzip by default which is a deviation from what AWS +does. If this is causing a problem then upload the files with +--header-upload "Cache-Control: no-transform" + Dreamhost Dreamhost DreamObjects is an object storage system based on CEPH. @@ -27867,6 +28871,27 @@ network. For more detailed comparison please check the documentation of the storj backend. +Memory usage {memory} + +The most common cause of rclone using lots of memory is a single +directory with millions of files in. Despite s3 not really having the +concepts of directories, rclone does the sync on a directory by +directory basis to be compatible with normal filing systems. + +Rclone loads each directory into memory as rclone objects. Each rclone +object takes 0.5k-1k of memory, so approximately 1GB per 1,000,000 +files, and the sync for that directory does not begin until it is +entirely loaded in memory. So the sync can take a long time to start for +large directories. + +To sync a directory with 100,000,000 files in you would need +approximately 100 GB of memory. At some point the amount of memory +becomes difficult to provide so there is a workaround for this which +involves a bit of scripting. + +At some point rclone will gain a sync mode which is effectively this +workaround but built in to rclone. + Limitations rclone about is not supported by the S3 backend. Backends without this @@ -28044,12 +29069,13 @@ generating and using an Application Key. Endpoint for the service - leave blank normally. 
endpoint> Remote config - -------------------- - [remote] - account = 123456789abc - key = 0123456789abcdef0123456789abcdef0123456789 - endpoint = - -------------------- + Configuration complete. + Options: + - type: b2 + - account: 123456789abc + - key: 0123456789abcdef0123456789abcdef0123456789 + - endpoint: + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -28162,11 +29188,18 @@ used. Versions -When rclone uploads a new version of a file it creates a new version of +The default setting of B2 is to keep old versions of files. This means +when rclone uploads a new version of a file it creates a new version of it. Likewise when you delete a file, the old version will be marked -hidden and still be available. Conversely, you may opt in to a "hard -delete" of files with the --b2-hard-delete flag which would permanently -remove the file instead of hiding it. +hidden and still be available. + +Whether B2 keeps old versions of files or not can be adjusted on a per +bucket basis using the "Lifecycle settings" on the B2 control panel or +when creating the bucket using the --b2-lifecycle flag or after creation +using the rclone backend lifecycle command. + +You may opt in to a "hard delete" of files with the --b2-hard-delete +flag which permanently removes files on deletion instead of hiding them. Old versions of files, where available, are visible using the --b2-versions flag. @@ -28802,12 +29835,13 @@ This will guide you through an interactive setup process: Log in and authorize rclone for access Waiting for code... Got code - -------------------- - [remote] - client_id = - client_secret = - token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"XXX"} - -------------------- + Configuration complete. + Options: + - type: box + - client_id: + - client_secret: + - token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"XXX"} + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -28888,11 +29922,11 @@ Here is how to do it. Choose a number from below, or type in an existing value 1 > remote remote> remote - -------------------- - [remote] - type = box - token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2017-07-08T23:40:08.059167677+01:00"} - -------------------- + Configuration complete. + Options: + - type: box + - token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2017-07-08T23:40:08.059167677+01:00"} + Keep this "remote" remote? Edit remote Value "client_id" = "" Edit? (y/n)> @@ -28920,11 +29954,11 @@ Here is how to do it. Log in and authorize rclone for access Waiting for code... Got code - -------------------- - [remote] - type = box - token = {"access_token":"YYY","token_type":"bearer","refresh_token":"YYY","expiry":"2017-07-23T12:22:29.259137901+01:00"} - -------------------- + Configuration complete. + Options: + - type: box + - token: {"access_token":"YYY","token_type":"bearer","refresh_token":"YYY","expiry":"2017-07-23T12:22:29.259137901+01:00"} + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -30510,12 +31544,12 @@ This will guide you through an interactive setup process: Log in and authorize rclone for access Waiting for code... 
Got code - -------------------- - [remote] - type = sharefile - endpoint = https://XXX.sharefile.com - token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"} - -------------------- + Configuration complete. + Options: + - type: sharefile + - endpoint: https://XXX.sharefile.com + - token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"} + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -31816,11 +32850,11 @@ This will guide you through an interactive setup process: "dir=remote:path with space" "dir2=remote2:path with space" Enter a fs.SpaceSepList value. upstreams> images=s3:imagesbucket files=drive:important/files - -------------------- - [remote] - type = combine - upstreams = images=s3:imagesbucket files=drive:important/files - -------------------- + Configuration complete. + Options: + - type: combine + - upstreams: images=s3:imagesbucket files=drive:important/files + Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -31944,12 +32978,13 @@ This will guide you through an interactive setup process: Please visit: https://www.dropbox.com/1/oauth2/authorize?client_id=XXXXXXXXXXXXXXX&response_type=code Enter the code: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXXXXXXXX - -------------------- - [remote] - app_key = - app_secret = - token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX - -------------------- + Configuration complete. + Options: + - type: dropbox + - app_key: + - app_secret: + - token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -32303,7 +33338,7 @@ Max number of files in upload batch. This sets the batch size of files to upload. It has to be less than 1000. -By default this is 0 which means rclone which calculate the batch size +By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode. - batch_mode: async - default batch_size is 100 @@ -32498,12 +33533,12 @@ This will guide you through an interactive setup process: n) No (default) y/n> n Remote config - -------------------- - [remote] - type = filefabric - url = https://yourfabric.smestorage.com/ - permanent_token = xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx - -------------------- + Configuration complete. + Options: + - type: filefabric + - url: https://yourfabric.smestorage.com/ + - permanent_token: xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx + Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -32703,6 +33738,180 @@ Properties: - Type: string - Required: false +Files.com + +Files.com is a cloud storage service that provides a secure and easy way +to store and share files. + +The initial setup for filescom involves authenticating with your +Files.com account. You can do this by providing your site subdomain, +username, and password. Alternatively, you can authenticate using an API +Key from Files.com. rclone config walks you through it. + +Configuration + +Here is an example of how to make a remote called remote. First run: + + rclone config + +This will guide you through an interactive setup process: + + No remotes found, make a new one? + n) New remote + s) Set configuration password + q) Quit config + n/s/q> n + + Enter name for new remote. + name> remote + + Option Storage. 
+ Type of storage to configure. + Choose a number from below, or type in your own value. + [snip] + XX / Files.com + \ "filescom" + [snip] + Storage> filescom + + Option site. + Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com) + Enter a value. Press Enter to leave empty. + site> mysite + + Option username. + The username used to authenticate with Files.com. + Enter a value. Press Enter to leave empty. + username> user + + Option password. + The password used to authenticate with Files.com. + Choose an alternative below. Press Enter for the default (n). + y) Yes, type in my own password + g) Generate random password + n) No, leave this optional password blank (default) + y/g/n> y + Enter the password: + password: + Confirm the password: + password: + + Edit advanced config? + y) Yes + n) No (default) + y/n> n + + Configuration complete. + Options: + - type: filescom + - site: mysite + - username: user + - password: *** ENCRYPTED *** + Keep this "remote" remote? + y) Yes this is OK (default) + e) Edit this remote + d) Delete this remote + y/e/d> y + +Once configured you can use rclone. + +See all files in the top level: + + rclone lsf remote: + +Make a new directory in the root: + + rclone mkdir remote:dir + +Recursively List the contents: + + rclone ls remote: + +Sync /home/local/directory to the remote directory, deleting any excess +files in the directory. + + rclone sync --interactive /home/local/directory remote:dir + +Standard options + +Here are the Standard options specific to filescom (Files.com). + +--filescom-site + +Your site subdomain (e.g. mysite) or custom domain (e.g. +myfiles.customdomain.com). + +Properties: + +- Config: site +- Env Var: RCLONE_FILESCOM_SITE +- Type: string +- Required: false + +--filescom-username + +The username used to authenticate with Files.com. + +Properties: + +- Config: username +- Env Var: RCLONE_FILESCOM_USERNAME +- Type: string +- Required: false + +--filescom-password + +The password used to authenticate with Files.com. + +NB Input to this must be obscured - see rclone obscure. + +Properties: + +- Config: password +- Env Var: RCLONE_FILESCOM_PASSWORD +- Type: string +- Required: false + +Advanced options + +Here are the Advanced options specific to filescom (Files.com). + +--filescom-api-key + +The API key used to authenticate with Files.com. + +Properties: + +- Config: api_key +- Env Var: RCLONE_FILESCOM_API_KEY +- Type: string +- Required: false + +--filescom-encoding + +The encoding for the backend. + +See the encoding section in the overview for more info. + +Properties: + +- Config: encoding +- Env Var: RCLONE_FILESCOM_ENCODING +- Type: Encoding +- Default: + Slash,BackSlash,Del,Ctl,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot + +--filescom-description + +Description of the remote. + +Properties: + +- Config: description +- Env Var: RCLONE_FILESCOM_DESCRIPTION +- Type: string +- Required: false + FTP FTP is the File Transfer Protocol. Rclone FTP support is provided using @@ -32769,12 +33978,12 @@ For an anonymous FTP server, see below. Enter a boolean value (true or false). Press Enter for the default ("false"). explicit_tls> Remote config - -------------------- - [remote] - type = ftp - host = ftp.example.com - pass = *** ENCRYPTED *** - -------------------- + Configuration complete. + Options: + - type: ftp + - host: ftp.example.com + - pass: *** ENCRYPTED *** + Keep this "remote" remote? 
y) Yes this is OK e) Edit this remote d) Delete this remote @@ -33208,6 +34417,256 @@ of 1000000000 means that file time precision of 1 second is available. A value of 3153600000000000000 (or another large number) means "unsupported". +Gofile + +Gofile is a content storage and distribution platform. Its aim is to +provide as much service as possible for free or at a very low price. + +The initial setup for Gofile involves logging in to the web interface +and going to the "My Profile" section. Copy the "Account API token" for +use in the config file. + +Note that if you wish to connect rclone to Gofile you will need a +premium account. + +Configuration + +Here is an example of how to make a remote called remote. First run: + + rclone config + +This will guide you through an interactive setup process: + + No remotes found, make a new one? + n) New remote + s) Set configuration password + q) Quit config + n/s/q> n + + Enter name for new remote. + name> remote + + Option Storage. + Type of storage to configure. + Choose a number from below, or type in your own value. + XX / Gofile + \ (gofile) + Storage> gofile + + Option access_token. + API Access token + You can get this from the web control panel. + Enter a value. Press Enter to leave empty. + access_token> YOURACCESSTOKEN + + Edit advanced config? + y) Yes + n) No (default) + y/n> n + + Configuration complete. + Options: + - type: gofile + - access_token: YOURACCESSTOKEN + Keep this "remote" remote? + y) Yes this is OK (default) + e) Edit this remote + d) Delete this remote + y/e/d> y + +Once configured you can then use rclone like this, + +List directories and files in the top level of your Gofile + + rclone lsf remote: + +To copy a local directory to an Gofile directory called backup + + rclone copy /home/source remote:backup + +Modification times and hashes + +Gofile supports modification times with a resolution of 1 second. + +Gofile supports MD5 hashes, so you can use the --checksum flag. + +Restricted filename characters + +In addition to the default restricted characters set the following +characters are also replaced: + + Character Value Replacement + ----------- ------- ------------- + ! 0x21 ! + " 0x22 " + * 0x2A * + : 0x3A : + < 0x3C < + > 0x3E > + ? 0x3F ? + \ 0x5C \ + | 0x7C | + +File names can also not start or end with the following characters. +These only get replaced if they are the first or last character in the +name: + + Character Value Replacement + ----------- ------- ------------- + . 0x2E . + +Invalid UTF-8 bytes will also be replaced, as they can't be used in JSON +strings. + +Public Links + +Gofile supports rclone link to make public links to files or +directories. If you specify a directory it will download as a zip file. +You can use the --expire flag to specify the time the link should be +valid. Note that rclone link --unlink removes all the public links for a +file. + +Root folder ID + +You can set the root_folder_id for rclone. This is the directory +(identified by its Folder ID) that rclone considers to be the root of +your Gofile drive. + +Normally you will leave this blank and rclone will determine the correct +root to use itself and fill in the value in the config file. + +However you can set this to restrict rclone to a specific folder +hierarchy. + +In order to do this you will have to find the Folder ID of the directory +you wish rclone to display. 
+ +You can do this with rclone + + $ rclone lsf -Fip --dirs-only remote: + d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/ + f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/ + d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/ + +The ID to use is the part before the ; so you could set + + root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0 + +To restrict rclone to the Files directory. + +Standard options + +Here are the Standard options specific to gofile (Gofile). + +--gofile-access-token + +API Access token + +You can get this from the web control panel. + +Properties: + +- Config: access_token +- Env Var: RCLONE_GOFILE_ACCESS_TOKEN +- Type: string +- Required: false + +Advanced options + +Here are the Advanced options specific to gofile (Gofile). + +--gofile-root-folder-id + +ID of the root folder + +Leave this blank normally, rclone will fill it in automatically. + +If you want rclone to be restricted to a particular folder you can fill +it in - see the docs for more info. + +Properties: + +- Config: root_folder_id +- Env Var: RCLONE_GOFILE_ROOT_FOLDER_ID +- Type: string +- Required: false + +--gofile-account-id + +Account ID + +Leave this blank normally, rclone will fill it in automatically. + +Properties: + +- Config: account_id +- Env Var: RCLONE_GOFILE_ACCOUNT_ID +- Type: string +- Required: false + +--gofile-list-chunk + +Number of items to list in each call + +Properties: + +- Config: list_chunk +- Env Var: RCLONE_GOFILE_LIST_CHUNK +- Type: int +- Default: 1000 + +--gofile-encoding + +The encoding for the backend. + +See the encoding section in the overview for more info. + +Properties: + +- Config: encoding +- Env Var: RCLONE_GOFILE_ENCODING +- Type: Encoding +- Default: + Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftPeriod,RightPeriod,InvalidUtf8,Dot,Exclamation + +--gofile-description + +Description of the remote. + +Properties: + +- Config: description +- Env Var: RCLONE_GOFILE_DESCRIPTION +- Type: string +- Required: false + +Limitations + +Gofile only supports filenames up to 255 characters in length, where a +character is a unicode character. + +Directories should not be cached for more than 24h otherwise files in +the directory may not be downloadable. In practice this means when using +a VFS based rclone command such as rclone mount you should make sure +--dir-cache-time is less than 24h. + +Note that Gofile is currently limited to a total of 100,000 items. If +you attempt to upload more than that you will get error-limit-100000. +This limit may be lifted in the future. + +Duplicated files + +Gofile is capable of having files with duplicated file names. For +instance two files called hello.txt in the same directory. + +Rclone cannot sync that to a normal file system but it can be fixed with +the rclone dedupe command. + +Duplicated files cause problems with the syncing and you will see +messages in the log about duplicates. + +Use rclone dedupe to fix duplicated files. + Google Cloud Storage Paths are specified as remote:bucket (or remote: for the lsd command.) @@ -33331,16 +34790,16 @@ This will guide you through an interactive setup process: Log in and authorize rclone for access Waiting for code... 
Got code - -------------------- - [remote] - type = google cloud storage - client_id = - client_secret = - token = {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null} - project_number = 12345678 - object_acl = private - bucket_acl = private - -------------------- + Configuration complete. + Options: + - type: google cloud storage + - client_id: + - client_secret: + - token: {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null} + - project_number: 12345678 + - object_acl: private + - bucket_acl: private + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -33993,15 +35452,16 @@ This will guide you through an interactive setup process: y) Yes n) No y/n> n - -------------------- - [remote] - client_id = - client_secret = - scope = drive - root_folder_id = - service_account_file = - token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2014-03-16T13:57:58.955387075Z"} - -------------------- + Configuration complete. + Options: + type: drive + - client_id: + - client_secret: + - scope: drive + - root_folder_id: + - service_account_file: + - token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2014-03-16T13:57:58.955387075Z"} + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -34125,12 +35585,12 @@ credentials file into the rclone config file, you can set service_account_credentials with the actual contents of the file instead, or set the equivalent environment variable. -Use case - Google Apps/G-suite account and individual Drive +Use case - Google Workspace account and individual Drive -Let's say that you are the administrator of a Google Apps (old) or -G-suite account. The goal is to store data on an individual's Drive -account, who IS a member of the domain. We'll call the domain -example.com, and the user foo@example.com. +Let's say that you are the administrator of a Google Workspace. The goal +is to read or write data on an individual's Drive account, who IS a +member of the domain. We'll call the domain example.com, and the user +foo@example.com. There's a few steps we need to go through to accomplish this: @@ -34138,29 +35598,38 @@ There's a few steps we need to go through to accomplish this: - To create a service account and obtain its credentials, go to the Google Developer Console. -- You must have a project - create one if you don't. +- You must have a project - create one if you don't and make sure you + are on the selected project. - Then go to "IAM & admin" -> "Service Accounts". - Use the "Create Service Account" button. Fill in "Service account name" and "Service account ID" with something that identifies your client. - Select "Create And Continue". Step 2 and 3 are optional. -- These credentials are what rclone will use for authentication. If - you ever need to remove access, press the "Delete service account - key" button. +- Click on the newly created service account +- Click "Keys" and then "Add Key" and then "Create new key" +- Choose type "JSON" and click create +- This will download a small JSON file that rclone will use for + authentication. 
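+
+The downloaded file is a standard Google service account key. As a
+rough sketch, it looks something like this (values redacted - not
+output from a real project):
+
+    {
+        "type": "service_account",
+        "project_id": "your-project-id",
+        "private_key_id": "...",
+        "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
+        "client_email": "rclone@your-project-id.iam.gserviceaccount.com",
+        "client_id": "..."
+    }
+
+Keep this file safe, as it grants access to the service account.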
+ +If you ever need to remove access, press the "Delete service account +key" button. 2. Allowing API access to example.com Google Drive -- Go to example.com's admin console +- Go to example.com's Workspace Admin Console - Go into "Security" (or use the search bar) -- Select "Show more" and then "Advanced settings" -- Select "Manage API client access" in the "Authentication" section -- In the "Client Name" field enter the service account's "Client ID" - +- Select "Access and data control" and then "API controls" +- Click "Manage domain-wide delegation" +- Click "Add new" +- In the "Client ID" field enter the service account's "Client ID" - this can be found in the Developer Console under "IAM & Admin" -> "Service Accounts", then "View Client ID" for the newly created service account. It is a ~21 character numerical string. -- In the next field, "One or More API Scopes", enter - https://www.googleapis.com/auth/drive to grant access to Google - Drive specifically. +- In the next field, "OAuth Scopes", enter + https://www.googleapis.com/auth/drive to grant read/write access to + Google Drive specifically. You can also use + https://www.googleapis.com/auth/drive.readonly for read only access. +- Click "Authorise" 3. Configure rclone, assuming a new install @@ -34168,12 +35637,12 @@ There's a few steps we need to go through to accomplish this: n/s/q> n # New name>gdrive # Gdrive is an example name - Storage> # Select the number shown for Google Drive + Storage> # Type drive client_id> # Can be left blank client_secret> # Can be left blank - scope> # Select your scope, 1 for example + scope> # Select the scope use used in step 2 root_folder_id> # Can be left blank - service_account_file> /home/foo/myJSONfile.json # This is where the JSON file goes! + service_account_file> /home/foo/myJSONfile.json # Path to the JSON file you downloaded in step 1. y/n> # Auto config, n 4. Verify that it's working @@ -34191,7 +35660,7 @@ Note: in case you configured a specific root folder on gdrive and rclone is unable to access the contents of that folder when using --drive-impersonate, do this instead: - in the gdrive web interface, share your root folder with the user/email of the new Service Account -you created/selected at step #1 - use rclone without specifying the +you created/selected at step 1 - use rclone without specifying the --drive-impersonate option, like this: rclone -v lsf gdrive:backup Shared drives (team drives) @@ -34219,13 +35688,14 @@ For example: 3 / Rclone Test 3 \ "zzzzzzzzzzzzzzzzzzzz" Enter a Shared Drive ID> 1 - -------------------- - [remote] - client_id = - client_secret = - token = {"AccessToken":"xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null} - team_drive = xxxxxxxxxxxxxxxxxxxx - -------------------- + Configuration complete. + Options: + - type: drive + - client_id: + - client_secret: + - token: {"AccessToken":"xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null} + - team_drive: xxxxxxxxxxxxxxxxxxxx + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -35898,11 +37368,11 @@ This will guide you through an interactive setup process: *** are stored in full resolution at original quality. These uploads *** will count towards storage in your Google Account. 
- -------------------- - [remote] - type = google photos - token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2019-06-28T17:38:04.644930156+01:00"} - -------------------- + Configuration complete. + Options: + - type: google photos + - token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2019-06-28T17:38:04.644930156+01:00"} + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -36218,7 +37688,7 @@ Max number of files in upload batch. This sets the batch size of files to upload. It has to be less than 50. -By default this is 0 which means rclone which calculate the batch size +By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode. - batch_mode: async - default batch_size is 50 @@ -36694,6 +38164,8 @@ The rclone hashsum (or md5sum or sha1sum) command will: Other operations +- any time a hash is requested, follow the logic from 1-4 from hashsum + above - whenever a file is uploaded or downloaded in full, capture the stream to calculate all supported hashes on the fly and update database @@ -36764,12 +38236,12 @@ This will guide you through an interactive setup process: n) No (default) y/n> n Remote config - -------------------- - [remote] - type = hdfs - namenode = namenode.hadoop:8020 - username = root - -------------------- + Configuration complete. + Options: + - type: hdfs + - namenode: namenode.hadoop:8020 + - username: root + Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -37005,11 +38477,11 @@ This will guide you through an interactive setup process: Log in and authorize rclone for access Waiting for code... Got code - -------------------- - [remote] - type = hidrive - token = {"access_token":"xxxxxxxxxxxxxxxxxxxx","token_type":"Bearer","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxx","expiry":"xxxxxxxxxxxxxxxxxxxxxxx"} - -------------------- + Configuration complete. + Options: + - type: hidrive + - token: {"access_token":"xxxxxxxxxxxxxxxxxxxx","token_type":"Bearer","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxx","expiry":"xxxxxxxxxxxxxxxxxxxxxxx"} + Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -37480,10 +38952,11 @@ This will guide you through an interactive setup process: \ "https://example.com" url> https://beta.rclone.org Remote config - -------------------- - [remote] - url = https://beta.rclone.org - -------------------- + Configuration complete. + Options: + - type: http + - url: https://beta.rclone.org + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -38114,12 +39587,12 @@ This will guide you through an interactive setup process. y) Yes n) No (default) y/n> n - -------------------- - [remote] - type = internetarchive - access_key_id = XXXX - secret_access_key = XXXX - -------------------- + Configuration complete. + Options: + - type: internetarchive + - access_key_id: XXXX + - secret_access_key: XXXX + Keep this "remote" remote? 
y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -38478,18 +39951,18 @@ This will guide you through an interactive setup process: 2 > Shared 3 > Sync config_mountpoint> 1 - -------------------- - [remote] - type = jottacloud - configVersion = 1 - client_id = jottacli - client_secret = - tokenURL = https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token - token = {........} - username = 2940e57271a93d987d6f8a21 - device = Jotta - mountpoint = Archive - -------------------- + Configuration complete. + Options: + - type: jottacloud + - configVersion: 1 + - client_id: jottacli + - client_secret: + - tokenURL: https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token + - token: {........} + - username: 2940e57271a93d987d6f8a21 + - device: Jotta + - mountpoint: Archive + Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -39366,13 +40839,13 @@ This will guide you through an interactive setup process: n) No y/n> n Remote config - -------------------- - [remote] - type = mailru - user = username@mail.ru - pass = *** ENCRYPTED *** - speedup_enable = true - -------------------- + Configuration complete. + Options: + - type: mailru + - user: username@mail.ru + - pass: *** ENCRYPTED *** + - speedup_enable: true + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -39759,12 +41232,12 @@ This will guide you through an interactive setup process: Confirm the password: password: Remote config - -------------------- - [remote] - type = mega - user = you@example.com - pass = *** ENCRYPTED *** - -------------------- + Configuration complete. + Options: + - type: mega + - user: you@example.com + - pass: *** ENCRYPTED *** + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -40042,10 +41515,10 @@ want to: Remote config - -------------------- - [remote] - type = memory - -------------------- + Configuration complete. + Options: + - type: memory + Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -40449,12 +41922,13 @@ This will guide you through an interactive setup process: Endpoint for the service - leave blank normally. endpoint> Remote config - -------------------- - [remote] - account = account_name - key = base64encodedkey== - endpoint = - -------------------- + Configuration complete. + Options: + - type: azureblob + - account: account_name + - key: base64encodedkey== + - endpoint: + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -40717,6 +42191,13 @@ msi_client_id, or msi_mi_res_id parameters. If none of msi_object_id, msi_client_id, or msi_mi_res_id is set, this is is equivalent to using env_auth. +Anonymous + +If you want to access resources with public anonymous access then set +account only. You can do this without making an rclone config: + + rclone lsf :azureblob,account=ACCOUNT:CONTAINER + Standard options Here are the Standard options specific to azureblob (Microsoft Azure @@ -42112,13 +43593,13 @@ This will guide you through an interactive setup process: y) Yes n) No y/n> y - -------------------- - [remote] - type = onedrive - token = {"access_token":"youraccesstoken","token_type":"Bearer","refresh_token":"yourrefreshtoken","expiry":"2018-08-26T22:39:52.486512262+08:00"} - drive_id = b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk - drive_type = business - -------------------- + Configuration complete. 
+ Options: + - type: onedrive + - token: {"access_token":"youraccesstoken","token_type":"Bearer","refresh_token":"yourrefreshtoken","expiry":"2018-08-26T22:39:52.486512262+08:00"} + - drive_id: b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk + - drive_type: business + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -43273,11 +44754,12 @@ This will guide you through an interactive setup process: password: Confirm the password: password: - -------------------- - [remote] - username = - password = *** ENCRYPTED *** - -------------------- + Configuration complete. + Options: + - type: opendrive + - username: + - password: *** ENCRYPTED *** + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -44341,15 +45823,16 @@ This will guide you through an interactive setup process. Leave blank will use the default value "3". connection_retries> Remote config - -------------------- - [remote] - env_auth = false - access_key_id = access_key - secret_access_key = secret_key - endpoint = - zone = pek3a - connection_retries = - -------------------- + Configuration complete. + Options: + - type: qingstor + - env_auth: false + - access_key_id: access_key + - secret_access_key: secret_key + - endpoint: + - zone: pek3a + - connection_retries: + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -44653,11 +46136,12 @@ This will guide you through an interactive setup process: Host name of Quatrix account. host> example.quatrix.it - -------------------- - [remote] - api_key = your_api_key - host = example.quatrix.it - -------------------- + Configuration complete. + Options: + - type: quatrix + - api_key: your_api_key + - host: example.quatrix.it + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -44703,12 +46187,12 @@ hostname was changed. Choose a number from below, or type in an existing value 1 > remote remote> remote - -------------------- - [remote] - type = quatrix - host = some_host.quatrix.it - api_key = your_api_key - -------------------- + Configuration complete. + Options: + - type: quatrix + - host: some_host.quatrix.it + - api_key: your_api_key + Keep this "remote" remote? Edit remote Option api_key. API key for accessing Quatrix account @@ -44718,12 +46202,12 @@ hostname was changed. Host name of Quatrix account Enter a string value. Press Enter for the default (some_host.quatrix.it). - -------------------- - [remote] - type = quatrix - host = some_host.quatrix.it - api_key = your_api_key - -------------------- + Configuration complete. + Options: + - type: quatrix + - host: some_host.quatrix.it + - api_key: your_api_key + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -45595,6 +47079,49 @@ Properties: - Type: bool - Default: false +--swift-fetch-until-empty-page + +When paginating, always fetch unless we received an empty page. + +Consider using this option if rclone listings show fewer objects than +expected, or if repeated syncs copy unchanged objects. + +It is safe to enable this, but rclone may make more API calls than +necessary. + +This is one of a pair of workarounds to handle implementations of the +Swift API that do not implement pagination as expected. See also +"partial_page_fetch_threshold". 
+
+Properties:
+
+- Config: fetch_until_empty_page
+- Env Var: RCLONE_SWIFT_FETCH_UNTIL_EMPTY_PAGE
+- Type: bool
+- Default: false
+
+--swift-partial-page-fetch-threshold
+
+When paginating, fetch if the current page is within this percentage of
+the limit.
+
+Consider using this option if rclone listings show fewer objects than
+expected, or if repeated syncs copy unchanged objects.
+
+It is safe to enable this, but rclone may make more API calls than
+necessary.
+
+This is one of a pair of workarounds to handle implementations of the
+Swift API that do not implement pagination as expected. See also
+"fetch_until_empty_page".
+
+Properties:
+
+- Config: partial_page_fetch_threshold
+- Env Var: RCLONE_SWIFT_PARTIAL_PAGE_FETCH_THRESHOLD
+- Type: int
+- Default: 0
+
 --swift-chunk-size

 Above this size files will be chunked.
@@ -45815,12 +47342,13 @@ This will guide you through an interactive setup process:
 Log in and authorize rclone for access
 Waiting for code...
 Got code
- --------------------
- [remote]
- client_id =
- client_secret =
- token = {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
- --------------------
+ Configuration complete.
+ Options:
+ - type: pcloud
+ - client_id:
+ - client_secret:
+ - token: {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}
+ Keep this "remote" remote?
 y) Yes this is OK
 e) Edit this remote
 d) Delete this remote
@@ -46423,6 +47951,190 @@ Deleted files still visible with trashed-only

 Deleted files will still be visible with --pikpak-trashed-only even
 after the trash has been emptied. This goes away after a few days.

+Pixeldrain
+
+This is the backend for Pixeldrain's premium filesystem feature. This is
+not the same as pixeldrain's free file sharing product. The filesystem
+requires either a Pro subscription or the Prepaid plan. More information
+on subscriptions.
+
+An overview of the filesystem's features and limitations is available in
+the filesystem guide on pixeldrain.
+
+Usage with account
+
+To use the personal filesystem you will need a pixeldrain account and
+either the Prepaid plan or one of the Patreon-based subscriptions. After
+registering and subscribing, your personal filesystem will be available
+at this link: https://pixeldrain.com/d/me.
+
+Go to the API keys page on your account and generate a new API key for
+rclone. Then run rclone config and use the API key to create a new
+backend.
+
+Example:
+
+ No remotes found, make a new one?
+ n) New remote
+ d) Delete remote
+ c) Copy remote
+ s) Set configuration password
+ q) Quit config
+ n/d/c/s/q> n
+
+ Enter name for new remote.
+ name> pixeldrainfs
+
+ Option Storage.
+ Type of storage to configure.
+ Choose a number from below, or type in your own value.
+ ...
+ XX / Pixeldrain Filesystem
+    \ (pixeldrain)
+ ...
+ Storage> pixeldrain
+
+ Option api_key.
+ API key for your pixeldrain account.
+ Found on https://pixeldrain.com/user/api_keys.
+ Enter a value. Press Enter to leave empty.
+ api_key> b1bb1e81-9b7b-406b-986a-c9b20be76e15
+
+ Option directory_id.
+ Root of the filesystem to use. Set to 'me' to use your personal filesystem.
+ Set to a shared directory ID to use a shared directory.
+ Enter a string value. Press Enter for the default (me).
+ directory_id>
+
+ Edit advanced config?
+ y) Yes
+ n) No (default)
+ y/n>
+
+ Configuration complete.
+ Options:
+ - type: pixeldrain
+ - api_key: b1bb1e81-9b7b-406b-986a-c9b20be76e15
+ Keep this "pixeldrainfs" remote?
+ y) Yes this is OK (default) + e) Edit this remote + d) Delete this remote + y/e/d> + + Current remotes: + + Name Type + ==== ==== + pixeldrainfs pixeldrain + + e) Edit existing remote + n) New remote + d) Delete remote + r) Rename remote + c) Copy remote + s) Set configuration password + q) Quit config + e/n/d/r/c/s/q> q + +Usage without account + +It is possible to gain read-only access to publicly shared directories +through rclone. For this you only need a directory ID. The directory ID +can be found in the URL of a shared directory, the URL will look like +this https://pixeldrain.com/d/abcd1234 where abcd1234 is the directory +ID. Directory IDs in your own filesystem can also be listed with the lsf +command: + +rclone lsf Pixeldrain: --dirs-only -Fpi + +This will print directories in your Pixeldrain home directory and their +public IDs. + +Enter this directory ID in the rclone config and you will be able to +access the directory. + +Standard options + +Here are the Standard options specific to pixeldrain (Pixeldrain +Filesystem). + +--pixeldrain-api-key + +API key for your pixeldrain account. Found on +https://pixeldrain.com/user/api_keys. + +Properties: + +- Config: api_key +- Env Var: RCLONE_PIXELDRAIN_API_KEY +- Type: string +- Required: false + +--pixeldrain-root-folder-id + +Root of the filesystem to use. + +Set to 'me' to use your personal filesystem. Set to a shared directory +ID to use a shared directory. + +Properties: + +- Config: root_folder_id +- Env Var: RCLONE_PIXELDRAIN_ROOT_FOLDER_ID +- Type: string +- Default: "me" + +Advanced options + +Here are the Advanced options specific to pixeldrain (Pixeldrain +Filesystem). + +--pixeldrain-api-url + +The API endpoint to connect to. In the vast majority of cases it's fine +to leave this at default. It is only intended to be changed for testing +purposes. + +Properties: + +- Config: api_url +- Env Var: RCLONE_PIXELDRAIN_API_URL +- Type: string +- Default: "https://pixeldrain.com/api" + +--pixeldrain-description + +Description of the remote. + +Properties: + +- Config: description +- Env Var: RCLONE_PIXELDRAIN_DESCRIPTION +- Type: string +- Required: false + +Metadata + +Pixeldrain supports file modes and creation times. + +Here are the possible system metadata items for the pixeldrain backend. + + ------------------------------------------------------------------------------------------------- + Name Help Type Example Read Only + ----------- -------------- ----------- ------------------------------------- -------------------- + btime Time of file RFC 3339 2006-01-02T15:04:05.999999999Z07:00 N + birth + (creation) + + mode File mode octal, unix 755 N + style + + mtime Time of last RFC 3339 2006-01-02T15:04:05.999999999Z07:00 N + modification + ------------------------------------------------------------------------------------------------- + +See the metadata docs for more info. + premiumize.me Paths are specified as remote:path @@ -46469,11 +48181,11 @@ This will guide you through an interactive setup process: Log in and authorize rclone for access Waiting for code... Got code - -------------------- - [remote] - type = premiumizeme - token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"} - -------------------- + Configuration complete. + Options: + - type: premiumizeme + - token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"} + Keep this "remote" remote? 
y) Yes this is OK
e) Edit this remote
d) Delete this remote
@@ -46699,12 +48411,12 @@ This will guide you through an interactive setup process:
 Enter a value. Press Enter to leave empty.
 2fa> 123456
 Remote config
- --------------------
- [remote]
- type = protondrive
- user = you@protonmail.com
- pass = *** ENCRYPTED ***
- --------------------
+ Configuration complete.
+ Options:
+ - type: protondrive
+ - user: you@protonmail.com
+ - pass: *** ENCRYPTED ***
+ Keep this "remote" remote?
 y) Yes this is OK
 e) Edit this remote
 d) Delete this remote
@@ -47267,12 +48979,12 @@ This will guide you through an interactive setup process:
 Enter a value. Press Enter to leave empty.
 2fa> 123456
 Remote config
- --------------------
- [remote]
- type = protondrive
- user = you@protonmail.com
- pass = *** ENCRYPTED ***
- --------------------
+ Configuration complete.
+ Options:
+ - type: protondrive
+ - user: you@protonmail.com
+ - pass: *** ENCRYPTED ***
+ Keep this "remote" remote?
 y) Yes this is OK
 e) Edit this remote
 d) Delete this remote
@@ -48023,14 +49735,15 @@ This will guide you through an interactive setup process.
 Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.
 key_file>
 Remote config
- --------------------
- [remote]
- host = example.com
- user = sftpuser
- port =
- pass =
- key_file =
- --------------------
+ Configuration complete.
+ Options:
+ - type: sftp
+ - host: example.com
+ - user: sftpuser
+ - port:
+ - pass:
+ - key_file:
+ Keep this "remote" remote?
 y) Yes this is OK
 e) Edit this remote
 d) Delete this remote
@@ -48386,7 +50099,16 @@ Properties:

 Raw PEM-encoded private key.

-If specified, will override key_file parameter.
+Note that this should be on a single line with line endings replaced
+with '\n', eg
+
+    key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY-----
+
+This will generate the single line correctly:
+
+    awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa
+
+If specified, it will override the key_file parameter.

 Properties:
@@ -48849,13 +50571,13 @@
 Maximum number of SFTP simultaneous connections, 0 for unlimited.

 Note that setting this is very likely to cause deadlocks so it should
 be used with care.

-If you are doing a sync or copy then make sure concurrency is one more
+If you are doing a sync or copy then make sure connections is one more
 than the sum of --transfers and --checkers.

 If you use --check-first then it just needs to be one more than the
 maximum of --checkers and --transfers.

-So for concurrency 3 you'd use --checkers 2 --transfers 2 --check-first
+So for connections 3 you'd use --checkers 2 --transfers 2 --check-first
 or --checkers 1 --transfers 1.

 Properties:
@@ -49459,11 +51181,11 @@ Setup with access grant
 Enter a string value. Press Enter for the default ("").
 access_grant> your-access-grant-received-by-someone-else
 Remote config
- --------------------
- [remote]
- type = storj
- access_grant = your-access-grant-received-by-someone-else
- --------------------
+ Configuration complete.
+ Options:
+ - type: storj
+ - access_grant: your-access-grant-received-by-someone-else
+ Keep this "remote" remote?
 y) Yes this is OK (default)
 e) Edit this remote
 d) Delete this remote
@@ -49512,14 +51234,14 @@ Setup with API key and passphrase
 Enter a string value. Press Enter for the default ("").
passphrase> your-human-readable-encryption-passphrase Remote config - -------------------- - [remote] - type = storj - satellite_address = 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777 - api_key = your-api-key-for-your-storj-project - passphrase = your-human-readable-encryption-passphrase - access_grant = the-access-grant-generated-from-the-api-key-and-passphrase - -------------------- + Configuration complete. + Options: + - type: storj + - satellite_address: 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777 + - api_key: your-api-key-for-your-storj-project + - passphrase: your-human-readable-encryption-passphrase + - access_grant: the-access-grant-generated-from-the-api-key-and-passphrase + Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -49825,11 +51547,11 @@ This will guide you through an interactive setup process: Username (email address)> nick@craig-wood.com Your Sugarsync password is only required during setup and will not be stored. password: - -------------------- - [remote] - type = sugarsync - refresh_token = https://api.sugarsync.com/app-authorization/XXXXXXXXXXXXXXXXXX - -------------------- + Configuration complete. + Options: + - type: sugarsync + - refresh_token: https://api.sugarsync.com/app-authorization/XXXXXXXXXXXXXXXXXX + Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -50049,11 +51771,6 @@ policy mfs (most free space) as a member of an rclone union remote. See List of backends that do not support rclone about and rclone about -Tardigrade - -The Tardigrade backend has been renamed to be the Storj backend. Old -configuration files will continue to work. - Uloz.to Paths are specified as remote:path @@ -50544,11 +52261,11 @@ This will guide you through an interactive setup process: Enter a signed integer. Press Enter for the default ("120"). cache_time> Remote config - -------------------- - [remote] - type = union - upstreams = remote1:dir1 remote2:dir2 remote3:dir3 - -------------------- + Configuration complete. + Options: + - type: union + - upstreams: remote1:dir1 remote2:dir2 remote3:dir3 + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -50920,15 +52637,15 @@ This will guide you through an interactive setup process: Bearer token instead of user/pass (e.g. a Macaroon) bearer_token> Remote config - -------------------- - [remote] - type = webdav - url = https://example.com/remote.php/webdav/ - vendor = nextcloud - user = user - pass = *** ENCRYPTED *** - bearer_token = - -------------------- + Configuration complete. + Options: + - type: webdav + - url: https://example.com/remote.php/webdav/ + - vendor: nextcloud + - user: user + - pass: *** ENCRYPTED *** + - bearer_token: + Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -51147,6 +52864,18 @@ Properties: - Type: bool - Default: false +--webdav-unix-socket + +Path to a unix domain socket to dial to, instead of opening a TCP +connection directly + +Properties: + +- Config: unix_socket +- Env Var: RCLONE_WEBDAV_UNIX_SOCKET +- Type: string +- Required: false + --webdav-description Description of the remote. @@ -51379,12 +53108,13 @@ This will guide you through an interactive setup process: Log in and authorize rclone for access Waiting for code... 
Got code
- --------------------
- [remote]
- client_id =
- client_secret =
- token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"OAuth","expiry":"2016-12-29T12:27:11.362788025Z"}
- --------------------
+ Configuration complete.
+ Options:
+ - type: yandex
+ - client_id:
+ - client_secret:
+ - token: {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"OAuth","expiry":"2016-12-29T12:27:11.362788025Z"}
+ Keep this "remote" remote?
 y) Yes this is OK
 e) Edit this remote
 d) Delete this remote
@@ -51541,6 +53271,18 @@ Properties:

 - Type: Encoding
 - Default: Slash,Del,Ctl,InvalidUtf8,Dot

+--yandex-spoof-ua
+
+Set the user agent to match an official version of the yandex disk
+client. May help with upload performance.
+
+Properties:
+
+- Config: spoof_ua
+- Env Var: RCLONE_YANDEX_SPOOF_UA
+- Type: bool
+- Default: true
+
 --yandex-description

 Description of the remote.
@@ -51629,12 +53371,12 @@ This will guide you through an interactive setup process:
 1 / General
   \ "4u2869d2aa6fca04f4f2f896b6539243b85b1"
 Enter a Workspace ID> 1
- --------------------
- [remote]
- type = zoho
- token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"Zoho-oauthtoken","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","expiry":"2020-10-12T00:54:52.370275223+02:00"}
- root_folder_id = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- --------------------
+ Configuration complete.
+ Options:
+ - type: zoho
+ - token: {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"Zoho-oauthtoken","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","expiry":"2020-10-12T00:54:52.370275223+02:00"}
+ - root_folder_id: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ Keep this "remote" remote?
 y) Yes this is OK (default)
 e) Edit this remote
 d) Delete this remote
@@ -51952,11 +53694,9 @@
 requires absolute paths and the use of prefix \\?\, e.g.
 \\?\D:\some\very\long\path. For convenience rclone will automatically
 convert regular paths into the corresponding extended-length paths, so
 in most cases you do not have to worry about this (read more below).
-
-Note that Windows supports using the same prefix \\?\ to specify path to
+Using the same prefix \\?\ it is also possible to specify paths to
 volumes identified by their GUID, e.g.
-\\?\Volume{b75e2c83-0000-0000-0000-602f00000000}\some\path. This is not
-supported in rclone, due to an issue in go.
+\\?\Volume{b75e2c83-0000-0000-0000-602f00000000}\some\path.

 Long paths
@@ -52310,6 +54050,34 @@ Properties:

 - Type: bool
 - Default: false

+--local-no-clone
+
+Disable reflink cloning for server-side copies.
+
+Normally, for local-to-local transfers, rclone will "clone" the file
+when possible, and fall back to "copying" only when cloning is not
+supported.
+
+Cloning creates a shallow copy (or "reflink") which initially shares
+blocks with the original file. Unlike a "hardlink", the two files are
+independent and neither will affect the other if subsequently modified.
+
+Cloning is usually preferable to copying, as it is much faster and is
+deduplicated by default (i.e. having two identical files does not
+consume more storage than having just one.) However, for use cases where
+data redundancy is preferable, --local-no-clone can be used to disable
+cloning and force "deep" copies.
+
+Currently, cloning is only supported when using APFS on macOS (support
+for other platforms may be added in the future.)
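+
+For example, to force a full "deep" copy of a local file (a sketch; the
+paths are placeholders, not from the upstream docs):
+
+    rclone copyto --local-no-clone src/file.bin dst/file.bin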
+ +Properties: + +- Config: no_clone +- Env Var: RCLONE_LOCAL_NO_CLONE +- Type: bool +- Default: false + --local-no-preallocate Disable preallocation of disk space for transferred files. @@ -52497,6 +54265,172 @@ Options: Changelog +v1.68.0 - 2024-09-08 + +See commits + +- New backends + - Files.com (Sam Harrison) + - Gofile (Nick Craig-Wood) + - Pixeldrain (Fornax) +- Changed backends + - S3 backend updated to use AWS SDKv2 as v1 is now unsupported. + - The matrix of providers and auth methods is huge and there + could be problems with obscure combinations. + - Please report problems in a new issue on Github. +- New commands + - config encryption: set, remove and check to manage config file + encryption (Nick Craig-Wood) +- New Features + - build + - Update to go1.23 and make go1.21 the minimum required + version (Nick Craig-Wood) + - Update all dependencies (Nick Craig-Wood) + - Disable wasm/js build due to go bug #64856 (Nick Craig-Wood) + - Enable custom linting rules with ruleguard via gocritic + (albertony) + - Update logging statements to make --use-json-log work always + (albertony) + - Adding new code quality tests and fixing the fallout + (albertony) + - config + - Internal config re-organised to be more consistent and make + it available from the rc (Nick Craig-Wood) + - Avoid remotes with empty names from the environment + (albertony) + - Make listing of remotes more consistent (albertony) + - Make getting config values more consistent (albertony) + - Use --password-command to set config file password if + supplied (Nick Craig-Wood) + - doc fixes (albertony, crystalstall, David Seifert, Eng Zer Jun, + Ernie Hershey, Florian Klink, John Oxley, kapitainsky, Mathieu + Moreau, Nick Craig-Wood, nipil, Pétr Bozsó, Russ Bubley, Sam + Harrison, Thearas, URenko, Will Miles, yuval-cloudinary) + - fs: Allow semicolons as well as spaces in --bwlimit timetable + parsing (Kyle Reynolds) + - help + - Global flags help command now takes glob filter (albertony) + - Make help command output less distracting (albertony) + - lib/encoder: Add Raw encoding for use where no encoding at all + is required, eg --local-encoding Raw (URenko) + - listremotes: Added options for filtering, ordering and json + output (albertony) + - nfsmount + - Make the --sudo flag work for umount as well as mount (Nick + Craig-Wood) + - Add -o tcp option to NFS mount options to fix mounting under + Linux (Nick Craig-Wood) + - operations: copy: generate stable partial suffix (Georg Welzel) + - rc + - Add options/info call to enumerate options (Nick Craig-Wood) + - Add option blocks parameter to options/get and options/info + (Nick Craig-Wood) + - Add vfs/queue to show the status of the upload queue (Nick + Craig-Wood) + - Add vfs/queue-set-expiry to adjust expiry of items in the + VFS queue (Nick Craig-Wood) + - Add --unix-socket option to rc command (Florian Klink) + - Prevent unmount rc command from sending a STOPPING=1 + sd-notify message (AThePeanut4) + - rcserver: Implement prometheus metrics on a dedicated port (Oleg + Kunitsyn) + - serve dlna + - Also look at "Subs" subdirectory (Florian Klink) + - Don't swallow video.{idx,sub} (Florian Klink) + - Set more correct mime type (Florian Klink) + - serve nfs + - Implement on disk cache for file handles selected with + --nfs-cache-type (Nick Craig-Wood) + - Add tracing to filesystem calls (Nick Craig-Wood) + - Mask unimplemented error from chmod (Nick Craig-Wood) + - Unify the nfs library logging with rclone's logging better + (Nick Craig-Wood) + - Fix incorrect user id and 
group id exported to NFS (Nick + Craig-Wood) + - serve s3 + - Implement --auth-proxy (Sawjan Gurung) + - Update to AWS SDKv2 by updating github.com/rclone/gofakes3 + (Nick Craig-Wood) +- Bug Fixes + - bisync: Fix sync time problems with backends that round time (eg + Dropbox) (nielash) + - serve dlna: Fix panic: invalid argument to Int63n (Nick + Craig-Wood) +- VFS + - Add --vfs-read-chunk-streams to parallel read chunks from files + (Nick Craig-Wood) + - This can increase mount performance on high bandwidth or + large latency links + - Fix cache encoding with special characters (URenko) +- Local + - Fix encoding of root path fix (URenko) + - Add server-side copy (using clone) with xattrs on macOS + (nielash) + - --local-no-clone flag to disable cloning for server-side + copies (nielash) + - Support setting custom --metadata during server-side Copy + (nielash) +- Azure Blob + - Allow anonymous access for public resources (Nick Craig-Wood) +- B2 + - Include custom upload headers in large file info (Pat Patterson) +- Drive + - Fix copying Google Docs to a backend which only supports SHA1 + (Nick Craig-Wood) +- Fichier + - Fix detection of Flood Detected error (Nick Craig-Wood) + - Fix server side move (Nick Craig-Wood) +- HTTP + - Reload client certificates on expiry (Saleh Dindar) + - Support listening on passed FDs (Florian Klink) +- Jottacloud + - Fix setting of metadata on server side move (albertony) +- Onedrive + - Fix nil pointer error when uploading small files (Nick + Craig-Wood) +- Pcloud + - Implement SetModTime (Georg Welzel) + - Implement OpenWriterAt feature to enable multipart uploads + (Georg Welzel) +- Pikpak + - Improve data consistency by ensuring async tasks complete + (wiserain) + - Implement custom hash to replace wrong sha1 (wiserain) + - Fix error with copyto command (wiserain) + - Optimize file move by removing unnecessary readMetaData() call + (wiserain) + - Non-buffered hash calculation for local source files (wiserain) + - Optimize upload by pre-fetching gcid from API (wiserain) + - Correct file transfer progress for uploads by hash (wiserain) + - Update to using AWS SDK v2 (wiserain) +- S3 + - Update to using AWS SDK v2 (Nick Craig-Wood) + - Add --s3-sdk-log-mode to control SDKv2 debugging (Nick + Craig-Wood) + - Fix incorrect region for Magalu provider (Filipe Herculano) + - Allow restoring from intelligent-tiering storage class (Pawel + Palucha) +- SFTP + - Use uint32 for mtime to save memory (Tomasz Melcer) + - Ignore useless errors when closing the connection pool (Nick + Craig-Wood) + - Support listening on passed FDs (Florian Klink) +- Swift + - Add workarounds for bad listings in Ceph RGW (Paul Collins) + - Add total/free space info in about command. + (fsantagostinobietti) +- Ulozto + - Fix upload of > 2GB files on 32 bit platforms (Tobias Markus) +- WebDAV + - Add --webdav-unix-socket-path to connect to a unix socket + (Florian Klink) +- Yandex + - Implement custom user agent to help with upload speeds + (Sebastian Bünger) +- Zoho + - Fix inefficiencies uploading with new API to avoid throttling + (Nick Craig-Wood) + v1.67.0 - 2024-06-14 See commits @@ -59765,9 +61699,10 @@ garbage collector work harder, reducing memory size at the expense of CPU usage. The most common cause of rclone using lots of memory is a single -directory with thousands or millions of files in. Rclone has to load -this entirely into memory as rclone objects. Each rclone object takes -0.5k-1k of memory. +directory with millions of files in. 
Rclone has to load this entirely +into memory as rclone objects. Each rclone object takes 0.5k-1k of +memory. There is a workaround for this which involves a bit of +scripting. Rclone changes fullwidth Unicode punctuation marks in file names @@ -60680,6 +62615,31 @@ email addresses removed from here need to be added to bin/.ignore-emails to make - Michał Dzienisiewicz michal.piotr.dz@gmail.com - Florian Klink flokli@flokli.de - Bill Fraser bill@wfraser.dev +- Thearas thearas850@gmail.com +- Filipe Herculano fifo_@live.com +- Russ Bubley russ.bubley@googlemail.com +- Paul Collins paul.collins@canonical.com +- Tomasz Melcer liori@exroot.org +- itsHenry 2671230065@qq.com +- Ke Wang me@ke.wang +- AThePeanut4 49614525+AThePeanut4@users.noreply.github.com +- Tobias Markus tobbi.bugs@googlemail.com +- Ernie Hershey github@ernie.org +- Will Miles wmiles@sgl.com +- David Seifert 16636962+SoapGentoo@users.noreply.github.com +- Fornax wimbrand96@gmail.com +- Sam Harrison sam.harrison@files.com +- Péter Bozsó 3806723+peterbozso@users.noreply.github.com +- Georg Welzel gwelzel@mailbox.org +- John Oxley john.oxley@gmail.com joxley@meta.com +- Pawel Palucha pawel.palucha@aetion.com +- crystalstall crystalruby@qq.com +- nipil nipil@users.noreply.github.com +- yuval-cloudinary 46710068+yuval-cloudinary@users.noreply.github.com +- Mathieu Moreau mrx23dot@users.noreply.github.com +- fsantagostinobietti + 6057026+fsantagostinobietti@users.noreply.github.com +- Oleg Kunitsyn 114359669+hiddenmarten@users.noreply.github.com Contact the rclone project diff --git a/Makefile b/Makefile index 4a9d2240f..2e55c5a0c 100644 --- a/Makefile +++ b/Makefile @@ -144,10 +144,14 @@ MANUAL.txt: MANUAL.md pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt commanddocs: rclone - XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/ + -@rmdir -p '$$HOME/.config/rclone' + XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/ + @[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1) backenddocs: rclone bin/make_backend_docs.py + -@rmdir -p '$$HOME/.config/rclone' XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py + @[ ! 
-e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1) rcdocs: rclone bin/make_rc_docs.sh diff --git a/README.md b/README.md index 557adb80d..d1e859244 100644 --- a/README.md +++ b/README.md @@ -62,9 +62,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and * Google Drive [:page_facing_up:](https://rclone.org/drive/) * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/) * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/) + * Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box) * HiDrive [:page_facing_up:](https://rclone.org/hidrive/) * HTTP [:page_facing_up:](https://rclone.org/http/) * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs) + * iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/) * ImageKit [:page_facing_up:](https://rclone.org/imagekit/) * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/) * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/) @@ -91,6 +93,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/) * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/) * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/) + * Outscale [:page_facing_up:](https://rclone.org/s3/#outscale) * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud) * pCloud [:page_facing_up:](https://rclone.org/pcloud/) * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox) @@ -104,6 +107,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and * Quatrix [:page_facing_up:](https://rclone.org/quatrix/) * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/) * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp) + * rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net) * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway) * Seafile [:page_facing_up:](https://rclone.org/seafile/) * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs) diff --git a/RELEASE.md b/RELEASE.md index 0ac43ea82..7b1dad77f 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -168,6 +168,8 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a To make a full build then set the tags correctly and add `--push` +Note that you can't only build one architecture - you need to build them all. + ``` -docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push . +docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push . 
``` diff --git a/VERSION b/VERSION index 6640f481f..6439851b6 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v1.68.0 +v1.69.0 diff --git a/backend/all/all.go b/backend/all/all.go index a9191c98d..1abd6770f 100644 --- a/backend/all/all.go +++ b/backend/all/all.go @@ -26,6 +26,7 @@ import ( _ "github.com/rclone/rclone/backend/hdfs" _ "github.com/rclone/rclone/backend/hidrive" _ "github.com/rclone/rclone/backend/http" + _ "github.com/rclone/rclone/backend/iclouddrive" _ "github.com/rclone/rclone/backend/imagekit" _ "github.com/rclone/rclone/backend/internetarchive" _ "github.com/rclone/rclone/backend/jottacloud" diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 358886404..c7e43ecb4 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -209,6 +209,22 @@ rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + ` keys instead of setting ` + "`service_principal_file`" + `. `, Advanced: true, + }, { + Name: "disable_instance_discovery", + Help: `Skip requesting Microsoft Entra instance metadata + +This should be set true only by applications authenticating in +disconnected clouds, or private clouds such as Azure Stack. + +It determines whether rclone requests Microsoft Entra instance +metadata from ` + "`https://login.microsoft.com/`" + ` before +authenticating. + +Setting this to true will skip this request, making you responsible +for ensuring the configured authority is valid and trustworthy. +`, + Default: false, + Advanced: true, }, { Name: "use_msi", Help: `Use a managed service identity to authenticate (only works in Azure). @@ -243,6 +259,20 @@ msi_client_id, or msi_mi_res_id parameters.`, Help: "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.", Default: false, Advanced: true, + }, { + Name: "use_az", + Help: `Use Azure CLI tool az for authentication + +Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/) +as the sole means of authentication. + +Setting this can be useful if you wish to use the az CLI on a host with +a System Managed Identity that you do not want to use. + +Don't set env_auth at the same time. 
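+
+A minimal example config section (a sketch; the remote name and account
+are placeholders):
+
+    [azremote]
+    type = azureblob
+    account = myaccount
+    use_az = true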
+`, + Default: false, + Advanced: true, }, { Name: "endpoint", Help: "Endpoint for the service.\n\nLeave blank normally.", @@ -438,10 +468,12 @@ type Options struct { Username string `config:"username"` Password string `config:"password"` ServicePrincipalFile string `config:"service_principal_file"` + DisableInstanceDiscovery bool `config:"disable_instance_discovery"` UseMSI bool `config:"use_msi"` MSIObjectID string `config:"msi_object_id"` MSIClientID string `config:"msi_client_id"` MSIResourceID string `config:"msi_mi_res_id"` + UseAZ bool `config:"use_az"` Endpoint string `config:"endpoint"` ChunkSize fs.SizeSuffix `config:"chunk_size"` UploadConcurrency int `config:"upload_concurrency"` @@ -725,7 +757,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e } // Read credentials from the environment options := azidentity.DefaultAzureCredentialOptions{ - ClientOptions: policyClientOptions, + ClientOptions: policyClientOptions, + DisableInstanceDiscovery: opt.DisableInstanceDiscovery, } cred, err = azidentity.NewDefaultAzureCredential(&options) if err != nil { @@ -875,6 +908,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e if err != nil { return nil, fmt.Errorf("failed to acquire MSI token: %w", err) } + case opt.UseAZ: + var options = azidentity.AzureCLICredentialOptions{} + cred, err = azidentity.NewAzureCLICredential(&options) + if err != nil { + return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err) + } case opt.Account != "": // Anonymous access anonymous = true diff --git a/backend/box/box.go b/backend/box/box.go index 4f38955eb..7183f35e5 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -43,6 +43,7 @@ import ( "github.com/rclone/rclone/lib/jwtutil" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/rest" "github.com/youmark/pkcs8" "golang.org/x/oauth2" @@ -256,7 +257,6 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string { } func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) { - block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey)) if len(rest) > 0 { return nil, fmt.Errorf("box: extra data included in private key: %w", err) @@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, return shouldRetry(ctx, resp, err) }) if err != nil { - //fmt.Printf("...Error %v\n", err) + // fmt.Printf("...Error %v\n", err) return "", err } // fmt.Printf("...Id %q\n", *info.Id) @@ -966,6 +966,26 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, return nil, err } + // check if dest already exists + item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size()) + if err != nil { + return nil, err + } + if item != nil { // dest already exists, need to copy to temp name and then move + tempSuffix := "-rclone-copy-" + random.String(8) + fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix) + tempObj, err := f.Copy(ctx, src, remote+tempSuffix) + if err != nil { + return nil, err + } + fs.Debugf(remote+tempSuffix, "moving to real name %v", remote) + err = f.deleteObject(ctx, item.ID) + if err != nil { + return nil, err + } + return f.Move(ctx, tempObj, remote) + } + // Copy the object opts := rest.Opts{ Method: "POST", diff --git a/backend/cache/cache_internal_test.go b/backend/cache/cache_internal_test.go index 
3bbf46647..6092a1a1d 100644 --- a/backend/cache/cache_internal_test.go +++ b/backend/cache/cache_internal_test.go @@ -10,7 +10,6 @@ import ( goflag "flag" "fmt" "io" - "log" "math/rand" "os" "path" @@ -93,7 +92,7 @@ func TestMain(m *testing.M) { goflag.Parse() var rc int - log.Printf("Running with the following params: \n remote: %v", remoteName) + fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName) runInstance = newRun() rc = m.Run() os.Exit(rc) @@ -408,7 +407,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) { // update in the wrapped fs originalSize, err := runInstance.size(t, rootFs, "data.bin") require.NoError(t, err) - log.Printf("original size: %v", originalSize) + fs.Logf(nil, "original size: %v", originalSize) o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin")) require.NoError(t, err) @@ -425,7 +424,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) { err = o.Update(context.Background(), bytes.NewReader(data2), objInfo) require.NoError(t, err) require.Equal(t, int64(len(data2)), o.Size()) - log.Printf("updated size: %v", len(data2)) + fs.Logf(nil, "updated size: %v", len(data2)) // get a new instance from the cache if runInstance.wrappedIsExternal { @@ -485,49 +484,49 @@ func TestInternalMoveWithNotify(t *testing.T) { err = runInstance.retryBlock(func() error { li, err := runInstance.list(t, rootFs, "test") if err != nil { - log.Printf("err: %v", err) + fs.Logf(nil, "err: %v", err) return err } if len(li) != 2 { - log.Printf("not expected listing /test: %v", li) + fs.Logf(nil, "not expected listing /test: %v", li) return fmt.Errorf("not expected listing /test: %v", li) } li, err = runInstance.list(t, rootFs, "test/one") if err != nil { - log.Printf("err: %v", err) + fs.Logf(nil, "err: %v", err) return err } if len(li) != 0 { - log.Printf("not expected listing /test/one: %v", li) + fs.Logf(nil, "not expected listing /test/one: %v", li) return fmt.Errorf("not expected listing /test/one: %v", li) } li, err = runInstance.list(t, rootFs, "test/second") if err != nil { - log.Printf("err: %v", err) + fs.Logf(nil, "err: %v", err) return err } if len(li) != 1 { - log.Printf("not expected listing /test/second: %v", li) + fs.Logf(nil, "not expected listing /test/second: %v", li) return fmt.Errorf("not expected listing /test/second: %v", li) } if fi, ok := li[0].(os.FileInfo); ok { if fi.Name() != "data.bin" { - log.Printf("not expected name: %v", fi.Name()) + fs.Logf(nil, "not expected name: %v", fi.Name()) return fmt.Errorf("not expected name: %v", fi.Name()) } } else if di, ok := li[0].(fs.DirEntry); ok { if di.Remote() != "test/second/data.bin" { - log.Printf("not expected remote: %v", di.Remote()) + fs.Logf(nil, "not expected remote: %v", di.Remote()) return fmt.Errorf("not expected remote: %v", di.Remote()) } } else { - log.Printf("unexpected listing: %v", li) + fs.Logf(nil, "unexpected listing: %v", li) return fmt.Errorf("unexpected listing: %v", li) } - log.Printf("complete listing: %v", li) + fs.Logf(nil, "complete listing: %v", li) return nil }, 12, time.Second*10) require.NoError(t, err) @@ -577,43 +576,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) { err = runInstance.retryBlock(func() error { found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"))) if !found { - log.Printf("not found /test") + fs.Logf(nil, "not found /test") return fmt.Errorf("not found /test") } found = boltDb.HasEntry(path.Join(cfs.Root(), 
runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"))) if !found { - log.Printf("not found /test/one") + fs.Logf(nil, "not found /test/one") return fmt.Errorf("not found /test/one") } found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2"))) if !found { - log.Printf("not found /test/one/test2") + fs.Logf(nil, "not found /test/one/test2") return fmt.Errorf("not found /test/one/test2") } li, err := runInstance.list(t, rootFs, "test/one") if err != nil { - log.Printf("err: %v", err) + fs.Logf(nil, "err: %v", err) return err } if len(li) != 1 { - log.Printf("not expected listing /test/one: %v", li) + fs.Logf(nil, "not expected listing /test/one: %v", li) return fmt.Errorf("not expected listing /test/one: %v", li) } if fi, ok := li[0].(os.FileInfo); ok { if fi.Name() != "test2" { - log.Printf("not expected name: %v", fi.Name()) + fs.Logf(nil, "not expected name: %v", fi.Name()) return fmt.Errorf("not expected name: %v", fi.Name()) } } else if di, ok := li[0].(fs.DirEntry); ok { if di.Remote() != "test/one/test2" { - log.Printf("not expected remote: %v", di.Remote()) + fs.Logf(nil, "not expected remote: %v", di.Remote()) return fmt.Errorf("not expected remote: %v", di.Remote()) } } else { - log.Printf("unexpected listing: %v", li) + fs.Logf(nil, "unexpected listing: %v", li) return fmt.Errorf("unexpected listing: %v", li) } - log.Printf("complete listing /test/one/test2") + fs.Logf(nil, "complete listing /test/one/test2") return nil }, 12, time.Second*10) require.NoError(t, err) @@ -771,24 +770,24 @@ func TestInternalBug2117(t *testing.T) { di, err := runInstance.list(t, rootFs, "test/dir1/dir2") require.NoError(t, err) - log.Printf("len: %v", len(di)) + fs.Logf(nil, "len: %v", len(di)) require.Len(t, di, 1) time.Sleep(time.Second * 30) di, err = runInstance.list(t, rootFs, "test/dir1/dir2") require.NoError(t, err) - log.Printf("len: %v", len(di)) + fs.Logf(nil, "len: %v", len(di)) require.Len(t, di, 1) di, err = runInstance.list(t, rootFs, "test/dir1") require.NoError(t, err) - log.Printf("len: %v", len(di)) + fs.Logf(nil, "len: %v", len(di)) require.Len(t, di, 4) di, err = runInstance.list(t, rootFs, "test") require.NoError(t, err) - log.Printf("len: %v", len(di)) + fs.Logf(nil, "len: %v", len(di)) require.Len(t, di, 4) } @@ -829,7 +828,7 @@ func newRun() *run { } else { r.tmpUploadDir = uploadDir } - log.Printf("Temp Upload Dir: %v", r.tmpUploadDir) + fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir) return r } diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 3feef0ef2..f39800af0 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -120,6 +120,7 @@ var ( "text/html": ".html", "text/plain": ".txt", "text/tab-separated-values": ".tsv", + "text/markdown": ".md", } _mimeTypeToExtensionLinks = map[string]string{ "application/x-link-desktop": ".desktop", diff --git a/backend/drive/drive_internal_test.go b/backend/drive/drive_internal_test.go index 998d9ba43..023dd42da 100644 --- a/backend/drive/drive_internal_test.go +++ b/backend/drive/drive_internal_test.go @@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) { wantErr error }{ {"doc", []string{".doc"}, nil}, - {" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil}, + {" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil}, {"docx,svg,Docx", []string{".docx", ".svg"}, nil}, 
{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)}, } { diff --git a/backend/dropbox/dropbox.go b/backend/dropbox/dropbox.go index 28eaa30be..f2f1fc852 100644 --- a/backend/dropbox/dropbox.go +++ b/backend/dropbox/dropbox.go @@ -386,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e oldToken = strings.TrimSpace(oldToken) if ok && oldToken != "" && oldToken[0] != '{' { fs.Infof(name, "Converting token to new format") - newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken) + newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken) err := config.SetValueAndSave(name, config.ConfigToken, newToken) if err != nil { return nil, fmt.Errorf("NewFS convert token: %w", err) diff --git a/backend/fichier/api.go b/backend/fichier/api.go index 8e1bdbb61..c9c5f8cdd 100644 --- a/backend/fichier/api.go +++ b/backend/fichier/api.go @@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err return false, err // No such user case 186: return false, err // IP blocked? - case 374: + case 374, 412: // Flood detected seems to be #412 now fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err) time.Sleep(30 * time.Second) default: diff --git a/backend/fichier/fichier.go b/backend/fichier/fichier.go index 4a2caf5bf..5ebc8a6db 100644 --- a/backend/fichier/fichier.go +++ b/backend/fichier/fichier.go @@ -441,23 +441,28 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, fs.Debugf(src, "Can't move - not same remote type") return nil, fs.ErrorCantMove } + srcFs := srcObj.fs // Find current directory ID - _, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false) + srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false) if err != nil { return nil, err } // Create temporary object - dstObj, leaf, directoryID, err := f.createObject(ctx, remote) + dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote) if err != nil { return nil, err } // If it is in the correct directory, just rename it var url string - if currentDirectoryID == directoryID { - resp, err := f.renameFile(ctx, srcObj.file.URL, leaf) + if srcDirectoryID == dstDirectoryID { + // No rename needed + if srcLeaf == dstLeaf { + return src, nil + } + resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf) if err != nil { return nil, fmt.Errorf("couldn't rename file: %w", err) } @@ -466,11 +471,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, } url = resp.URLs[0].URL } else { - folderID, err := strconv.Atoi(directoryID) + dstFolderID, err := strconv.Atoi(dstDirectoryID) if err != nil { return nil, err } - resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf) + rename := dstLeaf + // No rename needed + if srcLeaf == dstLeaf { + rename = "" + } + resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename) if err != nil { return nil, fmt.Errorf("couldn't move file: %w", err) } diff --git a/backend/filescom/filescom.go b/backend/filescom/filescom.go index b14c8cff6..8eb32b320 100644 --- a/backend/filescom/filescom.go +++ b/backend/filescom/filescom.go @@ -247,18 +247,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) { if opt.Site != "" { - config.Subdomain = opt.Site 
- - _, err = url.Parse(config.Endpoint()) - if err != nil { - config.Subdomain = "" + if strings.Contains(opt.Site, ".") { config.EndpointOverride = opt.Site + } else { + config.Subdomain = opt.Site + } - _, err = url.Parse(config.Endpoint()) - if err != nil { - err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site) - return - } + _, err = url.ParseRequestURI(config.Endpoint()) + if err != nil { + err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site) + return } } @@ -355,15 +353,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e continue } - item, err = f.readMetaDataForPath(ctx, remote) - if err != nil { - if files_sdk.IsNotExist(err) { - continue - } - - return nil, err - } - if item.IsDir() { d := fs.NewDir(remote, item.ModTime()) entries = append(entries, d) diff --git a/backend/ftp/ftp.go b/backend/ftp/ftp.go index 75f6e9cdb..4c9db0a9c 100644 --- a/backend/ftp/ftp.go +++ b/backend/ftp/ftp.go @@ -180,12 +180,28 @@ If this is set and no password is supplied then rclone will ask for a password Default: "", Help: `Socks 5 proxy host. - Supports the format user:pass@host:port, user@host:port, host:port. +Supports the format user:pass@host:port, user@host:port, host:port. - Example: +Example: - myUser:myPass@localhost:9005 - `, + myUser:myPass@localhost:9005 +`, + Advanced: true, + }, { + Name: "no_check_upload", + Default: false, + Help: `Don't check the upload is OK + +Normally rclone will try to check the upload exists after it has +uploaded a file to make sure the size and modification time are as +expected. + +This flag stops rclone doing these checks. This enables uploading to +folders which are write only. + +You will likely need to use the --inplace flag also if uploading to +a write only folder. 
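+
+For example, to upload into a write only drop folder (a sketch; the
+remote and path are placeholders):
+
+    rclone copy --inplace --ftp-no-check-upload file.bin ftp:incoming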
+`, Advanced: true, }, { Name: config.ConfigEncoding, @@ -232,6 +248,7 @@ type Options struct { AskPassword bool `config:"ask_password"` Enc encoder.MultiEncoder `config:"encoding"` SocksProxy string `config:"socks_proxy"` + NoCheckUpload bool `config:"no_check_upload"` } // Fs represents a remote FTP server @@ -1303,6 +1320,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op return fmt.Errorf("update stor: %w", err) } o.fs.putFtpConnection(&c, nil) + if o.fs.opt.NoCheckUpload { + o.info = &FileInfo{ + Name: o.remote, + Size: uint64(src.Size()), + ModTime: src.ModTime(ctx), + precise: true, + IsDir: false, + } + return nil + } if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil { return fmt.Errorf("SetModTime: %w", err) } diff --git a/backend/gofile/gofile.go b/backend/gofile/gofile.go index 4b009e8cb..8f7b98b89 100644 --- a/backend/gofile/gofile.go +++ b/backend/gofile/gofile.go @@ -1105,6 +1105,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, return nil, fs.ErrorCantMove } + // Find existing object + srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false) + if err != nil { + return nil, err + } + // Create temporary object dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size) if err != nil { @@ -1112,7 +1118,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, } // Do the move - info, err := f.moveTo(ctx, srcObj.id, path.Base(srcObj.remote), dstLeaf, srcObj.dirID, dstDirectoryID) + info, err := f.moveTo(ctx, srcObj.id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID) if err != nil { return nil, err } @@ -1463,6 +1469,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read if o.id == "" { return nil, errors.New("can't download - no id") } + if o.url == "" { + // On upload an Object is returned with no url, so fetch it here if needed + err = o.readMetaData(ctx) + if err != nil { + return nil, fmt.Errorf("read metadata: %w", err) + } + } fs.FixRangeOption(options, o.size) var resp *http.Response opts := rest.Opts{ diff --git a/backend/googlecloudstorage/googlecloudstorage.go b/backend/googlecloudstorage/googlecloudstorage.go index 07f589ab8..5953e24b8 100644 --- a/backend/googlecloudstorage/googlecloudstorage.go +++ b/backend/googlecloudstorage/googlecloudstorage.go @@ -60,16 +60,14 @@ const ( minSleep = 10 * time.Millisecond ) -var ( - // Description of how to auth for this app - storageConfig = &oauth2.Config{ - Scopes: []string{storage.DevstorageReadWriteScope}, - Endpoint: google.Endpoint, - ClientID: rcloneClientID, - ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), - RedirectURL: oauthutil.RedirectURL, - } -) +// Description of how to auth for this app +var storageConfig = &oauth2.Config{ + Scopes: []string{storage.DevstorageReadWriteScope}, + Endpoint: google.Endpoint, + ClientID: rcloneClientID, + ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), + RedirectURL: oauthutil.RedirectURL, +} // Register with Fs func init() { @@ -106,6 +104,12 @@ func init() { Help: "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.", Hide: fs.OptionHideBoth, Sensitive: true, + }, { + Name: "access_token", + Help: "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.", + Hide: fs.OptionHideConfigurator, + 
Sensitive: true,
+		Advanced: true,
 	}, {
 		Name:      "anonymous",
 		Help:      "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",
@@ -379,6 +383,7 @@ type Options struct {
 	Enc              encoder.MultiEncoder `config:"encoding"`
 	EnvAuth          bool                 `config:"env_auth"`
 	DirectoryMarkers bool                 `config:"directory_markers"`
+	AccessToken      string               `config:"access_token"`
 }

 // Fs represents a remote storage server
@@ -535,6 +540,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
 		}
+	} else if opt.AccessToken != "" {
+		ts := oauth2.Token{AccessToken: opt.AccessToken}
+		oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
 	} else {
 		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
 		if err != nil {
@@ -944,7 +952,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 		return e
 	}
 	return f.createDirectoryMarker(ctx, bucket, dir)
-
 }

 // mkdirParent creates the parent bucket/directory if it doesn't exist
diff --git a/backend/googlephotos/googlephotos.go b/backend/googlephotos/googlephotos.go
index ebae7ebb2..263a5610b 100644
--- a/backend/googlephotos/googlephotos.go
+++ b/backend/googlephotos/googlephotos.go
@@ -28,7 +28,6 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
-	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/lib/batcher"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
@@ -160,6 +159,34 @@
listings and transferred. Without this flag, archived media will not
be visible in directory listings and won't be transferred.`,
 		Advanced: true,
+	}, {
+		Name:    "proxy",
+		Default: "",
+		Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
+
+The Google API will deliver images and video which aren't full
+resolution, and/or have EXIF data missing.
+
+However if you use the gphotosdl proxy then you can download original,
+unchanged images.
+
+This runs a headless browser in the background.
+
+Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
+
+First run with
+
+    gphotosdl -login
+
+Then once you have logged into google photos close the browser window
+and run
+
+    gphotosdl
+
+Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
+rclone use the proxy.
+`, "|", "`"),
+		Advanced: true,
 	}, {
 		Name: config.ConfigEncoding,
 		Help: config.ConfigEncodingHelp,
@@ -181,6 +208,7 @@ type Options struct {
 	BatchMode    string      `config:"batch_mode"`
 	BatchSize    int         `config:"batch_size"`
 	BatchTimeout fs.Duration `config:"batch_timeout"`
+	Proxy        string      `config:"proxy"`
 }

 // Fs represents a remote storage server
@@ -454,7 +482,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
-	defer log.Trace(f, "remote=%q", remote)("")
+	// defer log.Trace(f, "remote=%q", remote)("")
 	return f.newObjectWithInfo(ctx, remote, nil)
 }
@@ -667,7 +695,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
 // This should return ErrDirNotFound if the directory isn't
 // found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { - defer log.Trace(f, "dir=%q", dir)("err=%v", &err) + // defer log.Trace(f, "dir=%q", dir)("err=%v", &err) match, prefix, pattern := patterns.match(f.root, dir, false) if pattern == nil || pattern.isFile { return nil, fs.ErrorDirNotFound @@ -684,7 +712,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - defer log.Trace(f, "src=%+v", src)("") + // defer log.Trace(f, "src=%+v", src)("") // Temporary Object under construction o := &Object{ fs: f, @@ -737,7 +765,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap // Mkdir creates the album if it doesn't exist func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { - defer log.Trace(f, "dir=%q", dir)("err=%v", &err) + // defer log.Trace(f, "dir=%q", dir)("err=%v", &err) match, prefix, pattern := patterns.match(f.root, dir, false) if pattern == nil { return fs.ErrorDirNotFound @@ -761,7 +789,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { // // Returns an error if it isn't empty func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { - defer log.Trace(f, "dir=%q")("err=%v", &err) + // defer log.Trace(f, "dir=%q")("err=%v", &err) match, _, pattern := patterns.match(f.root, dir, false) if pattern == nil { return fs.ErrorDirNotFound @@ -834,7 +862,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { // Size returns the size of an object in bytes func (o *Object) Size() int64 { - defer log.Trace(o, "")("") + // defer log.Trace(o, "")("") if !o.fs.opt.ReadSize || o.bytes >= 0 { return o.bytes } @@ -935,7 +963,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) { // It attempts to read the objects mtime and if that isn't present the // LastModified returned in the http headers func (o *Object) ModTime(ctx context.Context) time.Time { - defer log.Trace(o, "")("") + // defer log.Trace(o, "")("") err := o.readMetaData(ctx) if err != nil { fs.Debugf(o, "ModTime: Failed to read metadata: %v", err) @@ -965,16 +993,20 @@ func (o *Object) downloadURL() string { // Open an object for read func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { - defer log.Trace(o, "")("") + // defer log.Trace(o, "")("") err = o.readMetaData(ctx) if err != nil { fs.Debugf(o, "Open: Failed to read metadata: %v", err) return nil, err } + url := o.downloadURL() + if o.fs.opt.Proxy != "" { + url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id + } var resp *http.Response opts := rest.Opts{ Method: "GET", - RootURL: o.downloadURL(), + RootURL: url, Options: options, } err = o.fs.pacer.Call(func() (bool, error) { @@ -1067,7 +1099,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a // // The new object may have been created if an error is returned func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { - defer log.Trace(o, "src=%+v", src)("err=%v", &err) + // defer log.Trace(o, "src=%+v", src)("err=%v", &err) match, _, pattern := patterns.match(o.fs.root, o.remote, true) if pattern == nil || !pattern.isFile || !pattern.canUpload { return errCantUpload diff --git a/backend/iclouddrive/api/client.go 
b/backend/iclouddrive/api/client.go new file mode 100644 index 000000000..7cdf74baf --- /dev/null +++ b/backend/iclouddrive/api/client.go @@ -0,0 +1,166 @@ +// Package api provides functionality for interacting with the iCloud API. +package api + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/lib/rest" +) + +const ( + baseEndpoint = "https://www.icloud.com" + homeEndpoint = "https://www.icloud.com" + setupEndpoint = "https://setup.icloud.com/setup/ws/1" + authEndpoint = "https://idmsa.apple.com/appleauth/auth" +) + +type sessionSave func(*Session) + +// Client defines the client configuration +type Client struct { + appleID string + password string + srv *rest.Client + Session *Session + sessionSaveCallback sessionSave + + drive *DriveService +} + +// New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback. +// +// Parameters: +// - appleID: the Apple ID of the user. +// - password: the password of the user. +// - trustToken: the trust token for the session. +// - clientID: the client id for the session. +// - cookies: the cookies for the session. +// - sessionSaveCallback: the callback function to save the session. +func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) { + icloud := &Client{ + appleID: appleID, + password: password, + srv: rest.NewClient(fshttp.NewClient(context.Background())), + Session: NewSession(), + sessionSaveCallback: sessionSaveCallback, + } + + icloud.Session.TrustToken = trustToken + icloud.Session.Cookies = cookies + icloud.Session.ClientID = clientID + return icloud, nil +} + +// DriveService returns the DriveService instance associated with the Client. +func (c *Client) DriveService() (*DriveService, error) { + var err error + if c.drive == nil { + c.drive, err = NewDriveService(c) + if err != nil { + return nil, err + } + } + return c.drive, nil +} + +// Request makes a request and retries it if the session is invalid. +// +// This function is the main entry point for making requests to the iCloud +// API. If the initial request returns a 401 (Unauthorized), it will try to +// reauthenticate and retry the request. +func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) { + resp, err = c.Session.Request(ctx, opts, request, response) + if err != nil && resp != nil { + // try to reauth + if resp.StatusCode == 401 || resp.StatusCode == 421 { + err = c.Authenticate(ctx) + if err != nil { + return nil, err + } + + if c.Session.Requires2FA() { + return nil, errors.New("trust token expired, please reauth") + } + return c.RequestNoReAuth(ctx, opts, request, response) + } + } + return resp, err +} + +// RequestNoReAuth makes a request without re-authenticating. +// +// This function is useful when you have a session that is already +// authenticated, but you need to make a request without triggering +// a re-authentication. +func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) { + // Make the request without re-authenticating + resp, err = c.Session.Request(ctx, opts, request, response) + return resp, err +} + +// Authenticate authenticates the client with the iCloud API. 
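+//
+// A minimal usage sketch (the Apple ID, password and clientID here are
+// placeholders, and error handling is elided):
+//
+//	client, _ := New("user@example.com", "password", "", clientID, nil, nil)
+//	if err := client.Authenticate(ctx); err != nil {
+//		// handle the error; client.Session.Requires2FA() may now be true
+//	}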
+func (c *Client) Authenticate(ctx context.Context) error {
+	if c.Session.Cookies != nil {
+		if err := c.Session.ValidateSession(ctx); err == nil {
+			fs.Debugf("icloud", "Valid session, no need to reauth")
+			return nil
+		}
+		c.Session.Cookies = nil
+	}
+
+	fs.Debugf("icloud", "Authenticating as %s\n", c.appleID)
+	err := c.Session.SignIn(ctx, c.appleID, c.password)
+
+	if err == nil {
+		err = c.Session.AuthWithToken(ctx)
+		if err == nil && c.sessionSaveCallback != nil {
+			c.sessionSaveCallback(c.Session)
+		}
+	}
+	return err
+}
+
+// SignIn signs in the client using the provided context and credentials.
+func (c *Client) SignIn(ctx context.Context) error {
+	return c.Session.SignIn(ctx, c.appleID, c.password)
+}
+
+// IntoReader marshals the provided values into a JSON encoded reader
+func IntoReader(values any) (*bytes.Reader, error) {
+	m, err := json.Marshal(values)
+	if err != nil {
+		return nil, err
+	}
+	return bytes.NewReader(m), nil
+}
+
+// RequestError holds info on a result state; iCloud can return a 200 but the result is unknown
+type RequestError struct {
+	Status string
+	Text   string
+}
+
+// Error satisfies the error interface.
+func (e *RequestError) Error() string {
+	return fmt.Sprintf("%s: %s", e.Text, e.Status)
+}
+
+func newRequestError(Status string, Text string) *RequestError {
+	return &RequestError{
+		Status: strings.ToLower(Status),
+		Text:   Text,
+	}
+}
+
+// newRequestErrorf makes a new error from sprintf parameters.
+func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
+	return newRequestError(Status, fmt.Sprintf(Text, Parameters...))
+}
diff --git a/backend/iclouddrive/api/drive.go b/backend/iclouddrive/api/drive.go
new file mode 100644
index 000000000..5d735ebbd
--- /dev/null
+++ b/backend/iclouddrive/api/drive.go
@@ -0,0 +1,913 @@
+package api
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"mime"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/lib/rest"
+)
+
+const (
+	defaultZone        = "com.apple.CloudDocs"
+	statusOk           = "OK"
+	statusEtagConflict = "ETAG_CONFLICT"
+)
+
+// DriveService represents an iCloud Drive service.
+type DriveService struct {
+	icloud       *Client
+	RootID       string
+	endpoint     string
+	docsEndpoint string
+}
+
+// NewDriveService creates a new DriveService instance.
+func NewDriveService(icloud *Client) (*DriveService, error) {
+	return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil
+}
+
+// GetItemByDriveID retrieves a DriveItem by its Drive ID.
+func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) {
+	items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren)
+	if err != nil {
+		return nil, resp, err
+	}
+	return items[0], resp, err
+}
+
+// GetItemsByDriveID retrieves DriveItems by their Drive IDs.
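+//
+// A drive ID ("drivewsid") is a string of the form TYPE::ZONE::DOCID, for
+// example (the document ID below is made up):
+//
+//	FILE::com.apple.CloudDocs::DOCUMENT-ID-1234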
+func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) { + var err error + _items := []map[string]any{} + for _, id := range ids { + _items = append(_items, map[string]any{ + "drivewsid": id, + "partialData": false, + "includeHierarchy": false, + }) + } + + var body *bytes.Reader + var path string + if !includeChildren { + values := []map[string]any{{ + "items": _items, + }} + body, err = IntoReader(values) + if err != nil { + return nil, nil, err + } + path = "/retrieveItemDetails" + } else { + values := _items + body, err = IntoReader(values) + if err != nil { + return nil, nil, err + } + path = "/retrieveItemDetailsInFolders" + } + + opts := rest.Opts{ + Method: "POST", + Path: path, + ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), + RootURL: d.endpoint, + Body: body, + } + var items []*DriveItem + resp, err := d.icloud.Request(ctx, opts, nil, &items) + if err != nil { + return nil, resp, err + } + + return items, resp, err +} + +// GetDocByPath retrieves a document by its path. +func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) { + values := url.Values{} + values.Set("unified_format", "false") + body, err := IntoReader(path) + if err != nil { + return nil, nil, err + } + opts := rest.Opts{ + Method: "POST", + Path: "/ws/" + defaultZone + "/list/lookup_by_path", + ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), + RootURL: d.docsEndpoint, + Parameters: values, + Body: body, + } + var item []*Document + resp, err := d.icloud.Request(ctx, opts, nil, &item) + if err != nil { + return nil, resp, err + } + + return item[0], resp, err +} + +// GetItemByPath retrieves a DriveItem by its path. +func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) { + values := url.Values{} + values.Set("unified_format", "true") + + body, err := IntoReader(path) + if err != nil { + return nil, nil, err + } + opts := rest.Opts{ + Method: "POST", + Path: "/ws/" + defaultZone + "/list/lookup_by_path", + ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), + RootURL: d.docsEndpoint, + Parameters: values, + Body: body, + } + var item []*DriveItem + resp, err := d.icloud.Request(ctx, opts, nil, &item) + if err != nil { + return nil, resp, err + } + + return item[0], resp, err +} + +// GetDocByItemID retrieves a document by its item ID. +func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) { + values := url.Values{} + values.Set("document_id", id) + values.Set("unified_format", "false") // important + opts := rest.Opts{ + Method: "GET", + Path: "/ws/" + defaultZone + "/list/lookup_by_id", + ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), + RootURL: d.docsEndpoint, + Parameters: values, + } + var item *Document + resp, err := d.icloud.Request(ctx, opts, nil, &item) + if err != nil { + return nil, resp, err + } + + return item, resp, err +} + +// GetItemRawByItemID retrieves a DriveItemRaw by its item ID. 
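+//
+// Note the lookup flavours in this service: GetItemByDriveID and friends go
+// through the drive endpoint and return DriveItems, GetDocByItemID returns a
+// Document from the docs endpoint, and this call returns the raw,
+// non-"unified" item shape (see DriveItemRaw).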
+func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) {
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/v1/item/" + id,
+		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:      d.docsEndpoint,
+	}
+	var item *DriveItemRaw
+	resp, err := d.icloud.Request(ctx, opts, nil, &item)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return item, resp, err
+}
+
+// GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID.
+func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) {
+	values := url.Values{}
+	values.Set("limit", strconv.FormatInt(limit, 10))
+
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/v1/enumerate/" + id,
+		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:      d.docsEndpoint,
+		Parameters:   values,
+	}
+
+	items := struct {
+		Items []*DriveItemRaw `json:"drive_item"`
+	}{}
+
+	resp, err := d.icloud.Request(ctx, opts, nil, &items)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	return items.Items, resp, err
+}
+
+// GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService.
+func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) {
+	_, zone, docid := DeconstructDriveID(id)
+	values := url.Values{}
+	values.Set("document_id", docid)
+
+	if zone == "" {
+		zone = defaultZone
+	}
+
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/ws/" + zone + "/download/by_id",
+		Parameters:   values,
+		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:      d.docsEndpoint,
+	}
+
+	var filer *FileRequest
+	resp, err := d.icloud.Request(ctx, opts, nil, &filer)
+
+	if err != nil {
+		return "", resp, err
+	}
+
+	var url string
+	if filer.DataToken != nil {
+		url = filer.DataToken.URL
+	} else {
+		url = filer.PackageToken.URL
+	}
+
+	return url, resp, err
+}
+
+// DownloadFile downloads a file from the given URL using the provided options.
+func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) {
+	opts := &rest.Opts{
+		Method:       "GET",
+		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:      url,
+		Options:      opt,
+	}
+
+	resp, err := d.icloud.srv.Call(ctx, opts)
+	if err != nil {
+		// icloud has some weird http codes
+		if resp != nil && resp.StatusCode == 330 {
+			loc, err := resp.Location()
+			if err == nil {
+				return d.DownloadFile(ctx, loc.String(), opt)
+			}
+		}
+	}
+	return resp, err
+}
+
+// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
+func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) {
+	doc, resp, err := d.GetDocByItemID(ctx, id)
+	if err != nil {
+		return nil, resp, err
+	}
+	return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force)
+}
+
+// MoveItemToTrashByID moves an item to the trash based on the drive ID.
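+//
+// The etag acts as an optimistic-concurrency check. If it is stale the
+// service reports ETAG_CONFLICT, and with force set the call retries once
+// using the etag that came back with the conflict. A sketch (error handling
+// elided, ID and etag hypothetical):
+//
+//	item, _, err := d.MoveItemToTrashByID(ctx, "FILE::com.apple.CloudDocs::DOC-1", staleEtag, true)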
+func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) { + values := map[string]any{ + "items": []map[string]any{{ + "drivewsid": drivewsid, + "etag": etag, + "clientId": drivewsid, + }}} + + body, err := IntoReader(values) + if err != nil { + return nil, nil, err + } + + opts := rest.Opts{ + Method: "POST", + Path: "/moveItemsToTrash", + ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), + RootURL: d.endpoint, + Body: body, + } + + item := struct { + Items []*DriveItem `json:"items"` + }{} + resp, err := d.icloud.Request(ctx, opts, nil, &item) + + if err != nil { + return nil, resp, err + } + + if item.Items[0].Status != statusOk { + // rerun with latest etag + if force && item.Items[0].Status == "ETAG_CONFLICT" { + return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false) + } + + err = newRequestError(item.Items[0].Status, "unknown request status") + } + + return item.Items[0], resp, err +} + +// CreateNewFolderByItemID creates a new folder by item ID. +func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) { + doc, resp, err := d.GetDocByItemID(ctx, id) + if err != nil { + return nil, resp, err + } + return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name) +} + +// CreateNewFolderByDriveID creates a new folder by its Drive ID. +func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) { + values := map[string]any{ + "destinationDrivewsId": drivewsid, + "folders": []map[string]any{{ + "clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(), + "name": name, + }}, + } + + body, err := IntoReader(values) + if err != nil { + return nil, nil, err + } + + opts := rest.Opts{ + Method: "POST", + Path: "/createFolders", + ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}), + RootURL: d.endpoint, + Body: body, + } + var fResp *CreateFoldersResponse + resp, err := d.icloud.Request(ctx, opts, nil, &fResp) + if err != nil { + return nil, resp, err + } + status := fResp.Folders[0].Status + if status != statusOk { + err = newRequestError(status, "unknown request status") + } + + return fResp.Folders[0], resp, err +} + +// RenameItemByItemID renames a DriveItem by its item ID. +func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) { + doc, resp, err := d.GetDocByItemID(ctx, id) + if err != nil { + return nil, resp, err + } + return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force) +} + +// RenameItemByDriveID renames a DriveItem by its drive ID. 
+func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
+	values := map[string]any{
+		"items": []map[string]any{{
+			"drivewsid": id,
+			"name":      name,
+			"etag":      etag,
+			// "extension": split[1],
+		}},
+	}
+
+	body, err := IntoReader(values)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	opts := rest.Opts{
+		Method:       "POST",
+		Path:         "/renameItems",
+		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:      d.endpoint,
+		Body:         body,
+	}
+	var items *DriveItem
+	resp, err := d.icloud.Request(ctx, opts, nil, &items)
+
+	if err != nil {
+		return nil, resp, err
+	}
+
+	status := items.Items[0].Status
+	if status != statusOk {
+		// rerun with latest etag
+		if force && status == "ETAG_CONFLICT" {
+			return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false)
+		}
+		err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
+	}
+
+	return items.Items[0], resp, err
+}
+
+// MoveItemByItemID moves an item by its item ID to a destination item ID.
+func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
+	docSrc, resp, err := d.GetDocByItemID(ctx, id)
+	if err != nil {
+		return nil, resp, err
+	}
+	docDst, resp, err := d.GetDocByItemID(ctx, dstID)
+	if err != nil {
+		return nil, resp, err
+	}
+	return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force)
+}
+
+// MoveItemByDocID moves an item by its doc ID.
+// func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) {
+// 	return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force)
+// }
+
+// MoveItemByDriveID moves an item by its drive ID.
+func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
+	values := map[string]any{
+		"destinationDrivewsId": dstID,
+		"items": []map[string]any{{
+			"drivewsid": id,
+			"etag":      etag,
+			"clientId":  id,
+		}},
+	}
+
+	body, err := IntoReader(values)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	opts := rest.Opts{
+		Method:       "POST",
+		Path:         "/moveItems",
+		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:      d.endpoint,
+		Body:         body,
+	}
+
+	var items *DriveItem
+	resp, err := d.icloud.Request(ctx, opts, nil, &items)
+
+	if err != nil {
+		return nil, resp, err
+	}
+
+	status := items.Items[0].Status
+	if status != statusOk {
+		// rerun with latest etag
+		if force && status == "ETAG_CONFLICT" {
+			return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false)
+		}
+		err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
+	}
+
+	return items.Items[0], resp, err
+}
+
+// CopyDocByItemID copies a document by its item ID.
+func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
+	// putting name in info doesn't work.
+	// Extension does work, so assume this is a bug in the endpoint.
+	values := map[string]any{
+		"info_to_update": map[string]any{},
+	}
+
+	body, err := IntoReader(values)
+	if err != nil {
+		return nil, nil, err
+	}
+	opts := rest.Opts{
+		Method:       "POST",
+		Path:         "/v1/item/copy/" + itemID,
+		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:      d.docsEndpoint,
+		Body:         body,
+	}
+
+	var info *DriveItemRaw
+	resp, err := d.icloud.Request(ctx, opts, nil, &info)
+	if err != nil {
+		return nil, resp, err
+	}
+	return info, resp, err
+}
+
+// CreateUpload creates a URL for an upload.
+func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) {
+	// first we need to request an upload URL
+	values := map[string]any{
+		"filename":     name,
+		"type":         "FILE",
+		"size":         strconv.FormatInt(size, 10),
+		"content_type": GetContentTypeForFile(name),
+	}
+	body, err := IntoReader(values)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	opts := rest.Opts{
+		Method:       "POST",
+		Path:         "/ws/" + defaultZone + "/upload/web",
+		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:      d.docsEndpoint,
+		Body:         body,
+	}
+	var responseInfo []*UploadResponse
+	resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
+	if err != nil {
+		return nil, resp, err
+	}
+	return responseInfo[0], resp, err
+}
+
+// Upload uploads a file to the given URL
+func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) {
+	// TODO: implement multipart upload
+	opts := rest.Opts{
+		Method:        "POST",
+		ExtraHeaders:  d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:       uploadURL,
+		Body:          in,
+		ContentLength: &size,
+		ContentType:   GetContentTypeForFile(name),
+		// MultipartContentName: "files",
+		MultipartFileName: name,
+	}
+	var singleFileResponse *SingleFileResponse
+	resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse)
+	if err != nil {
+		return nil, resp, err
+	}
+	return singleFileResponse, resp, err
+}
+
+// UpdateFile updates a file in the DriveService.
+//
+// ctx: the context.Context object for the request.
+// r: a pointer to the UpdateFileInfo struct containing the information for the file update.
+// Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any.
+func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) {
+	body, err := IntoReader(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	opts := rest.Opts{
+		Method:       "POST",
+		Path:         "/ws/" + defaultZone + "/update/documents",
+		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
+		RootURL:      d.docsEndpoint,
+		Body:         body,
+	}
+	var responseInfo *DocumentUpdateResponse
+	resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	doc := responseInfo.Results[0].Document
+	item := DriveItem{
+		Drivewsid:    "FILE::com.apple.CloudDocs::" + doc.DocumentID,
+		Docwsid:      doc.DocumentID,
+		Itemid:       doc.ItemID,
+		Etag:         doc.Etag,
+		ParentID:     doc.ParentID,
+		DateModified: time.Unix(r.Mtime, 0),
+		DateCreated:  time.Unix(r.Mtime, 0),
+		Type:         doc.Type,
+		Name:         doc.Name,
+		Size:         doc.Size,
+	}
+
+	return &item, resp, err
+}
+
+// UpdateFileInfo represents the information for an update to a file in the DriveService.
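+//
+// Uploading a new file is a three-step dance: CreateUpload to obtain an
+// upload URL, Upload to push the bytes, then UpdateFile to attach the
+// returned signatures to a document. A rough sketch of how the pieces fit
+// together (error handling elided; this mirrors, not defines, the backend's
+// actual upload path):
+//
+//	ur, _, _ := d.CreateUpload(ctx, size, name)
+//	sf, _, _ := d.Upload(ctx, in, size, name, ur.URL)
+//	r := NewUpdateFileInfo()
+//	r.DocumentID = ur.DocumentID
+//	r.Data.Signature = sf.SingleFile.Signature
+//	r.Data.ReferenceSignature = sf.SingleFile.ReferenceSignature
+//	r.Data.WrappingKey = sf.SingleFile.WrappingKey
+//	r.Data.Receipt = sf.SingleFile.Receipt
+//	r.Data.Size = sf.SingleFile.Size
+//	item, _, _ := d.UpdateFile(ctx, &r)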
+type UpdateFileInfo struct {
+	AllowConflict   bool   `json:"allow_conflict"`
+	Btime           int64  `json:"btime"`
+	Command         string `json:"command"`
+	CreateShortGUID bool   `json:"create_short_guid"`
+	Data            struct {
+		Receipt            string `json:"receipt,omitempty"`
+		ReferenceSignature string `json:"reference_signature,omitempty"`
+		Signature          string `json:"signature,omitempty"`
+		Size               int64  `json:"size,omitempty"`
+		WrappingKey        string `json:"wrapping_key,omitempty"`
+	} `json:"data,omitempty"`
+	DocumentID string    `json:"document_id"`
+	FileFlags  FileFlags `json:"file_flags"`
+	Mtime      int64     `json:"mtime"`
+	Path       struct {
+		Path               string `json:"path"`
+		StartingDocumentID string `json:"starting_document_id"`
+	} `json:"path"`
+}
+
+// FileFlags defines the file flags for a document.
+type FileFlags struct {
+	IsExecutable bool `json:"is_executable"`
+	IsHidden     bool `json:"is_hidden"`
+	IsWritable   bool `json:"is_writable"`
+}
+
+// NewUpdateFileInfo creates a new UpdateFileInfo object with default values.
+//
+// Returns an UpdateFileInfo object.
+func NewUpdateFileInfo() UpdateFileInfo {
+	return UpdateFileInfo{
+		Command:         "add_file",
+		CreateShortGUID: true,
+		AllowConflict:   true,
+		FileFlags: FileFlags{
+			IsExecutable: true,
+			IsHidden:     false,
+			IsWritable:   false,
+		},
+	}
+}
+
+// DriveItemRaw is a raw drive item.
+// Not sure what to call this, but there seems to be a "unified" and a non-"unified" drive item response. This is the non-unified one.
+type DriveItemRaw struct {
+	ItemID   string            `json:"item_id"`
+	ItemInfo *DriveItemRawInfo `json:"item_info"`
+}
+
+// SplitName splits the name of a DriveItemRaw into its name and extension.
+//
+// It returns the name and extension as separate strings. If the name ends with a dot,
+// it means there is no extension, so an empty string is returned for the extension.
+// If the name does not contain a dot, it likewise means there is no extension, so the
+// whole name is returned with an empty extension.
+func (d *DriveItemRaw) SplitName() (string, string) {
+	name := d.ItemInfo.Name
+	// ends with a dot, no extension
+	if strings.HasSuffix(name, ".") {
+		return name, ""
+	}
+	lastInd := strings.LastIndex(name, ".")
+
+	if lastInd == -1 {
+		return name, ""
+	}
+	return name[:lastInd], name[lastInd+1:]
+}
+
+// ModTime returns the modification time of the DriveItemRaw.
+//
+// It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value.
+// If the parsing fails, it returns the zero value of time.Time.
+// The returned time.Time value represents the modification time of the DriveItemRaw.
+func (d *DriveItemRaw) ModTime() time.Time {
+	i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64)
+	if err != nil {
+		return time.Time{}
+	}
+	return time.UnixMilli(i)
+}
+
+// CreatedTime returns the creation time of the DriveItemRaw.
+//
+// It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value.
+// If the parsing fails, it returns the zero value of time.Time.
+// The returned time.Time value represents the creation time of the DriveItemRaw.
+func (d *DriveItemRaw) CreatedTime() time.Time {
+	i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64)
+	if err != nil {
+		return time.Time{}
+	}
+	return time.UnixMilli(i)
+}
+
+// DriveItemRawInfo is the raw information about a drive item.
+type DriveItemRawInfo struct {
+	Name string `json:"name"`
+	// Extension is absolutely borked on endpoints so don't use it.
+ Extension string `json:"extension"` + Size int64 `json:"size,string"` + Type string `json:"type"` + Version string `json:"version"` + ModifiedAt string `json:"modified_at"` + CreatedAt string `json:"created_at"` + Urls struct { + URLDownload string `json:"url_download"` + } `json:"urls"` +} + +// IntoDriveItem converts a DriveItemRaw into a DriveItem. +// +// It takes no parameters. +// It returns a pointer to a DriveItem. +func (d *DriveItemRaw) IntoDriveItem() *DriveItem { + name, extension := d.SplitName() + return &DriveItem{ + Itemid: d.ItemID, + Name: name, + Extension: extension, + Type: d.ItemInfo.Type, + Etag: d.ItemInfo.Version, + DateModified: d.ModTime(), + DateCreated: d.CreatedTime(), + Size: d.ItemInfo.Size, + Urls: d.ItemInfo.Urls, + } +} + +// DocumentUpdateResponse is the response of a document update request. +type DocumentUpdateResponse struct { + Status struct { + StatusCode int `json:"status_code"` + ErrorMessage string `json:"error_message"` + } `json:"status"` + Results []struct { + Status struct { + StatusCode int `json:"status_code"` + ErrorMessage string `json:"error_message"` + } `json:"status"` + OperationID interface{} `json:"operation_id"` + Document *Document `json:"document"` + } `json:"results"` +} + +// Document represents a document on iCloud. +type Document struct { + Status struct { + StatusCode int `json:"status_code"` + ErrorMessage string `json:"error_message"` + } `json:"status"` + DocumentID string `json:"document_id"` + ItemID string `json:"item_id"` + Urls struct { + URLDownload string `json:"url_download"` + } `json:"urls"` + Etag string `json:"etag"` + ParentID string `json:"parent_id"` + Name string `json:"name"` + Type string `json:"type"` + Deleted bool `json:"deleted"` + Mtime int64 `json:"mtime"` + LastEditorName string `json:"last_editor_name"` + Data DocumentData `json:"data"` + Size int64 `json:"size"` + Btime int64 `json:"btime"` + Zone string `json:"zone"` + FileFlags struct { + IsExecutable bool `json:"is_executable"` + IsWritable bool `json:"is_writable"` + IsHidden bool `json:"is_hidden"` + } `json:"file_flags"` + LastOpenedTime int64 `json:"lastOpenedTime"` + RestorePath interface{} `json:"restorePath"` + HasChainedParent bool `json:"hasChainedParent"` +} + +// DriveID returns the drive ID of the Document. +func (d *Document) DriveID() string { + if d.Zone == "" { + d.Zone = defaultZone + } + return d.Type + "::" + d.Zone + "::" + d.DocumentID +} + +// DocumentData represents the data of a document. +type DocumentData struct { + Signature string `json:"signature"` + Owner string `json:"owner"` + Size int64 `json:"size"` + ReferenceSignature string `json:"reference_signature"` + WrappingKey string `json:"wrapping_key"` + PcsInfo string `json:"pcsInfo"` +} + +// SingleFileResponse is the response of a single file request. +type SingleFileResponse struct { + SingleFile *SingleFileInfo `json:"singleFile"` +} + +// SingleFileInfo represents the information of a single file. +type SingleFileInfo struct { + ReferenceSignature string `json:"referenceChecksum"` + Size int64 `json:"size"` + Signature string `json:"fileChecksum"` + WrappingKey string `json:"wrappingKey"` + Receipt string `json:"receipt"` +} + +// UploadResponse is the response of an upload request. +type UploadResponse struct { + URL string `json:"url"` + DocumentID string `json:"document_id"` +} + +// FileRequestToken represents the token of a file request. 
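+//
+// A download response appears to carry either a DataToken (regular files)
+// or a PackageToken (package/bundle items); GetDownloadURLByDriveID prefers
+// DataToken.URL and falls back to PackageToken.URL.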
+type FileRequestToken struct { + URL string `json:"url"` + Token string `json:"token"` + Signature string `json:"signature"` + WrappingKey string `json:"wrapping_key"` + ReferenceSignature string `json:"reference_signature"` +} + +// FileRequest represents the request of a file. +type FileRequest struct { + DocumentID string `json:"document_id"` + ItemID string `json:"item_id"` + OwnerDsid int64 `json:"owner_dsid"` + DataToken *FileRequestToken `json:"data_token,omitempty"` + PackageToken *FileRequestToken `json:"package_token,omitempty"` + DoubleEtag string `json:"double_etag"` +} + +// CreateFoldersResponse is the response of a create folders request. +type CreateFoldersResponse struct { + Folders []*DriveItem `json:"folders"` +} + +// DriveItem represents an item on iCloud. +type DriveItem struct { + DateCreated time.Time `json:"dateCreated"` + Drivewsid string `json:"drivewsid"` + Docwsid string `json:"docwsid"` + Itemid string `json:"item_id"` + Zone string `json:"zone"` + Name string `json:"name"` + ParentID string `json:"parentId"` + Hierarchy []DriveItem `json:"hierarchy"` + Etag string `json:"etag"` + Type string `json:"type"` + AssetQuota int64 `json:"assetQuota"` + FileCount int64 `json:"fileCount"` + ShareCount int64 `json:"shareCount"` + ShareAliasCount int64 `json:"shareAliasCount"` + DirectChildrenCount int64 `json:"directChildrenCount"` + Items []*DriveItem `json:"items"` + NumberOfItems int64 `json:"numberOfItems"` + Status string `json:"status"` + Extension string `json:"extension,omitempty"` + DateModified time.Time `json:"dateModified,omitempty"` + DateChanged time.Time `json:"dateChanged,omitempty"` + Size int64 `json:"size,omitempty"` + LastOpenTime time.Time `json:"lastOpenTime,omitempty"` + Urls struct { + URLDownload string `json:"url_download"` + } `json:"urls"` +} + +// IsFolder returns true if the item is a folder. +func (d *DriveItem) IsFolder() bool { + return d.Type == "FOLDER" || d.Type == "APP_CONTAINER" || d.Type == "APP_LIBRARY" +} + +// DownloadURL returns the download URL of the item. +func (d *DriveItem) DownloadURL() string { + return d.Urls.URLDownload +} + +// FullName returns the full name of the item. +// name + extension +func (d *DriveItem) FullName() string { + if d.Extension != "" { + return d.Name + "." + d.Extension + } + return d.Name +} + +// GetDocIDFromDriveID returns the DocumentID from the drive ID. +func GetDocIDFromDriveID(id string) string { + split := strings.Split(id, "::") + return split[len(split)-1] +} + +// DeconstructDriveID returns the document type, zone, and document ID from the drive ID. +func DeconstructDriveID(id string) (docType, zone, docid string) { + split := strings.Split(id, "::") + if len(split) < 3 { + return "", "", id + } + return split[0], split[1], split[2] +} + +// ConstructDriveID constructs a drive ID from the given components. +func ConstructDriveID(id string, zone string, t string) string { + return strings.Join([]string{t, zone, id}, "::") +} + +// GetContentTypeForFile detects content type for given file name. 
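+//
+// For example "page.html" yields "text/html" (the "; charset=..." parameter
+// is stripped), while an unknown extension falls back to "text/plain"
+// because the docs API requires some content type.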
+func GetContentTypeForFile(name string) string { + // detect MIME type by looking at the filename only + mimeType := mime.TypeByExtension(filepath.Ext(name)) + if mimeType == "" { + // api requires a mime type passed in + mimeType = "text/plain" + } + return strings.Split(mimeType, ";")[0] +} diff --git a/backend/iclouddrive/api/session.go b/backend/iclouddrive/api/session.go new file mode 100644 index 000000000..7ee350675 --- /dev/null +++ b/backend/iclouddrive/api/session.go @@ -0,0 +1,412 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "net/url" + "slices" + "strings" + + "github.com/oracle/oci-go-sdk/v65/common" + + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/lib/rest" +) + +// Session represents an iCloud session +type Session struct { + SessionToken string `json:"session_token"` + Scnt string `json:"scnt"` + SessionID string `json:"session_id"` + AccountCountry string `json:"account_country"` + TrustToken string `json:"trust_token"` + ClientID string `json:"client_id"` + Cookies []*http.Cookie `json:"cookies"` + AccountInfo AccountInfo `json:"account_info"` + + srv *rest.Client `json:"-"` +} + +// String returns the session as a string +// func (s *Session) String() string { +// jsession, _ := json.Marshal(s) +// return string(jsession) +// } + +// Request makes a request +func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) { + resp, err := s.srv.CallJSON(ctx, &opts, &request, &response) + + if err != nil { + return resp, err + } + + if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" { + s.AccountCountry = val + } + if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" { + s.SessionID = val + } + if val := resp.Header.Get("X-Apple-Session-Token"); val != "" { + s.SessionToken = val + } + if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" { + s.TrustToken = val + } + if val := resp.Header.Get("scnt"); val != "" { + s.Scnt = val + } + + return resp, nil +} + +// Requires2FA returns true if the session requires 2FA +func (s *Session) Requires2FA() bool { + return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired +} + +// SignIn signs in the session +func (s *Session) SignIn(ctx context.Context, appleID, password string) error { + trustTokens := []string{} + if s.TrustToken != "" { + trustTokens = []string{s.TrustToken} + } + values := map[string]any{ + "accountName": appleID, + "password": password, + "rememberMe": true, + "trustTokens": trustTokens, + } + body, err := IntoReader(values) + if err != nil { + return err + } + opts := rest.Opts{ + Method: "POST", + Path: "/signin", + Parameters: url.Values{}, + ExtraHeaders: s.GetAuthHeaders(map[string]string{}), + RootURL: authEndpoint, + IgnoreStatus: true, // need to handle 409 for hsa2 + NoResponse: true, + Body: body, + } + opts.Parameters.Set("isRememberMeEnabled", "true") + _, err = s.Request(ctx, opts, nil, nil) + + return err + +} + +// AuthWithToken authenticates the session +func (s *Session) AuthWithToken(ctx context.Context) error { + values := map[string]any{ + "accountCountryCode": s.AccountCountry, + "dsWebAuthToken": s.SessionToken, + "extended_login": true, + "trustToken": s.TrustToken, + } + body, err := IntoReader(values) + if err != nil { + return err + } + opts := rest.Opts{ + Method: "POST", + Path: "/accountLogin", + ExtraHeaders: GetCommonHeaders(map[string]string{}), + RootURL: setupEndpoint, + Body: body, + } + + resp, err := 
s.Request(ctx, opts, nil, &s.AccountInfo) + if err == nil { + s.Cookies = resp.Cookies() + } + + return err +} + +// Validate2FACode validates the 2FA code +func (s *Session) Validate2FACode(ctx context.Context, code string) error { + values := map[string]interface{}{"securityCode": map[string]string{"code": code}} + body, err := IntoReader(values) + if err != nil { + return err + } + + headers := s.GetAuthHeaders(map[string]string{}) + headers["scnt"] = s.Scnt + headers["X-Apple-ID-Session-Id"] = s.SessionID + + opts := rest.Opts{ + Method: "POST", + Path: "/verify/trusteddevice/securitycode", + ExtraHeaders: headers, + RootURL: authEndpoint, + Body: body, + NoResponse: true, + } + + _, err = s.Request(ctx, opts, nil, nil) + if err == nil { + if err := s.TrustSession(ctx); err != nil { + return err + } + + return nil + } + + return fmt.Errorf("validate2FACode failed: %w", err) +} + +// TrustSession trusts the session +func (s *Session) TrustSession(ctx context.Context) error { + headers := s.GetAuthHeaders(map[string]string{}) + headers["scnt"] = s.Scnt + headers["X-Apple-ID-Session-Id"] = s.SessionID + + opts := rest.Opts{ + Method: "GET", + Path: "/2sv/trust", + ExtraHeaders: headers, + RootURL: authEndpoint, + NoResponse: true, + ContentLength: common.Int64(0), + } + + _, err := s.Request(ctx, opts, nil, nil) + if err != nil { + return fmt.Errorf("trustSession failed: %w", err) + } + + return s.AuthWithToken(ctx) +} + +// ValidateSession validates the session +func (s *Session) ValidateSession(ctx context.Context) error { + opts := rest.Opts{ + Method: "POST", + Path: "/validate", + ExtraHeaders: s.GetHeaders(map[string]string{}), + RootURL: setupEndpoint, + ContentLength: common.Int64(0), + } + _, err := s.Request(ctx, opts, nil, &s.AccountInfo) + if err != nil { + return fmt.Errorf("validateSession failed: %w", err) + } + + return nil +} + +// GetAuthHeaders returns the authentication headers for the session. +// +// It takes an `overwrite` map[string]string parameter which allows +// overwriting the default headers. It returns a map[string]string. +func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string { + headers := map[string]string{ + "Accept": "application/json", + "Content-Type": "application/json", + "X-Apple-OAuth-Client-Id": s.ClientID, + "X-Apple-OAuth-Client-Type": "firstPartyAuth", + "X-Apple-OAuth-Redirect-URI": "https://www.icloud.com", + "X-Apple-OAuth-Require-Grant-Code": "true", + "X-Apple-OAuth-Response-Mode": "web_message", + "X-Apple-OAuth-Response-Type": "code", + "X-Apple-OAuth-State": s.ClientID, + "X-Apple-Widget-Key": s.ClientID, + "Origin": homeEndpoint, + "Referer": fmt.Sprintf("%s/", homeEndpoint), + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0", + } + for k, v := range overwrite { + headers[k] = v + } + return headers +} + +// GetHeaders Gets the authentication headers required for a request +func (s *Session) GetHeaders(overwrite map[string]string) map[string]string { + headers := GetCommonHeaders(map[string]string{}) + headers["Cookie"] = s.GetCookieString() + for k, v := range overwrite { + headers[k] = v + } + return headers +} + +// GetCookieString returns the cookie header string for the session. +func (s *Session) GetCookieString() string { + cookieHeader := "" + // we only care about name and value. 
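+	// producing e.g. "name1=value1;name2=value2;" (names illustrative)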
+	for _, cookie := range s.Cookies {
+		cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
+	}
+	return cookieHeader
+}
+
+// GetCommonHeaders generates common HTTP headers with optional overwrite.
+func GetCommonHeaders(overwrite map[string]string) map[string]string {
+	headers := map[string]string{
+		"Content-Type": "application/json",
+		"Origin":       baseEndpoint,
+		"Referer":      fmt.Sprintf("%s/", baseEndpoint),
+		"User-Agent":   "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
+	}
+	for k, v := range overwrite {
+		headers[k] = v
+	}
+	return headers
+}
+
+// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
+func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
+	var hashes []string
+	for _, cookie := range right {
+		hashes = append(hashes, cookie.Raw)
+	}
+	for _, cookie := range left {
+		if !slices.Contains(hashes, cookie.Raw) {
+			right = append(right, cookie)
+		}
+	}
+	return right, nil
+}
+
+// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
+func GetCookiesForDomain(url *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
+	var domainCookies []*http.Cookie
+	for _, cookie := range cookies {
+		if strings.HasSuffix(url.Host, cookie.Domain) {
+			domainCookies = append(domainCookies, cookie)
+		}
+	}
+	return domainCookies, nil
+}
+
+// NewSession creates a new Session instance with default values.
+func NewSession() *Session {
+	session := &Session{}
+	session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
+	//session.ClientID = "auth-" + uuid.New().String()
+	return session
+}
+
+// AccountInfo represents an account info
+type AccountInfo struct {
+	DsInfo                       *ValidateDataDsInfo    `json:"dsInfo"`
+	HasMinimumDeviceForPhotosWeb bool                   `json:"hasMinimumDeviceForPhotosWeb"`
+	ICDPEnabled                  bool                   `json:"iCDPEnabled"`
+	Webservices                  map[string]*webService `json:"webservices"`
+	PcsEnabled                   bool                   `json:"pcsEnabled"`
+	TermsUpdateNeeded            bool                   `json:"termsUpdateNeeded"`
+	ConfigBag                    struct {
+		Urls struct {
+			AccountCreateUI     string `json:"accountCreateUI"`
+			AccountLoginUI      string `json:"accountLoginUI"`
+			AccountLogin        string `json:"accountLogin"`
+			AccountRepairUI     string `json:"accountRepairUI"`
+			DownloadICloudTerms string `json:"downloadICloudTerms"`
+			RepairDone          string `json:"repairDone"`
+			AccountAuthorizeUI  string `json:"accountAuthorizeUI"`
+			VettingURLForEmail  string `json:"vettingUrlForEmail"`
+			AccountCreate       string `json:"accountCreate"`
+			GetICloudTerms      string `json:"getICloudTerms"`
+			VettingURLForPhone  string `json:"vettingUrlForPhone"`
+		} `json:"urls"`
+		AccountCreateEnabled bool `json:"accountCreateEnabled"`
+	} `json:"configBag"`
+	HsaTrustedBrowser            bool     `json:"hsaTrustedBrowser"`
+	AppsOrder                    []string `json:"appsOrder"`
+	Version                      int      `json:"version"`
+	IsExtendedLogin              bool     `json:"isExtendedLogin"`
+	PcsServiceIdentitiesIncluded bool     `json:"pcsServiceIdentitiesIncluded"`
+	IsRepairNeeded               bool     `json:"isRepairNeeded"`
+	HsaChallengeRequired         bool     `json:"hsaChallengeRequired"`
+	RequestInfo                  struct {
+		Country  string `json:"country"`
+		TimeZone string `json:"timeZone"`
+		Region   string `json:"region"`
+	} `json:"requestInfo"`
+	PcsDeleted bool `json:"pcsDeleted"`
+	ICloudInfo struct {
+		SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
+	} `json:"iCloudInfo"`
+	Apps map[string]*ValidateDataApp `json:"apps"`
+}
+
+// ValidateDataDsInfo represents the validation info
+type
ValidateDataDsInfo struct { + HsaVersion int `json:"hsaVersion"` + LastName string `json:"lastName"` + ICDPEnabled bool `json:"iCDPEnabled"` + TantorMigrated bool `json:"tantorMigrated"` + Dsid string `json:"dsid"` + HsaEnabled bool `json:"hsaEnabled"` + IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"` + IroncadeMigrated bool `json:"ironcadeMigrated"` + Locale string `json:"locale"` + BrZoneConsolidated bool `json:"brZoneConsolidated"` + ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"` + IsManagedAppleID bool `json:"isManagedAppleID"` + IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"` + IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"` + ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"` + Gilligvited bool `json:"gilligvited"` + AppleIDAliases []interface{} `json:"appleIdAliases"` + UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"` + IsPaidDeveloper bool `json:"isPaidDeveloper"` + CountryCode string `json:"countryCode"` + NotificationID string `json:"notificationId"` + PrimaryEmailVerified bool `json:"primaryEmailVerified"` + ADsID string `json:"aDsID"` + Locked bool `json:"locked"` + ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"` + HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"` + PrimaryEmail string `json:"primaryEmail"` + AppleIDEntries []struct { + IsPrimary bool `json:"isPrimary"` + Type string `json:"type"` + Value string `json:"value"` + } `json:"appleIdEntries"` + GilliganEnabled bool `json:"gilligan-enabled"` + IsWebAccessAllowed bool `json:"isWebAccessAllowed"` + FullName string `json:"fullName"` + MailFlags struct { + IsThreadingAvailable bool `json:"isThreadingAvailable"` + IsSearchV2Provisioned bool `json:"isSearchV2Provisioned"` + SCKMail bool `json:"sCKMail"` + IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"` + } `json:"mailFlags"` + LanguageCode string `json:"languageCode"` + AppleID string `json:"appleId"` + HasUnreleasedOS bool `json:"hasUnreleasedOS"` + AnalyticsOptInStatus bool `json:"analyticsOptInStatus"` + FirstName string `json:"firstName"` + ICloudAppleIDAlias string `json:"iCloudAppleIdAlias"` + NotesMigrated bool `json:"notesMigrated"` + BeneficiaryInfo struct { + IsBeneficiary bool `json:"isBeneficiary"` + } `json:"beneficiaryInfo"` + HasPaymentInfo bool `json:"hasPaymentInfo"` + PcsDelet bool `json:"pcsDelet"` + AppleIDAlias string `json:"appleIdAlias"` + BrMigrated bool `json:"brMigrated"` + StatusCode int `json:"statusCode"` + FamilyEligible bool `json:"familyEligible"` +} + +// ValidateDataApp represents an app +type ValidateDataApp struct { + CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"` + IsQualifiedForBeta bool `json:"isQualifiedForBeta"` +} + +// WebService represents a web service +type webService struct { + PcsRequired bool `json:"pcsRequired"` + URL string `json:"url"` + UploadURL string `json:"uploadUrl"` + Status string `json:"status"` +} diff --git a/backend/iclouddrive/iclouddrive.go b/backend/iclouddrive/iclouddrive.go new file mode 100644 index 000000000..591d81718 --- /dev/null +++ b/backend/iclouddrive/iclouddrive.go @@ -0,0 +1,1174 @@ +//go:build !plan9 && !solaris + +// Package iclouddrive implements the iCloud Drive backend +package iclouddrive + +import ( + "bytes" + "context" + "path" + + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + 
"github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/fs/fserrors" + + "github.com/rclone/rclone/backend/iclouddrive/api" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/dircache" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/pacer" +) + +/* +- dirCache operates on relative path to root +- path sanitization + - rule of thumb: sanitize before use, but store things as-is + - the paths cached in dirCache are after sanitizing + - the remote/dir passed in aren't, and are stored as-is +*/ + +const ( + configAppleID = "apple_id" + configPassword = "password" + configClientID = "client_id" + configCookies = "cookies" + configTrustToken = "trust_token" + + minSleep = 10 * time.Millisecond + maxSleep = 2 * time.Second + decayConstant = 2 +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "iclouddrive", + Description: "iCloud Drive", + Config: Config, + NewFs: NewFs, + Options: []fs.Option{{ + Name: configAppleID, + Help: "Apple ID.", + Required: true, + Sensitive: true, + }, { + Name: configPassword, + Help: "Password.", + Required: true, + IsPassword: true, + Sensitive: true, + }, { + Name: configTrustToken, + Help: "Trust token (internal use)", + IsPassword: false, + Required: false, + Sensitive: true, + Hide: fs.OptionHideBoth, + }, { + Name: configCookies, + Help: "cookies (internal use only)", + Required: false, + Advanced: false, + Sensitive: true, + Hide: fs.OptionHideBoth, + }, { + Name: configClientID, + Help: "Client id", + Required: false, + Advanced: true, + Default: "d39ba9916b7251055b22c7f910e2ea796ee65e98b2ddecea8f5dde8d9d1a815d", + }, { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: (encoder.Display | + //encoder.EncodeDot | + encoder.EncodeBackSlash | + encoder.EncodeInvalidUtf8), + }}, + }) +} + +// Options defines the configuration for this backend +type Options struct { + AppleID string `config:"apple_id"` + Password string `config:"password"` + Photos bool `config:"photos"` + TrustToken string `config:"trust_token"` + Cookies string `config:"cookies"` + ClientID string `config:"client_id"` + Enc encoder.MultiEncoder `config:"encoding"` +} + +// Fs represents a remote icloud drive +type Fs struct { + name string // name of this remote + root string // the path we are working on. + rootID string + opt Options // parsed config options + features *fs.Features // optional features + dirCache *dircache.DirCache // Map of directory path to directory id + icloud *api.Client + service *api.DriveService + pacer *fs.Pacer // pacer for API calls +} + +// Object describes an icloud drive object +type Object struct { + fs *Fs // what this object is part of + remote string // The remote path (relative to the fs.root) + size int64 // size of the object (on server, after encryption) + modTime time.Time // modification time of the object + createdTime time.Time // creation time of the object + driveID string // item ID of the object + docID string // document ID of the object + itemID string // item ID of the object + etag string + downloadURL string +} + +// Config configures the iCloud remote. 
+func Config(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
+	var err error
+	appleid, _ := m.Get(configAppleID)
+	if appleid == "" {
+		return nil, errors.New("an Apple ID is required")
+	}
+
+	password, _ := m.Get(configPassword)
+	if password != "" {
+		password, err = obscure.Reveal(password)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	trustToken, _ := m.Get(configTrustToken)
+	cookieRaw, _ := m.Get(configCookies)
+	clientID, _ := m.Get(configClientID)
+	cookies := ReadCookies(cookieRaw)
+
+	switch config.State {
+	case "":
+		icloud, err := api.New(appleid, password, trustToken, clientID, cookies, nil)
+		if err != nil {
+			return nil, err
+		}
+		if err := icloud.Authenticate(ctx); err != nil {
+			return nil, err
+		}
+		m.Set(configCookies, icloud.Session.GetCookieString())
+		if icloud.Session.Requires2FA() {
+			return fs.ConfigInput("2fa_do", "config_2fa", "Two-factor authentication: please enter your 2FA code")
+		}
+		return nil, nil
+	case "2fa_do":
+		code := config.Result
+		if code == "" {
+			return fs.ConfigError("authenticate", "2FA codes can't be blank")
+		}
+
+		icloud, err := api.New(appleid, password, trustToken, clientID, cookies, nil)
+		if err != nil {
+			return nil, err
+		}
+		if err := icloud.SignIn(ctx); err != nil {
+			return nil, err
+		}
+
+		if err := icloud.Session.Validate2FACode(ctx, code); err != nil {
+			return nil, err
+		}
+
+		m.Set(configTrustToken, icloud.Session.TrustToken)
+		m.Set(configCookies, icloud.Session.GetCookieString())
+		return nil, nil
+
+	case "2fa_error":
+		if config.Result == "true" {
+			return fs.ConfigGoto("2fa")
+		}
+		return nil, errors.New("2fa authentication failed")
+	}
+	return nil, fmt.Errorf("unknown state %q", config.State)
+}
+
+// find item by path. Will not return any children for the item
+func (f *Fs) findItem(ctx context.Context, dir string) (item *api.DriveItem, found bool, err error) {
+	var resp *http.Response
+	if err = f.pacer.Call(func() (bool, error) {
+		item, resp, err = f.service.GetItemByPath(ctx, path.Join(f.root, dir))
+		return shouldRetry(ctx, resp, err)
+	}); err != nil {
+		if item == nil && resp != nil && resp.StatusCode == 404 {
+			return nil, false, nil
+		}
+		return nil, false, err
+	}
+
+	return item, true, nil
+}
+
+func (f *Fs) findLeafItem(ctx context.Context, pathID string, leaf string) (item *api.DriveItem, found bool, err error) {
+	items, err := f.listAll(ctx, pathID)
+	if err != nil {
+		return nil, false, err
+	}
+	for _, item := range items {
+		if strings.EqualFold(item.FullName(), leaf) {
+			return item, true, nil
+		}
+	}
+
+	return nil, false, nil
+}
+
+// FindLeaf finds a directory of name leaf in the folder with ID pathID
+func (f *Fs) FindLeaf(ctx context.Context, pathID string, leaf string) (pathIDOut string, found bool, err error) {
+	item, found, err := f.findLeafItem(ctx, pathID, leaf)
+
+	if err != nil {
+		return "", found, err
+	}
+
+	if !found {
+		return "", false, err
+	}
+
+	if !item.IsFolder() {
+		return "", false, fs.ErrorIsFile
+	}
+
+	return f.IDJoin(item.Drivewsid, item.Etag), true, nil
+}
+
+// Features implements fs.Fs.
+func (f *Fs) Features() *fs.Features {
+	return f.features
+}
+
+// Hashes are not exposed anywhere
+func (f *Fs) Hashes() hash.Set {
+	return hash.Set(hash.None)
+}
+
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
+	root := path.Join(f.root, dir)
+	if root == "" {
+		return errors.New("can't purge root directory")
+	}
+
+	directoryID, etag, err := f.FindDir(ctx, dir, false)
+	if err != nil {
+		return err
+	}
+
+	if check {
+		item, found, err := f.findItem(ctx, dir)
+		if err != nil {
+			return err
+		}
+
+		if found && item.DirectChildrenCount > 0 {
+			return fs.ErrorDirectoryNotEmpty
+		}
+	}
+
+	var _ *api.DriveItem
+	var resp *http.Response
+	if err = f.pacer.Call(func() (bool, error) {
+		_, resp, err = f.service.MoveItemToTrashByID(ctx, directoryID, etag, true)
+		return retryResultUnknown(ctx, resp, err)
+	}); err != nil {
+		return err
+	}
+
+	// flush the dir (and everything under it) from the dir cache
+	f.dirCache.FlushDir(dir)
+
+	return nil
+}
+
+// Purge all files in the directory specified
+//
+// Implement this if you have a way of deleting all the files
+// quicker than just running Remove() on the result of List()
+//
+// Return an error if it doesn't exist
+func (f *Fs) Purge(ctx context.Context, dir string) error {
+	if dir == "" {
+		return fs.ErrorCantPurge
+	}
+	return f.purgeCheck(ctx, dir, false)
+}
+
+func (f *Fs) listAll(ctx context.Context, dirID string) (items []*api.DriveItem, err error) {
+	var item *api.DriveItem
+	var resp *http.Response
+
+	if err = f.pacer.Call(func() (bool, error) {
+		id, _ := f.parseNormalizedID(dirID)
+		item, resp, err = f.service.GetItemByDriveID(ctx, id, true)
+		return shouldRetry(ctx, resp, err)
+	}); err != nil {
+		return nil, err
+	}
+
+	items = item.Items
+
+	for i, item := range items {
+		item.Name = f.opt.Enc.ToStandardName(item.Name)
+		item.Extension = f.opt.Enc.ToStandardName(item.Extension)
+		items[i] = item
+	}
+
+	return items, nil
+}
+
+// List implements fs.Fs.
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+	dirRemoteID, err := f.dirCache.FindDir(ctx, dir, false)
+	if err != nil {
+		return nil, err
+	}
+
+	entries = make(fs.DirEntries, 0)
+	items, err := f.listAll(ctx, dirRemoteID)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, item := range items {
+		id := item.Drivewsid
+		name := item.FullName()
+		remote := path.Join(dir, name)
+		if item.IsFolder() {
+			jid := f.putFolderCache(id, item.Etag, remote)
+			d := fs.NewDir(remote, item.DateModified).SetID(jid).SetSize(item.AssetQuota)
+			entries = append(entries, d)
+		} else {
+			o, err := f.NewObjectFromDriveItem(ctx, remote, item)
+			if err != nil {
+				return nil, err
+			}
+			entries = append(entries, o)
+		}
+	}
+
+	return entries, nil
+}
+
+// Mkdir implements fs.Fs.
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+	_, _, err := f.FindDir(ctx, dir, true)
+	return err
+}
+
+// Name implements fs.Fs.
+func (f *Fs) Name() string {
+	return f.name
+}
+
+// Precision implements fs.Fs.
+func (f *Fs) Precision() time.Duration {
+	return time.Second
+}
+
+// Copy src to this remote using server-side copy operations.
+//
+// This is stored with the remote path given.
+//
+// It returns the destination Object and a possible error.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantCopy
+//
+//nolint:all
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
+	// The iCloud copy endpoint is broken.
+	// Once they fix it, this can be re-enabled.
+	return nil, fs.ErrorCantCopy
+
+	// note: with so many calls it's only just faster than a re-upload for big files.
+	srcObj, ok := src.(*Object)
+	if !ok {
+		fs.Debugf(src, "Can't copy - not same remote type")
+		return nil, fs.ErrorCantCopy
+	}
+
+	file, pathID, _, err := f.FindPath(ctx, remote, true)
+	if err != nil {
+		return nil, err
+	}
+
+	var resp *http.Response
+	var info *api.DriveItemRaw
+
+	// make a copy
+	if err = f.pacer.Call(func() (bool, error) {
+		info, resp, err = f.service.CopyDocByItemID(ctx, srcObj.itemID)
+		return retryResultUnknown(ctx, resp, err)
+	}); err != nil {
+		return nil, err
+	}
+
+	// renaming in the CopyDocByItemID endpoint does not work, so do it the hard way
+
+	// get the new document
+	var doc *api.Document
+	if err = f.pacer.Call(func() (bool, error) {
+		doc, resp, err = f.service.GetDocByItemID(ctx, info.ItemID)
+		return shouldRetry(ctx, resp, err)
+	}); err != nil {
+		return nil, err
+	}
+
+	// get the parent drive ID
+	var dirDoc *api.Document
+	if err = f.pacer.Call(func() (bool, error) {
+		dirDoc, resp, err = f.service.GetDocByItemID(ctx, pathID)
+		return shouldRetry(ctx, resp, err)
+	}); err != nil {
+		return nil, err
+	}
+
+	// build the request
+	// can't use a normal rename as the file needs to be "activated" first
+
+	r := api.NewUpdateFileInfo()
+	r.DocumentID = doc.DocumentID
+	r.Path.Path = file
+	r.Path.StartingDocumentID = dirDoc.DocumentID
+	r.Data.Signature = doc.Data.Signature
+	r.Data.ReferenceSignature = doc.Data.ReferenceSignature
+	r.Data.WrappingKey = doc.Data.WrappingKey
+	r.Data.Size = doc.Data.Size
+	r.Mtime = srcObj.modTime.UnixMilli()
+	r.Btime = srcObj.modTime.UnixMilli()
+
+	var item *api.DriveItem
+	if err = f.pacer.Call(func() (bool, error) {
+		item, resp, err = f.service.UpdateFile(ctx, &r)
+		return retryResultUnknown(ctx, resp, err)
+	}); err != nil {
+		return nil, err
+	}
+
+	o, err := f.NewObjectFromDriveItem(ctx, remote, item)
+	if err != nil {
+		return nil, err
+	}
+	obj := o.(*Object)
+
+	// keep the source times so the unit tests pass
+	obj.modTime = srcObj.modTime
+	obj.createdTime = srcObj.createdTime
+
+	return obj, nil
+}
+
+// Put in to the remote path with the modTime given of the given size
+//
+// When called from outside an Fs by rclone, src.Size() will always be >= 0.
+// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
+// return an error or upload it properly (rather than e.g. calling panic).
+//
+// May create the object even if it returns an error - if so
+// will return the object and the error, otherwise will return
+// nil and the error
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	size := src.Size()
+	if size < 0 {
+		return nil, errors.New("file size unknown")
+	}
+	existingObj, err := f.NewObject(ctx, src.Remote())
+	switch err {
+	case nil:
+		// object is found
+		return existingObj, existingObj.Update(ctx, in, src, options...)
+	case fs.ErrorObjectNotFound:
+		// object not found, so we need to create it
+		remote := src.Remote()
+		size := src.Size()
+		modTime := src.ModTime(ctx)
+
+		obj, err := f.createObject(ctx, remote, modTime, size)
+		if err != nil {
+			return nil, err
+		}
+		return obj, obj.Update(ctx, in, src, options...)
+// Put in to the remote path with the modTime given of the given size
+//
+// When called from outside an Fs by rclone, src.Size() will always be >= 0.
+// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
+// return an error or upload it properly (rather than e.g. calling panic).
+//
+// May create the object even if it returns an error - if so
+// will return the object and the error, otherwise will return
+// nil and the error
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	size := src.Size()
+	if size < 0 {
+		return nil, errors.New("file size unknown")
+	}
+	existingObj, err := f.NewObject(ctx, src.Remote())
+	switch err {
+	case nil:
+		// object found, so update it in place
+		return existingObj, existingObj.Update(ctx, in, src, options...)
+	case fs.ErrorObjectNotFound:
+		// object not found, so we need to create it
+		remote := src.Remote()
+		size := src.Size()
+		modTime := src.ModTime(ctx)
+
+		obj, err := f.createObject(ctx, remote, modTime, size)
+		if err != nil {
+			return nil, err
+		}
+		return obj, obj.Update(ctx, in, src, options...)
+	default:
+		// some other error occurred
+		return nil, err
+	}
+}
+
+// DirCacheFlush resets the directory cache - used in testing as an
+// optional interface
+func (f *Fs) DirCacheFlush() {
+	f.dirCache.ResetRoot()
+}
+
+// parseNormalizedID parses a normalized ID (which may be in the form `id#etag`
+// or just `id`) and returns the ID and the ETag.
+// Such a normalized ID can come from (*Item).GetID()
+//
+// Parameters:
+// - rid: the normalized ID to be parsed
+//
+// Returns:
+// - id: the ID extracted from the normalized ID
+// - etag: the ETag extracted from the normalized ID, or an empty string if not present
+func (f *Fs) parseNormalizedID(rid string) (id string, etag string) {
+	split := strings.Split(rid, "#")
+	if len(split) == 1 {
+		return split[0], ""
+	}
+	return split[0], split[1]
+}
+
+// FindPath finds the leaf, directory ID and ETag from a normalized path
+func (f *Fs) FindPath(ctx context.Context, remote string, create bool) (leaf, directoryID, etag string, err error) {
+	leaf, jDirectoryID, err := f.dirCache.FindPath(ctx, remote, create)
+	if err != nil {
+		return "", "", "", err
+	}
+	directoryID, etag = f.parseNormalizedID(jDirectoryID)
+	return leaf, directoryID, etag, nil
+}
+
+// FindDir finds the directory passed in, returning the directory ID and ETag
+// starting from pathID
+func (f *Fs) FindDir(ctx context.Context, path string, create bool) (pathID string, etag string, err error) {
+	jDirectoryID, err := f.dirCache.FindDir(ctx, path, create)
+	if err != nil {
+		return "", "", err
+	}
+	directoryID, etag := f.parseNormalizedID(jDirectoryID)
+	return directoryID, etag, nil
+}
+
+// IDJoin joins the given ID and ETag into a single string with a "#" delimiter.
+func (f *Fs) IDJoin(id string, etag string) string {
+	if strings.Contains(id, "#") {
+		// already contains an etag, replace it
+		id, _ = f.parseNormalizedID(id)
+	}
+
+	return strings.Join([]string{id, etag}, "#")
+}
+
+func (f *Fs) putFolderCache(id, etag, remote string) string {
+	jid := f.IDJoin(id, etag)
+	f.dirCache.Put(remote, jid)
+	return jid
+}
+
+// Rmdir implements fs.Fs.
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+	return f.purgeCheck(ctx, dir, true)
+}
+
+// Root implements fs.Fs.
+func (f *Fs) Root() string {
+	return f.opt.Enc.ToStandardPath(f.root)
+}
+
+// String implements fs.Fs.
+func (f *Fs) String() string {
+	return f.root
+}
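// dircache stores one opaque string per directory, so this backend packs the
// drive ID and its ETag into a single token with IDJoin and splits it again
// with parseNormalizedID. A small round-trip sketch; the values are made up:

func exampleIDRoundTrip(f *Fs) {
	jid := f.IDJoin("FOLDER::com.apple.CloudDocs::root", "k5")
	// jid == "FOLDER::com.apple.CloudDocs::root#k5"
	id, etag := f.parseNormalizedID(jid)
	_, _ = id, etag // "FOLDER::com.apple.CloudDocs::root", "k5"
}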
+// CreateDir makes a directory with pathID as parent and name leaf
+//
+// This should be implemented by the backend and will be called by the
+// dircache package when appropriate.
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (string, error) {
+	var item *api.DriveItem
+	var err error
+	var found bool
+	var resp *http.Response
+	if err = f.pacer.Call(func() (bool, error) {
+		id, _ := f.parseNormalizedID(pathID)
+		item, resp, err = f.service.CreateNewFolderByDriveID(ctx, id, f.opt.Enc.FromStandardName(leaf))
+
+		// check if it went OK
+		if requestError, ok := err.(*api.RequestError); ok {
+			if requestError.Status == "unknown" {
+				fs.Debugf(requestError, " checking if dir was created with a separate call.")
+				time.Sleep(1 * time.Second) // sleep to give iCloud time to catch up
+				item, found, err = f.findLeafItem(ctx, pathID, leaf)
+				if err != nil {
+					return false, err
+				}
+
+				if !found {
+					// assume it failed and retry
+					return true, err
+				}
+
+				// success, clear err
+				err = nil
+			}
+		}
+
+		return ignoreResultUnknown(ctx, resp, err)
+	}); err != nil {
+		return "", err
+	}
+
+	return f.IDJoin(item.Drivewsid, item.Etag), err
+}
+
+// DirMove moves src, srcRemote to this remote at dstRemote
+// using server-side move operations.
+//
+// Will only be called if src.Fs().Name() == f.Name()
+//
+// If it isn't possible then return fs.ErrorCantDirMove
+//
+// If destination exists then return fs.ErrorDirExists
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
+	srcFs, ok := src.(*Fs)
+	if !ok {
+		fs.Debugf(srcFs, "Can't move directory - not same remote type")
+		return fs.ErrorCantDirMove
+	}
+
+	srcID, jsrcDirectoryID, srcLeaf, jdstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
+	if err != nil {
+		return err
+	}
+
+	srcDirectoryID, srcEtag := f.parseNormalizedID(jsrcDirectoryID)
+	dstDirectoryID, _ := f.parseNormalizedID(jdstDirectoryID)
+
+	_, err = f.move(ctx, srcID, srcDirectoryID, srcLeaf, srcEtag, dstDirectoryID, dstLeaf)
+	if err != nil {
+		return err
+	}
+
+	srcFs.dirCache.FlushDir(srcRemote)
+
+	return nil
+}
+
+func (f *Fs) move(ctx context.Context, ID, srcDirectoryID, srcLeaf, srcEtag, dstDirectoryID, dstLeaf string) (*api.DriveItem, error) {
+	var resp *http.Response
+	var item *api.DriveItem
+	var err error
+
+	// move
+	if srcDirectoryID != dstDirectoryID {
+		if err = f.pacer.Call(func() (bool, error) {
+			id, _ := f.parseNormalizedID(ID)
+			item, resp, err = f.service.MoveItemByDriveID(ctx, id, srcEtag, dstDirectoryID, true)
+			return ignoreResultUnknown(ctx, resp, err)
+		}); err != nil {
+			return nil, err
+		}
+		ID = item.Drivewsid
+		srcEtag = item.Etag
+	}
+
+	// rename
+	if srcLeaf != dstLeaf {
+		if err = f.pacer.Call(func() (bool, error) {
+			id, _ := f.parseNormalizedID(ID)
+			item, resp, err = f.service.RenameItemByDriveID(ctx, id, srcEtag, dstLeaf, true)
+			return ignoreResultUnknown(ctx, resp, err)
+		}); err != nil {
+			return item, err
+		}
+	}
+
+	return item, err
+}
+
+// Move moves the src object to the specified remote.
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't move - not same remote type") + return nil, fs.ErrorCantMove + } + + srcLeaf, srcDirectoryID, _, err := srcObj.fs.FindPath(ctx, srcObj.remote, true) + if err != nil { + return nil, err + } + + dstLeaf, dstDirectoryID, _, err := f.FindPath(ctx, remote, true) + if err != nil { + return nil, err + } + + item, err := f.move(ctx, srcObj.driveID, srcDirectoryID, srcLeaf, srcObj.etag, dstDirectoryID, dstLeaf) + if err != nil { + return src, err + } + + return f.NewObjectFromDriveItem(ctx, remote, item) +} + +// Creates from the parameters passed in a half finished Object which +// must have setMetaData called on it +// +// Returns the object, leaf, directoryID and error. +// +// Used to create new objects +func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, err error) { + // Create the directory for the object if it doesn't exist + _, _, _, err = f.FindPath(ctx, remote, true) + if err != nil { + return + } + // Temporary Object under construction + o = &Object{ + fs: f, + remote: remote, + modTime: modTime, + size: size, + } + return o, nil +} + +// ReadCookies parses the raw cookie string and returns an array of http.Cookie objects. +func ReadCookies(raw string) []*http.Cookie { + header := http.Header{} + header.Add("Cookie", raw) + request := http.Request{Header: header} + return request.Cookies() +} + +var retryErrorCodes = []int{ + 400, // icloud is a mess, sometimes returns 400 on a perfectly fine request. So just retry + 408, // Request Timeout + 409, // Conflict, retry could fix it. + 429, // Rate exceeded. + 500, // Get occasional 500 Internal Server Error + 502, // Server overload + 503, // Service Unavailable + 504, // Gateway Time-out +} + +func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) { + if fserrors.ContextError(ctx, &err) { + return false, err + } + + return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err +} + +func ignoreResultUnknown(ctx context.Context, resp *http.Response, err error) (bool, error) { + if requestError, ok := err.(*api.RequestError); ok { + if requestError.Status == "unknown" { + fs.Debugf(requestError, " ignoring.") + return false, nil + } + } + return shouldRetry(ctx, resp, err) +} + +func retryResultUnknown(ctx context.Context, resp *http.Response, err error) (bool, error) { + if requestError, ok := err.(*api.RequestError); ok { + if requestError.Status == "unknown" { + fs.Debugf(requestError, " retrying.") + return true, err + } + } + return shouldRetry(ctx, resp, err) +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + if opt.Password != "" { + var err error + opt.Password, err = obscure.Reveal(opt.Password) + if err != nil { + return nil, fmt.Errorf("couldn't decrypt user password: %w", err) + } + } + + if opt.TrustToken == "" { + return nil, fmt.Errorf("missing icloud trust token: try refreshing it with \"rclone config reconnect %s:\"", name) + } + + cookies := ReadCookies(opt.Cookies) + + callback := func(session *api.Session) { + m.Set(configCookies, session.GetCookieString()) + } + + icloud, err := api.New( + opt.AppleID, + opt.Password, + 
opt.TrustToken, + opt.ClientID, + cookies, + callback, + ) + if err != nil { + return nil, err + } + + if err := icloud.Authenticate(ctx); err != nil { + return nil, err + } + + if icloud.Session.Requires2FA() { + return nil, errors.New("trust token expired, please reauth") + } + + root = strings.Trim(root, "/") + + f := &Fs{ + name: name, + root: root, + icloud: icloud, + rootID: "FOLDER::com.apple.CloudDocs::root", + opt: *opt, + pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + } + f.features = (&fs.Features{ + CanHaveEmptyDirectories: true, + PartialUploads: false, + }).Fill(ctx, f) + + rootID := f.rootID + f.service, err = icloud.DriveService() + if err != nil { + return nil, err + } + + f.dirCache = dircache.New( + root, + rootID, + f, + ) + + err = f.dirCache.FindRoot(ctx, false) + if err != nil { + // Assume it is a file + newRoot, remote := dircache.SplitPath(root) + tempF := *f + tempF.dirCache = dircache.New(newRoot, rootID, &tempF) + tempF.root = newRoot + // Make new Fs which is the parent + err = tempF.dirCache.FindRoot(ctx, false) + if err != nil { + // No root so return old f + return f, nil + } + + _, err := tempF.NewObject(ctx, remote) + if err != nil { + if err == fs.ErrorObjectNotFound { + // File doesn't exist so return old f + return f, nil + } + + return nil, err + } + + f.dirCache = tempF.dirCache + f.root = tempF.root + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + + return f, nil +} + +// NewObject creates a new fs.Object from a given remote string. +// +// ctx: The context.Context for the function. +// remote: The remote string representing the object's location. +// Returns an fs.Object and an error. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.NewObjectFromDriveItem(ctx, remote, nil) +} + +// NewObjectFromDriveItem creates a new fs.Object from a given remote string and DriveItem. +// +// ctx: The context.Context for the function. +// remote: The remote string representing the object's location. +// item: The optional DriveItem to use for initializing the Object. If nil, the function will read the metadata from the remote location. +// Returns an fs.Object and an error. 
+func (f *Fs) NewObjectFromDriveItem(ctx context.Context, remote string, item *api.DriveItem) (fs.Object, error) {
+	o := &Object{
+		fs:     f,
+		remote: remote,
+	}
+	if item != nil {
+		err := o.setMetaData(item)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		item, err := f.readMetaData(ctx, remote)
+		if err != nil {
+			return nil, err
+		}
+
+		err = o.setMetaData(item)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return o, nil
+}
+
+func (f *Fs) readMetaData(ctx context.Context, path string) (item *api.DriveItem, err error) {
+	leaf, ID, _, err := f.FindPath(ctx, path, false)
+	if err != nil {
+		if err == fs.ErrorDirNotFound {
+			return nil, fs.ErrorObjectNotFound
+		}
+		return nil, err
+	}
+
+	item, found, err := f.findLeafItem(ctx, ID, leaf)
+	if err != nil {
+		return nil, err
+	}
+
+	if !found {
+		return nil, fs.ErrorObjectNotFound
+	}
+
+	return item, nil
+}
+
+func (o *Object) setMetaData(item *api.DriveItem) (err error) {
+	if item.IsFolder() {
+		return fs.ErrorIsDir
+	}
+	o.size = item.Size
+	o.modTime = item.DateModified
+	o.createdTime = item.DateCreated
+	o.driveID = item.Drivewsid
+	o.docID = item.Docwsid
+	o.itemID = item.Itemid
+	o.etag = item.Etag
+	o.downloadURL = item.DownloadURL()
+	return nil
+}
+
+// ID returns the ID of the Object if known, or "" if not
+func (o *Object) ID() string {
+	return o.driveID
+}
+
+// Fs implements fs.Object.
+func (o *Object) Fs() fs.Info {
+	return o.fs
+}
+
+// Hash implements fs.Object.
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+	return "", hash.ErrUnsupported
+}
+
+// ModTime implements fs.Object.
+func (o *Object) ModTime(context.Context) time.Time {
+	return o.modTime
+}
+
+// Open implements fs.Object.
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
+	fs.FixRangeOption(options, o.size)
+
+	// iCloud Drive does not support empty files, so we cheat
+	if o.size == 0 {
+		return io.NopCloser(bytes.NewBufferString("")), nil
+	}
+
+	var resp *http.Response
+	var err error
+
+	if err = o.fs.pacer.Call(func() (bool, error) {
+		var url string
+
+		//var doc *api.Document
+		//if o.docID == "" {
+		//doc, resp, err = o.fs.service.GetDocByItemID(ctx, o.itemID)
+		//}
+
+		// Cannot get the download URL of an item to work, so do it the hard way.
+		url, _, err = o.fs.service.GetDownloadURLByDriveID(ctx, o.driveID)
+		if err != nil {
+			return shouldRetry(ctx, resp, err)
+		}
+
+		resp, err = o.fs.service.DownloadFile(ctx, url, options)
+		return shouldRetry(ctx, resp, err)
+	}); err != nil {
+		return nil, err
+	}
+
+	return resp.Body, err
+}
+
+// Remote implements fs.Object.
+func (o *Object) Remote() string {
+	return o.remote
+}
+
+// Remove implements fs.Object.
+func (o *Object) Remove(ctx context.Context) error {
+	if o.itemID == "" {
+		return nil
+	}
+
+	var resp *http.Response
+	var err error
+	if err = o.fs.pacer.Call(func() (bool, error) {
+		_, resp, err = o.fs.service.MoveItemToTrashByID(ctx, o.driveID, o.etag, true)
+		return retryResultUnknown(ctx, resp, err)
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// SetModTime implements fs.Object.
+func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
+	return fs.ErrorCantSetModTime
+}
+
+// Size implements fs.Object.
+func (o *Object) Size() int64 {
+	return o.size
+}
+
+// Storable implements fs.Object.
+func (o *Object) Storable() bool {
+	return true
+}
+
+// String implements fs.Object.
+func (o *Object) String() string {
+	if o == nil {
+		return ""
+	}
+	return o.remote
+}
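// Open above wires fs.OpenOption through fs.FixRangeOption, so partial reads
// turn into HTTP Range requests against the fetched download URL. A minimal
// usage sketch reading the first KiB of an object, illustration only:

func exampleRangedRead(ctx context.Context, o fs.Object) ([]byte, error) {
	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
	if err != nil {
		return nil, err
	}
	defer func() { _ = rc.Close() }()
	return io.ReadAll(rc)
}

+// Update implements fs.Object.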
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + size := src.Size() + if size < 0 { + return errors.New("file size unknown") + } + + remote := o.Remote() + modTime := src.ModTime(ctx) + + leaf, dirID, _, err := o.fs.FindPath(ctx, path.Clean(remote), true) + if err != nil { + return err + } + + // Move current file to trash + if o.driveID != "" { + err = o.Remove(ctx) + if err != nil { + return err + } + } + + name := o.fs.opt.Enc.FromStandardName(leaf) + var resp *http.Response + + // Create document + var uploadInfo *api.UploadResponse + if err = o.fs.pacer.Call(func() (bool, error) { + uploadInfo, resp, err = o.fs.service.CreateUpload(ctx, size, name) + return ignoreResultUnknown(ctx, resp, err) + }); err != nil { + return err + } + + // Upload content + var upload *api.SingleFileResponse + if err = o.fs.pacer.Call(func() (bool, error) { + upload, resp, err = o.fs.service.Upload(ctx, in, size, name, uploadInfo.URL) + return ignoreResultUnknown(ctx, resp, err) + }); err != nil { + return err + } + + //var doc *api.Document + //if err = o.fs.pacer.Call(func() (bool, error) { + // doc, resp, err = o.fs.service.GetDocByItemID(ctx, dirID) + // return ignoreResultUnknown(ctx, resp, err) + //}); err != nil { + // return err + //} + + r := api.NewUpdateFileInfo() + r.DocumentID = uploadInfo.DocumentID + r.Path.Path = name + r.Path.StartingDocumentID = api.GetDocIDFromDriveID(dirID) + //r.Path.StartingDocumentID = doc.DocumentID + r.Data.Receipt = upload.SingleFile.Receipt + r.Data.Signature = upload.SingleFile.Signature + r.Data.ReferenceSignature = upload.SingleFile.ReferenceSignature + r.Data.WrappingKey = upload.SingleFile.WrappingKey + r.Data.Size = upload.SingleFile.Size + r.Mtime = modTime.Unix() * 1000 + r.Btime = modTime.Unix() * 1000 + + // Update metadata + var item *api.DriveItem + if err = o.fs.pacer.Call(func() (bool, error) { + item, resp, err = o.fs.service.UpdateFile(ctx, &r) + return ignoreResultUnknown(ctx, resp, err) + }); err != nil { + return err + } + + err = o.setMetaData(item) + if err != nil { + return err + } + + o.modTime = modTime + o.size = src.Size() + + return nil +} + +// Check interfaces are satisfied +var ( + _ fs.Fs = &Fs{} + _ fs.Mover = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.DirCacheFlusher = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.Object = &Object{} + _ fs.IDer = (*Object)(nil) +) diff --git a/backend/iclouddrive/iclouddrive_test.go b/backend/iclouddrive/iclouddrive_test.go new file mode 100644 index 000000000..13db708ea --- /dev/null +++ b/backend/iclouddrive/iclouddrive_test.go @@ -0,0 +1,18 @@ +//go:build !plan9 && !solaris + +package iclouddrive_test + +import ( + "testing" + + "github.com/rclone/rclone/backend/iclouddrive" + "github.com/rclone/rclone/fstest/fstests" +) + +// TestIntegration runs integration tests against the remote +func TestIntegration(t *testing.T) { + fstests.Run(t, &fstests.Opt{ + RemoteName: "TestICloudDrive:", + NilObject: (*iclouddrive.Object)(nil), + }) +} diff --git a/backend/iclouddrive/iclouddrive_unsupported.go b/backend/iclouddrive/iclouddrive_unsupported.go new file mode 100644 index 000000000..2eeb6b639 --- /dev/null +++ b/backend/iclouddrive/iclouddrive_unsupported.go @@ -0,0 +1,7 @@ +// Build for iclouddrive for unsupported platforms to stop go complaining +// about "no buildable Go source files " + +//go:build plan9 || solaris + +// Package iclouddrive implements the iCloud Drive backend +package iclouddrive \ No 
newline at end of file diff --git a/backend/jottacloud/jottacloud.go b/backend/jottacloud/jottacloud.go index 04e4bca10..8ad457c74 100644 --- a/backend/jottacloud/jottacloud.go +++ b/backend/jottacloud/jottacloud.go @@ -1555,7 +1555,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, } info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote) - if err != nil && meta != nil { + if err == nil && meta != nil { createTime, createTimeMeta := srcObj.parseFsMetadataTime(meta, "btime") if !createTimeMeta { createTime = srcObj.createTime diff --git a/backend/local/clone_darwin.go b/backend/local/clone_darwin.go index d3410ed60..bf6e5445f 100644 --- a/backend/local/clone_darwin.go +++ b/backend/local/clone_darwin.go @@ -6,6 +6,7 @@ package local import ( "context" "fmt" + "path/filepath" "runtime" "github.com/go-darwin/apfs" @@ -22,7 +23,7 @@ import ( // // If it isn't possible then return fs.ErrorCantCopy func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { - if runtime.GOOS != "darwin" || f.opt.TranslateSymlinks || f.opt.NoClone { + if runtime.GOOS != "darwin" || f.opt.NoClone { return nil, fs.ErrorCantCopy } srcObj, ok := src.(*Object) @@ -30,6 +31,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, fs.Debugf(src, "Can't clone - not same remote type") return nil, fs.ErrorCantCopy } + if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files + return nil, fs.ErrorCantCopy + } // Fetch metadata if --metadata is in use meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx)) @@ -44,11 +48,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, return nil, err } - err = Clone(srcObj.path, f.localPath(remote)) + srcPath := srcObj.path + if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead + srcPath, err = filepath.EvalSymlinks(srcPath) + if err != nil { + return nil, err + } + } + + err = Clone(srcPath, f.localPath(remote)) if err != nil { return nil, err } - fs.Debugf(remote, "server-side cloned!") // Set metadata if --metadata is in use if meta != nil { diff --git a/backend/local/local_internal_test.go b/backend/local/local_internal_test.go index 82447fa6e..ea0fdc765 100644 --- a/backend/local/local_internal_test.go +++ b/backend/local/local_internal_test.go @@ -73,7 +73,6 @@ func TestUpdatingCheck(t *testing.T) { r.WriteFile(filePath, "content updated", time.Now()) _, err = in.Read(buf) require.NoError(t, err) - } // Test corrupted on transfer @@ -224,7 +223,7 @@ func TestHashOnUpdate(t *testing.T) { assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5) // Reupload it with different contents but same size and timestamp - var b = bytes.NewBufferString("CONTENT") + b := bytes.NewBufferString("CONTENT") src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f) err = o.Update(ctx, b, src) require.NoError(t, err) @@ -395,7 +394,6 @@ func TestMetadata(t *testing.T) { assert.Equal(t, "wedges", m["potato"]) } }) - } func TestFilter(t *testing.T) { @@ -572,4 +570,35 @@ func TestCopySymlink(t *testing.T) { linkContents, err := os.Readlink(dstPath) require.NoError(t, err) assert.Equal(t, "file.txt", linkContents) + + // Set fs into "-L/--copy-links" mode + f.opt.FollowSymlinks = true + f.opt.TranslateSymlinks = false + f.lstat = os.Stat + + // Create dst + require.NoError(t, f.Mkdir(ctx, "dst2")) + + // Do copy from 
src into dst + src, err = f.NewObject(ctx, "src/link.txt") + require.NoError(t, err) + require.NotNil(t, src) + dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src) + require.NoError(t, err) + require.NotNil(t, dst) + + // Test that we made a NON-symlink and it has the right contents + dstPath = filepath.Join(r.LocalName, "dst2", "link.txt") + fi, err := os.Lstat(dstPath) + require.NoError(t, err) + assert.True(t, fi.Mode()&os.ModeSymlink == 0) + want := fstest.NewItem("dst2/link.txt", "hello world", when) + fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "") + + // Test that copying a normal file also works + dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst) + require.NoError(t, err) + require.NotNil(t, dst) + want = fstest.NewItem("dst2/file.txt", "hello world", when) + fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "") } diff --git a/backend/local/metadata.go b/backend/local/metadata.go index 097abedf1..7ab69af30 100644 --- a/backend/local/metadata.go +++ b/backend/local/metadata.go @@ -2,6 +2,7 @@ package local import ( "fmt" + "math" "os" "runtime" "strconv" @@ -72,12 +73,12 @@ func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result i value, ok := m[key] if ok { var err error - result64, err := strconv.ParseInt(value, base, 64) + parsed, err := strconv.ParseInt(value, base, 0) if err != nil { fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err) ok = false } - result = int(result64) + result = int(parsed) } return result, ok } @@ -128,9 +129,14 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) { } mode, hasMode := o.parseMetadataInt(m, "mode", 8) if hasMode { - err = os.Chmod(o.path, os.FileMode(mode)) - if err != nil { - outErr = fmt.Errorf("failed to change permissions: %w", err) + if mode >= 0 { + umode := uint(mode) + if umode <= math.MaxUint32 { + err = os.Chmod(o.path, os.FileMode(umode)) + if err != nil { + outErr = fmt.Errorf("failed to change permissions: %w", err) + } + } } } // FIXME not parsing rdev yet diff --git a/backend/onedrive/metadata.go b/backend/onedrive/metadata.go index ab6569a9a..12ba522f6 100644 --- a/backend/onedrive/metadata.go +++ b/backend/onedrive/metadata.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "net/http" + "slices" "strings" "time" @@ -14,7 +15,6 @@ import ( "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/lib/dircache" "github.com/rclone/rclone/lib/errcount" - "golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version ) const ( diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go index 6aad7d177..3ceac3f55 100644 --- a/backend/onedrive/onedrive.go +++ b/backend/onedrive/onedrive.go @@ -942,7 +942,8 @@ func errorHandler(resp *http.Response) error { // Decode error response errResponse := new(api.Error) err := rest.DecodeJSON(resp, &errResponse) - if err != nil { + // Redirects have no body so don't report an error + if err != nil && resp.Header.Get("Location") == "" { fs.Debugf(nil, "Couldn't decode error response: %v", err) } if errResponse.ErrorInfo.Code == "" { @@ -1544,9 +1545,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error { // Precision return the precision of this Fs func (f *Fs) Precision() time.Duration { - if f.driveType == driveTypePersonal { - return time.Millisecond - } + // While this is true for some OneDrive personal accounts, it + // isn't true for all of them. 
See #8101 for details + // + // if f.driveType == driveTypePersonal { + // return time.Millisecond + // } return time.Second } diff --git a/backend/onedrive/onedrive_internal_test.go b/backend/onedrive/onedrive_internal_test.go index 0ef1b708f..5ac2141d0 100644 --- a/backend/onedrive/onedrive_internal_test.go +++ b/backend/onedrive/onedrive_internal_test.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "slices" "testing" "time" @@ -16,7 +17,6 @@ import ( "github.com/rclone/rclone/lib/random" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version ) // go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go index d73933980..7a5ec560b 100644 --- a/backend/opendrive/opendrive.go +++ b/backend/opendrive/opendrive.go @@ -404,6 +404,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, return dstObj, nil } +// About gets quota information +func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { + var uInfo usersInfoResponse + var resp *http.Response + + err = f.pacer.Call(func() (bool, error) { + opts := rest.Opts{ + Method: "GET", + Path: "/users/info.json/" + f.session.SessionID, + } + resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo) + return f.shouldRetry(ctx, resp, err) + }) + + if err != nil { + return nil, err + } + + usage = &fs.Usage{ + Used: fs.NewUsageValue(uInfo.StorageUsed), + Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB + Free: fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed), + } + return usage, nil +} + // Move src to this remote using server-side move operations. // // This is stored with the remote path given. 
@@ -1147,6 +1173,7 @@ var ( _ fs.Mover = (*Fs)(nil) _ fs.DirMover = (*Fs)(nil) _ fs.DirCacheFlusher = (*Fs)(nil) + _ fs.Abouter = (*Fs)(nil) _ fs.Object = (*Object)(nil) _ fs.IDer = (*Object)(nil) _ fs.ParentIDer = (*Object)(nil) diff --git a/backend/opendrive/types.go b/backend/opendrive/types.go index d47a35628..fa897e0d7 100644 --- a/backend/opendrive/types.go +++ b/backend/opendrive/types.go @@ -231,3 +231,10 @@ type permissions struct { type uploadFileChunkReply struct { TotalWritten int64 `json:"TotalWritten"` } + +// usersInfoResponse describes OpenDrive users/info.json response +type usersInfoResponse struct { + // This response contains many other values but these are the only ones currently in use + StorageUsed int64 `json:"StorageUsed,string"` + MaxStorage int64 `json:"MaxStorage,string"` +} diff --git a/backend/pcloud/api/types.go b/backend/pcloud/api/types.go index c1b5dc217..6f2d6361b 100644 --- a/backend/pcloud/api/types.go +++ b/backend/pcloud/api/types.go @@ -109,6 +109,37 @@ type Hashes struct { SHA256 string `json:"sha256"` } +// FileTruncateResponse is the response from /file_truncate +type FileTruncateResponse struct { + Error +} + +// FileCloseResponse is the response from /file_close +type FileCloseResponse struct { + Error +} + +// FileOpenResponse is the response from /file_open +type FileOpenResponse struct { + Error + Fileid int64 `json:"fileid"` + FileDescriptor int64 `json:"fd"` +} + +// FileChecksumResponse is the response from /file_checksum +type FileChecksumResponse struct { + Error + MD5 string `json:"md5"` + SHA1 string `json:"sha1"` + SHA256 string `json:"sha256"` +} + +// FilePWriteResponse is the response from /file_pwrite +type FilePWriteResponse struct { + Error + Bytes int64 `json:"bytes"` +} + // UploadFileResponse is the response from /uploadfile type UploadFileResponse struct { Error diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go index 64a48c25c..dc475cb9d 100644 --- a/backend/pcloud/pcloud.go +++ b/backend/pcloud/pcloud.go @@ -14,6 +14,7 @@ import ( "net/http" "net/url" "path" + "strconv" "strings" "time" @@ -146,7 +147,8 @@ we have to rely on user password authentication for it.`, Help: "Your pcloud password.", IsPassword: true, Advanced: true, - }}...), + }, + }...), }) } @@ -161,15 +163,16 @@ type Options struct { // Fs represents a remote pcloud type Fs struct { - name string // name of this remote - root string // the path we are working on - opt Options // parsed options - features *fs.Features // optional features - srv *rest.Client // the connection to the server - cleanupSrv *rest.Client // the connection used for the cleanup method - dirCache *dircache.DirCache // Map of directory path to directory id - pacer *fs.Pacer // pacer for API calls - tokenRenewer *oauthutil.Renew // renew the token on expiry + name string // name of this remote + root string // the path we are working on + opt Options // parsed options + features *fs.Features // optional features + ts *oauthutil.TokenSource // the token source, used to create new clients + srv *rest.Client // the connection to the server + cleanupSrv *rest.Client // the connection used for the cleanup method + dirCache *dircache.DirCache // Map of directory path to directory id + pacer *fs.Pacer // pacer for API calls + tokenRenewer *oauthutil.Renew // renew the token on expiry } // Object describes a pcloud object @@ -317,6 +320,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e name: name, root: root, opt: *opt, + ts: ts, srv: 
rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname), pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } @@ -326,6 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e f.features = (&fs.Features{ CaseInsensitive: false, CanHaveEmptyDirectories: true, + PartialUploads: true, }).Fill(ctx, f) if !canCleanup { f.features.CleanUp = nil @@ -333,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e f.srv.SetErrorHandler(errorHandler) // Renew the token in the background - f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error { + f.tokenRenewer = oauthutil.NewRenew(f.String(), f.ts, func() error { _, err := f.readMetaDataForPath(ctx, "") return err }) @@ -375,6 +380,56 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e return f, nil } +// OpenWriterAt opens with a handle for random access writes +// +// Pass in the remote desired and the size if known. +// +// It truncates any existing object +func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) { + client, err := f.newSingleConnClient(ctx) + if err != nil { + return nil, fmt.Errorf("create client: %w", err) + } + // init an empty file + leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) + if err != nil { + return nil, fmt.Errorf("resolve src: %w", err) + } + openResult, err := fileOpenNew(ctx, client, f, directoryID, leaf) + if err != nil { + return nil, fmt.Errorf("open file: %w", err) + } + + writer := &writerAt{ + ctx: ctx, + client: client, + fs: f, + size: size, + remote: remote, + fd: openResult.FileDescriptor, + fileID: openResult.Fileid, + } + + return writer, nil +} + +// Create a new http client, accepting keep-alive headers, limited to single connection. +// Necessary for pcloud fileops API, as it binds the session to the underlying TCP connection. +// File descriptors are only valid within the same connection and auto-closed when the connection is closed, +// hence we need a separate client (with single connection) for each fd to avoid all sorts of errors and race conditions. +func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) { + baseClient := fshttp.NewClient(ctx) + baseClient.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) { + t.MaxConnsPerHost = 1 + t.DisableKeepAlives = false + }) + // Set our own http client in the context + ctx = oauthutil.Context(ctx, baseClient) + // create a new oauth client, re-use the token source + oAuthClient := oauth2.NewClient(ctx, f.ts) + return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil +} + // Return an Object from a path // // If it can't be found it returns the error fs.ErrorObjectNotFound. @@ -1094,9 +1149,42 @@ func (o *Object) ModTime(ctx context.Context) time.Time { // SetModTime sets the modification time of the local fs object func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { - // Pcloud doesn't have a way of doing this so returning this - // error will cause the file to be re-uploaded to set the time. 
-	return fs.ErrorCantSetModTime
+	filename, directoryID, err := o.fs.dirCache.FindPath(ctx, o.Remote(), true)
+	if err != nil {
+		return err
+	}
+	fileID := fileIDtoNumber(o.id)
+	filename = o.fs.opt.Enc.FromStandardName(filename)
+	opts := rest.Opts{
+		Method:           "PUT",
+		Path:             "/copyfile",
+		Parameters:       url.Values{},
+		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
+		ExtraHeaders: map[string]string{
+			"Connection": "keep-alive",
+		},
+	}
+	opts.Parameters.Set("fileid", fileID)
+	opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
+	opts.Parameters.Set("toname", filename)
+	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
+	opts.Parameters.Set("ctime", strconv.FormatInt(modTime.Unix(), 10))
+	opts.Parameters.Set("mtime", strconv.FormatInt(modTime.Unix(), 10))
+
+	result := &api.ItemResult{}
+	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, result)
+		err = result.Error.Update(err)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return fmt.Errorf("update mtime: copyfile: %w", err)
+	}
+	if err := o.setMetaData(&result.Metadata); err != nil {
+		return err
+	}
+
+	return nil
 }
 
 // Storable returns a boolean showing whether this object storable
diff --git a/backend/pcloud/writer_at.go b/backend/pcloud/writer_at.go new file mode 100644 index 000000000..ee3380a71 --- /dev/null +++ b/backend/pcloud/writer_at.go @@ -0,0 +1,216 @@
+package pcloud
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha1"
+	"encoding/hex"
+	"fmt"
+	"net/url"
+	"strconv"
+	"time"
+
+	"github.com/rclone/rclone/backend/pcloud/api"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/lib/rest"
+)
+
+// writerAt implements fs.WriterAtCloser, adding the OpenWriterAt feature to pcloud.
+type writerAt struct {
+	ctx    context.Context
+	client *rest.Client
+	fs     *Fs
+	size   int64
+	remote string
+	fd     int64
+	fileID int64
+}
+
+// Close implements WriterAt.Close.
+func (c *writerAt) Close() error {
+	// close fd
+	if _, err := c.fileClose(c.ctx); err != nil {
+		return fmt.Errorf("close fd: %w", err)
+	}
+
+	// Avoiding race conditions: depending on the TCP connection, there might be
+	// caching issues when checking the size immediately after a write.
+	// Hence we try to avoid them by checking the resulting size on a different connection.
+	if c.size < 0 {
+		// Without knowing the size, we cannot do size checks.
+		// Fall back to a 1s sleep and hope for the best.
+		time.Sleep(1 * time.Second)
+		return nil
+	}
+	sizeOk := false
+	sizeLastSeen := int64(0)
+	for retry := 0; retry < 5; retry++ {
+		fs.Debugf(c.remote, "checking file size: try %d/5", retry)
+		obj, err := c.fs.NewObject(c.ctx, c.remote)
+		if err != nil {
+			return fmt.Errorf("get uploaded obj: %w", err)
+		}
+		sizeLastSeen = obj.Size()
+		if obj.Size() == c.size {
+			sizeOk = true
+			break
+		}
+		time.Sleep(1 * time.Second)
+	}
+
+	if !sizeOk {
+		return fmt.Errorf("incorrect size after upload: got %d, want %d", sizeLastSeen, c.size)
+	}
+
+	return nil
+}
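// OpenWriterAt is what enables rclone's multi-thread copies on pcloud: each
// uploading goroutine shares one handle and writes its chunk at a distinct
// offset, and Close then verifies the assembled size. A reduced sketch of
// that calling pattern; the chunk layout and error handling are simplified,
// and exampleParallelWrite is not part of the patch:

func exampleParallelWrite(ctx context.Context, f *Fs) error {
	w, err := f.OpenWriterAt(ctx, "big.bin", 8)
	if err != nil {
		return err
	}
	var wg sync.WaitGroup
	for _, chunk := range []struct {
		off  int64
		data []byte
	}{{0, []byte("abcd")}, {4, []byte("efgh")}} {
		wg.Add(1)
		go func(off int64, data []byte) {
			defer wg.Done()
			_, _ = w.WriteAt(data, off) // per-chunk errors elided for brevity
		}(chunk.off, chunk.data)
	}
	wg.Wait()
	return w.Close() // polls until the server reports the expected 8 bytes
}

+// WriteAt implements fs.WriteAt.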
+func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) { + contentLength := len(buffer) + + inSHA1Bytes := sha1.Sum(buffer) + inSHA1 := hex.EncodeToString(inSHA1Bytes[:]) + + // get target hash + outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength)) + if err != nil { + return 0, err + } + outSHA1 := outChecksum.SHA1 + + if outSHA1 == "" || inSHA1 == "" { + return 0, fmt.Errorf("expect both hashes to be filled: src: %q, target: %q", inSHA1, outSHA1) + } + + // check hash of buffer, skip if fits + if inSHA1 == outSHA1 { + return contentLength, nil + } + + // upload buffer with offset if necessary + if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil { + return 0, err + } + + return contentLength, nil +} + +// Call pcloud file_open using folderid and name with O_CREAT and O_WRITE flags, see [API Doc.] +// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html +func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, filename string) (*api.FileOpenResponse, error) { + opts := rest.Opts{ + Method: "PUT", + Path: "/file_open", + Parameters: url.Values{}, + TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding + ExtraHeaders: map[string]string{ + "Connection": "keep-alive", + }, + } + filename = srcFs.opt.Enc.FromStandardName(filename) + opts.Parameters.Set("name", filename) + opts.Parameters.Set("folderid", dirIDtoNumber(directoryID)) + opts.Parameters.Set("flags", "0x0042") // O_CREAT, O_WRITE + + result := &api.FileOpenResponse{} + err := srcFs.pacer.CallNoRetry(func() (bool, error) { + resp, err := c.CallJSON(ctx, &opts, nil, result) + err = result.Error.Update(err) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("open new file descriptor: %w", err) + } + return result, nil +} + +// Call pcloud file_checksum, see [API Doc.] +// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html +func (c *writerAt) fileChecksum( + ctx context.Context, + offset, count int64, +) (*api.FileChecksumResponse, error) { + opts := rest.Opts{ + Method: "PUT", + Path: "/file_checksum", + Parameters: url.Values{}, + TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding + ExtraHeaders: map[string]string{ + "Connection": "keep-alive", + }, + } + opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10)) + opts.Parameters.Set("offset", strconv.FormatInt(offset, 10)) + opts.Parameters.Set("count", strconv.FormatInt(count, 10)) + + result := &api.FileChecksumResponse{} + err := c.fs.pacer.CallNoRetry(func() (bool, error) { + resp, err := c.client.CallJSON(ctx, &opts, nil, result) + err = result.Error.Update(err) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err) + } + return result, nil +} + +// Call pcloud file_pwrite, see [API Doc.] 
+// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
+func (c *writerAt) filePWrite(
+	ctx context.Context,
+	offset int64,
+	buf []byte,
+) (*api.FilePWriteResponse, error) {
+	contentLength := int64(len(buf))
+	opts := rest.Opts{
+		Method:           "PUT",
+		Path:             "/file_pwrite",
+		Body:             bytes.NewReader(buf),
+		ContentLength:    &contentLength,
+		Parameters:       url.Values{},
+		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
+		Close:            false,
+		ExtraHeaders: map[string]string{
+			"Connection": "keep-alive",
+		},
+	}
+	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
+	opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
+
+	result := &api.FilePWriteResponse{}
+	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
+		err = result.Error.Update(err)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
+	}
+	return result, nil
+}
+
+// Call pcloud file_close, see [API Doc.]
+// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
+func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
+	opts := rest.Opts{
+		Method:           "PUT",
+		Path:             "/file_close",
+		Parameters:       url.Values{},
+		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
+		Close:            true,
+	}
+	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
+
+	result := &api.FileCloseResponse{}
+	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
+		err = result.Error.Update(err)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("close file descriptor: %w", err)
+	}
+	return result, nil
+}
diff --git a/backend/pikpak/api/types.go b/backend/pikpak/api/types.go index 96c16ac24..a81bda3c1 100644 --- a/backend/pikpak/api/types.go +++ b/backend/pikpak/api/types.go @@ -513,6 +513,72 @@ type RequestDecompress struct {
 	DefaultParent bool `json:"default_parent,omitempty"`
 }
 
+// ------------------------------------------------------------ authorization
+
+// CaptchaToken is a response to requestCaptchaToken api call
+type CaptchaToken struct {
+	CaptchaToken string `json:"captcha_token"`
+	ExpiresIn    int64  `json:"expires_in"` // currently 300s
+	// The API doesn't provide an Expiry field, so it is populated from ExpiresIn on retrieval
+	Expiry time.Time `json:"expiry,omitempty"`
+	URL    string    `json:"url,omitempty"` // a link for users to solve captcha
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *CaptchaToken) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+
+	expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
+	return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
+}
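// The two expiry fields interact in a fixed order: the API hands back only
// ExpiresIn, Expiry gets derived from it on retrieval, and expired() then
// applies the 10-second delta before Valid() (below) reports the token
// usable. A minimal sketch of that lifecycle; the literal values are made up:

func exampleCaptchaTokenLifecycle() {
	tok := &CaptchaToken{CaptchaToken: "abc", ExpiresIn: 300}
	// Derive Expiry on retrieval, as the client-side requestToken does.
	tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	_ = tok.Valid() // true until ~10s before Expiry, then false
}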
+// Valid reports whether t is non-nil, has a CaptchaToken, and is not expired.
+func (t *CaptchaToken) Valid() bool {
+	return t != nil && t.CaptchaToken != "" && !t.expired()
+}
+
+// CaptchaTokenRequest is the request body for a captcha token
+type CaptchaTokenRequest struct {
+	Action       string            `json:"action,omitempty"`
+	CaptchaToken string            `json:"captcha_token,omitempty"`
+	ClientID     string            `json:"client_id,omitempty"`
+	DeviceID     string            `json:"device_id,omitempty"`
+	Meta         *CaptchaTokenMeta `json:"meta,omitempty"`
+}
+
+// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
+type CaptchaTokenMeta struct {
+	CaptchaSign   string `json:"captcha_sign,omitempty"`
+	ClientVersion string `json:"client_version,omitempty"`
+	PackageName   string `json:"package_name,omitempty"`
+	Timestamp     string `json:"timestamp,omitempty"`
+	UserID        string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
+	UserName      string `json:"username,omitempty"`
+	Email         string `json:"email,omitempty"`
+	PhoneNumber   string `json:"phone_number,omitempty"`
+}
+
+// Token represents an oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
+type Token struct {
+	TokenType    string `json:"token_type"`
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+	ExpiresIn    int    `json:"expires_in"`
+	Sub          string `json:"sub"`
+}
+
+// Expiry returns the expiry time derived from ExpiresIn, so it should be called on retrieval.
+// e must be non-nil.
+func (e *Token) Expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
+// ------------------------------------------------------------
 
 // NOT implemented YET
diff --git a/backend/pikpak/helper.go b/backend/pikpak/helper.go index fd1561d7a..e6f779da9 100644 --- a/backend/pikpak/helper.go +++ b/backend/pikpak/helper.go @@ -3,8 +3,10 @@ package pikpak
 import (
 	"bytes"
 	"context"
+	"crypto/md5"
 	"crypto/sha1"
 	"encoding/hex"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
@@ -14,10 +16,13 @@ import (
 	"os"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	"github.com/rclone/rclone/backend/pikpak/api"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/lib/rest"
 )
@@ -262,15 +267,20 @@ func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err e
 	if err != nil {
 		return
 	}
+	if src.Size() == 0 {
+		// If src is zero-length, the API will return
+		// Error "cid and file_size is required" (400)
+		// In this case we can simply return cid as the gcid
+		return cid, nil
+	}
 	params := url.Values{}
 	params.Set("cid", cid)
 	params.Set("file_size", strconv.FormatInt(src.Size(), 10))
 	opts := rest.Opts{
-		Method:       "GET",
-		Path:         "/drive/v1/resource/cid",
-		Parameters:   params,
-		ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
+		Method:     "GET",
+		Path:       "/drive/v1/resource/cid",
+		Parameters: params,
 	}
 
 	info := struct {
@@ -368,11 +378,23 @@ func calcGcid(r io.Reader, size int64) (string, error) {
 	return hex.EncodeToString(totalHash.Sum(nil)), nil
 }
 
+// unWrapObjectInfo returns the underlying Object unwrapped as much as
+// possible or nil even if it is an OverrideRemote
+func unWrapObjectInfo(oi fs.ObjectInfo) fs.Object {
+	if o, ok := oi.(fs.Object); ok {
+		return fs.UnWrapObject(o)
+	} else if do, ok := oi.(*fs.OverrideRemote); ok {
+		// Unwrap if it is an operations.OverrideRemote
+		return do.UnWrap()
+	}
+	return nil
+}
+
 // calcCid calculates Cid from source
 //
 // Cid is a simplified version of Gcid
 func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err
error) { - srcObj := fs.UnWrapObjectInfo(src) + srcObj := unWrapObjectInfo(src) if srcObj == nil { return "", fmt.Errorf("failed to unwrap object from src: %s", src) } @@ -408,6 +430,8 @@ func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) { return } +// ------------------------------------------------------------ authorization + // randomly generates device id used for request header 'x-device-id' // // original javascript implementation @@ -428,3 +452,206 @@ func genDeviceID() string { } return string(base) } + +var md5Salt = []string{ + "C9qPpZLN8ucRTaTiUMWYS9cQvWOE", + "+r6CQVxjzJV6LCV", + "F", + "pFJRC", + "9WXYIDGrwTCz2OiVlgZa90qpECPD6olt", + "/750aCr4lm/Sly/c", + "RB+DT/gZCrbV", + "", + "CyLsf7hdkIRxRm215hl", + "7xHvLi2tOYP0Y92b", + "ZGTXXxu8E/MIWaEDB+Sm/", + "1UI3", + "E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO", + "ihtqpG6FMt65+Xk+tWUH2", + "NhXXU9rg4XXdzo7u5o", +} + +func md5Sum(text string) string { + hash := md5.Sum([]byte(text)) + return hex.EncodeToString(hash[:]) +} + +func calcCaptchaSign(deviceID string) (timestamp, sign string) { + timestamp = fmt.Sprint(time.Now().UnixMilli()) + str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp) + for _, salt := range md5Salt { + str = md5Sum(str + salt) + } + sign = "1." + str + return +} + +func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) { + req = &api.CaptchaTokenRequest{ + Action: action, + CaptchaToken: oldToken, // can be empty initially + ClientID: clientID, + DeviceID: opt.DeviceID, + Meta: new(api.CaptchaTokenMeta), + } + switch action { + case "POST:/v1/auth/signin": + req.Meta.UserName = opt.Username + default: + timestamp, captchaSign := calcCaptchaSign(opt.DeviceID) + req.Meta.CaptchaSign = captchaSign + req.Meta.Timestamp = timestamp + req.Meta.ClientVersion = clientVersion + req.Meta.PackageName = packageName + req.Meta.UserID = opt.UserID + } + return +} + +// CaptchaTokenSource stores updated captcha tokens in the config file +type CaptchaTokenSource struct { + mu sync.Mutex + m configmap.Mapper + opt *Options + token *api.CaptchaToken + ctx context.Context + rst *pikpakClient +} + +// initialize CaptchaTokenSource from rclone.conf if possible +func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource { + token := new(api.CaptchaToken) + tokenString, ok := m.Get("captcha_token") + if !ok || tokenString == "" { + fs.Debugf(nil, "failed to read captcha token out of config file") + } else { + if err := json.Unmarshal([]byte(tokenString), token); err != nil { + fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err) + } + } + return &CaptchaTokenSource{ + m: m, + opt: opt, + token: token, + ctx: ctx, + rst: newPikpakClient(getClient(ctx, opt), opt), + } +} + +// requestToken retrieves captcha token from API +func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) { + opts := rest.Opts{ + Method: "POST", + RootURL: "https://user.mypikpak.com/v1/shield/captcha/init", + } + var info *api.CaptchaToken + _, err = cts.rst.CallJSON(ctx, &opts, &req, &info) + if err == nil && info.ExpiresIn != 0 { + // populate to Expiry + info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second) + cts.token = info // update with a new one + } + return +} + +func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) { + oldToken := "" + if cts.token != nil { + oldToken = cts.token.CaptchaToken + } + action 
:= "GET:/drive/v1/about" + if opts.RootURL == "" && opts.Path != "" { + action = fmt.Sprintf("%s:%s", opts.Method, opts.Path) + } else if u, err := url.Parse(opts.RootURL); err == nil { + action = fmt.Sprintf("%s:%s", opts.Method, u.Path) + } + req := newCaptchaTokenRequest(action, oldToken, cts.opt) + if err := cts.requestToken(cts.ctx, req); err != nil { + return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err) + } + + // put it into rclone.conf + tokenBytes, err := json.Marshal(cts.token) + if err != nil { + return "", fmt.Errorf("failed to marshal captcha token: %w", err) + } + cts.m.Set("captcha_token", string(tokenBytes)) + return cts.token.CaptchaToken, nil +} + +// Invalidate resets existing captcha token for a forced refresh +func (cts *CaptchaTokenSource) Invalidate() { + cts.mu.Lock() + cts.token.CaptchaToken = "" + cts.mu.Unlock() +} + +// Token returns a valid captcha token +func (cts *CaptchaTokenSource) Token(opts *rest.Opts) (string, error) { + cts.mu.Lock() + defer cts.mu.Unlock() + if cts.token.Valid() { + return cts.token.CaptchaToken, nil + } + return cts.refreshToken(opts) +} + +// pikpakClient wraps rest.Client with a handle of captcha token +type pikpakClient struct { + opt *Options + client *rest.Client + captcha *CaptchaTokenSource +} + +// newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with +// * error handler +// * root url +// * default headers +func newPikpakClient(c *http.Client, opt *Options) *pikpakClient { + client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL) + for key, val := range map[string]string{ + "Referer": "https://mypikpak.com/", + "x-client-id": clientID, + "x-client-version": clientVersion, + "x-device-id": opt.DeviceID, + // "x-device-model": "firefox%2F129.0", + // "x-device-name": "PC-Firefox", + // "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID), + // "x-net-work-type": "NONE", + // "x-os-version": "Win32", + // "x-platform-version": "1", + // "x-protocol-version": "301", + // "x-provider-name": "NONE", + // "x-sdk-version": "8.0.3", + } { + client.SetHeader(key, val) + } + return &pikpakClient{ + client: client, + opt: opt, + } +} + +// This should be called right after pikpakClient initialized +func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient { + c.captcha = newCaptchaTokenSource(ctx, c.opt, m) + return c +} + +func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) { + if c.captcha != nil { + token, err := c.captcha.Token(opts) + if err != nil || token == "" { + return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err)) + } + if opts.ExtraHeaders == nil { + opts.ExtraHeaders = make(map[string]string) + } + opts.ExtraHeaders["x-captcha-token"] = token + } + return c.client.CallJSON(ctx, opts, request, response) +} + +func (c *pikpakClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) { + return c.client.Call(ctx, opts) +} diff --git a/backend/pikpak/pikpak.go b/backend/pikpak/pikpak.go index 1a5d82a33..b70ecf92a 100644 --- a/backend/pikpak/pikpak.go +++ b/backend/pikpak/pikpak.go @@ -23,6 +23,7 @@ package pikpak import ( "bytes" "context" + "encoding/base64" "encoding/json" "errors" "fmt" @@ -51,6 +52,7 @@ import ( "github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/obscure" 
"github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/dircache" @@ -64,15 +66,17 @@ import ( // Constants const ( - rcloneClientID = "YNxT9w7GMdWvEOKa" - rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E" - minSleep = 100 * time.Millisecond - maxSleep = 2 * time.Second - taskWaitTime = 500 * time.Millisecond - decayConstant = 2 // bigger for slower decay, exponential - rootURL = "https://api-drive.mypikpak.com" - minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize) - defaultUploadConcurrency = manager.DefaultUploadConcurrency + clientID = "YUMx5nI8ZU8Ap8pm" + clientVersion = "2.0.0" + packageName = "mypikpak.com" + defaultUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0" + minSleep = 100 * time.Millisecond + maxSleep = 2 * time.Second + taskWaitTime = 500 * time.Millisecond + decayConstant = 2 // bigger for slower decay, exponential + rootURL = "https://api-drive.mypikpak.com" + minChunkSize = fs.SizeSuffix(manager.MinUploadPartSize) + defaultUploadConcurrency = manager.DefaultUploadConcurrency ) // Globals @@ -85,43 +89,53 @@ var ( TokenURL: "https://user.mypikpak.com/v1/auth/token", AuthStyle: oauth2.AuthStyleInParams, }, - ClientID: rcloneClientID, - ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret), - RedirectURL: oauthutil.RedirectURL, + ClientID: clientID, + RedirectURL: oauthutil.RedirectURL, } ) -// Returns OAuthOptions modified for pikpak -func pikpakOAuthOptions() []fs.Option { - opts := []fs.Option{} - for _, opt := range oauthutil.SharedOptions { - if opt.Name == config.ConfigClientID { - opt.Advanced = true - } else if opt.Name == config.ConfigClientSecret { - opt.Advanced = true - } - opts = append(opts, opt) - } - return opts -} - // pikpakAutorize retrieves OAuth token using user/pass and save it to rclone.conf func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error { - // override default client id/secret - if id, ok := m.Get("client_id"); ok && id != "" { - oauthConfig.ClientID = id - } - if secret, ok := m.Get("client_secret"); ok && secret != "" { - oauthConfig.ClientSecret = secret + if opt.Username == "" { + return errors.New("no username") } pass, err := obscure.Reveal(opt.Password) if err != nil { return fmt.Errorf("failed to decode password - did you obscure it?: %w", err) } - t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass) + // new device id if necessary + if len(opt.DeviceID) != 32 { + opt.DeviceID = genDeviceID() + m.Set("device_id", opt.DeviceID) + fs.Infof(nil, "Using new device id %q", opt.DeviceID) + } + opts := rest.Opts{ + Method: "POST", + RootURL: "https://user.mypikpak.com/v1/auth/signin", + } + req := map[string]string{ + "username": opt.Username, + "password": pass, + "client_id": clientID, + } + var token api.Token + rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m) + _, err = rst.CallJSON(ctx, &opts, req, &token) + if apiErr, ok := err.(*api.Error); ok { + if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 { + rst.captcha.Invalidate() + _, err = rst.CallJSON(ctx, &opts, req, &token) + } + } if err != nil { return fmt.Errorf("failed to retrieve token using username/password: %w", err) } + t := &oauth2.Token{ + AccessToken: token.AccessToken, + TokenType: token.TokenType, + RefreshToken: token.RefreshToken, + Expiry: token.Expiry(), + } 
return oauthutil.PutToken(name, m, t, false) } @@ -160,7 +174,7 @@ func init() { } return nil, fmt.Errorf("unknown state %q", config.State) }, - Options: append(pikpakOAuthOptions(), []fs.Option{{ + Options: []fs.Option{{ Name: "user", Help: "Pikpak username.", Required: true, @@ -170,6 +184,18 @@ func init() { Help: "Pikpak password.", Required: true, IsPassword: true, + }, { + Name: "device_id", + Help: "Device ID used for authorization.", + Advanced: true, + Sensitive: true, + }, { + Name: "user_agent", + Default: defaultUserAgent, + Advanced: true, + Help: fmt.Sprintf(`HTTP user agent for pikpak. + +Defaults to "%s" but can be overridden with "--pikpak-user-agent" on the command line.`, defaultUserAgent), }, { Name: "root_folder_id", Help: `ID of the root folder. @@ -248,7 +274,7 @@ this may help to speed up the transfers.`, encoder.EncodeRightSpace | encoder.EncodeRightPeriod | encoder.EncodeInvalidUtf8), - }}...), + }}, }) } @@ -256,6 +282,9 @@ this may help to speed up the transfers.`, type Options struct { Username string `config:"user"` Password string `config:"pass"` + UserID string `config:"user_id"` // only available during runtime + DeviceID string `config:"device_id"` + UserAgent string `config:"user_agent"` RootFolderID string `config:"root_folder_id"` UseTrash bool `config:"use_trash"` TrashedOnly bool `config:"trashed_only"` @@ -271,11 +300,10 @@ type Fs struct { root string // the path we are working on opt Options // parsed options features *fs.Features // optional features - rst *rest.Client // the connection to the server + rst *pikpakClient // the connection to the server dirCache *dircache.DirCache // Map of directory path to directory id pacer *fs.Pacer // pacer for API calls rootFolderID string // the id of the root folder - deviceID string // device id used for api requests client *http.Client // authorized client m configmap.Mapper tokenMu *sync.Mutex // when renewing tokens @@ -429,6 +457,12 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b } else if apiErr.Reason == "file_space_not_enough" { // "file_space_not_enough" (8): Storage space is not enough return false, fserrors.FatalError(err) + } else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 { + // "captcha_invalid" (9): Verification code is invalid + // This error occurred on the POST:/drive/v1/files endpoint + // when a zero-byte file was uploaded with an invalid captcha token + f.rst.captcha.Invalidate() + return true, err } } @@ -452,13 +486,36 @@ func errorHandler(resp *http.Response) error { return errResponse } +// getClient makes an http client according to the options +func getClient(ctx context.Context, opt *Options) *http.Client { + // Override a few config settings and create a client + newCtx, ci := fs.AddConfig(ctx) + ci.UserAgent = opt.UserAgent + return fshttp.NewClient(newCtx) +} + // newClientWithPacer sets a new http/rest client with a pacer to Fs func (f *Fs) newClientWithPacer(ctx context.Context) (err error) { - f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig) + var ts *oauthutil.TokenSource + f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt)) + if err != nil { + return fmt.Errorf("failed to create oauth client: %w", err) + } + token, err := ts.Token() + if err != nil { + return err + } + // parse user_id from oauth access token for later use + if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 {
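The user_id extraction that follows decodes the middle segment of the OAuth JWT with base64.URLEncoding, over-padding with "===" and discarding the decode error. A sketch of the same idea that computes the exact padding instead; the "sub" claim name is taken from the change, everything else is illustrative:

package jwtsketch

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// subjectFromJWT returns the "sub" claim of a JWT access token without
// verifying the signature - acceptable here, as the token came straight
// from the token endpoint.
func subjectFromJWT(token string) (string, error) {
	parts := strings.Split(token, ".")
	if len(parts) < 2 {
		return "", fmt.Errorf("not a JWT: %d segments", len(parts))
	}
	payload := parts[1]
	// JWT segments are unpadded base64url; restore padding to a multiple of 4
	if n := len(payload) % 4; n != 0 {
		payload += strings.Repeat("=", 4-n)
	}
	raw, err := base64.URLEncoding.DecodeString(payload)
	if err != nil {
		return "", err
	}
	var claims struct {
		Sub string `json:"sub"`
	}
	if err := json.Unmarshal(raw, &claims); err != nil {
		return "", err
	}
	return claims.Sub, nil
}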
+ jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===") + info := struct { + UserID string `json:"sub,omitempty"` + }{} + if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil { + f.opt.UserID = info.UserID + } + } + f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m) f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))) return nil } @@ -491,10 +548,19 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err CanHaveEmptyDirectories: true, // can have empty directories NoMultiThreading: true, // can't have multiple threads downloading }).Fill(ctx, f) - f.deviceID = genDeviceID() + + // new device id if necessary + if len(f.opt.DeviceID) != 32 { + f.opt.DeviceID = genDeviceID() + m.Set("device_id", f.opt.DeviceID) + fs.Infof(nil, "Using new device id %q", f.opt.DeviceID) + } if err := f.newClientWithPacer(ctx); err != nil { - return nil, err + // re-authorize if necessary + if strings.Contains(err.Error(), "invalid_grant") { + return f, f.reAuthorize(ctx) + } + return nil, err } return f, nil @@ -1707,7 +1773,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi gcid, err := o.fs.getGcid(ctx, src) if err != nil || gcid == "" { fs.Debugf(o, "calculating gcid: %v", err) - if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal { + if srcObj := unWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal { // No buffering; directly calculate gcid from source rc, err := srcObj.Open(ctx) if err != nil { diff --git a/backend/protondrive/protondrive.go b/backend/protondrive/protondrive.go index 415db44a8..821e3f9c7 100644 --- a/backend/protondrive/protondrive.go +++ b/backend/protondrive/protondrive.go @@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e // No root so return old f return f, nil } - _, err := tempF.newObjectWithLink(ctx, remote, nil) + _, err := tempF.newObject(ctx, remote) if err != nil { if err == fs.ErrorObjectNotFound { // File doesn't exist so return old f @@ -487,7 +487,7 @@ func (f *Fs) CleanUp(ctx context.Context) error { // ErrorIsDir if possible without doing any extra work, // otherwise ErrorObjectNotFound.
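Circling back to the pikpak signin above: it retries exactly once when the server rejects a stale captcha token. A minimal, self-contained sketch of that pattern, with a hypothetical doSignin standing in for rst.CallJSON; only the "captcha_invalid" reason and code 4002 come from the change itself:

package pikpaksketch

import (
	"errors"
	"fmt"
)

// apiError mirrors the shape of pikpak's api.Error for illustration only.
type apiError struct {
	Reason string
	Code   int
}

func (e *apiError) Error() string { return fmt.Sprintf("%s (%d)", e.Reason, e.Code) }

// signinWithCaptchaRetry performs one signin attempt and, if the server
// rejects the cached captcha token, invalidates it and retries exactly once.
func signinWithCaptchaRetry(doSignin func() error, invalidate func()) error {
	err := doSignin()
	var apiErr *apiError
	if errors.As(err, &apiErr) && apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 {
		invalidate() // drop the cached token so the next attempt mints a fresh one
		err = doSignin()
	}
	return err
}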
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { - return f.newObjectWithLink(ctx, remote, nil) + return f.newObject(ctx, remote) } func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) { @@ -516,35 +516,27 @@ func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, er return link, nil } -// readMetaDataForRemote reads the metadata from the remote -func (f *Fs) readMetaDataForRemote(ctx context.Context, remote string, _link *proton.Link) (*proton.Link, *protonDriveAPI.FileSystemAttrs, error) { - link, err := f.getObjectLink(ctx, remote) - if err != nil { - return nil, nil, err - } - +// readMetaDataForLink reads the metadata from the remote +func (f *Fs) readMetaDataForLink(ctx context.Context, link *proton.Link) (*protonDriveAPI.FileSystemAttrs, error) { var fileSystemAttrs *protonDriveAPI.FileSystemAttrs + var err error if err = f.pacer.Call(func() (bool, error) { fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link) return shouldRetry(ctx, err) }); err != nil { - return nil, nil, err + return nil, err } - return link, fileSystemAttrs, nil + return fileSystemAttrs, nil } -// readMetaData gets the metadata if it hasn't already been fetched +// Return an Object from a path and link // -// it also sets the info -func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error) { - if o.link != nil { - return nil - } - - link, fileSystemAttrs, err := o.fs.readMetaDataForRemote(ctx, o.remote, link) - if err != nil { - return err +// If it can't be found it returns the error fs.ErrorObjectNotFound. +func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, } o.id = link.LinkID @@ -554,6 +546,10 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error o.mimetype = link.MIMEType o.link = link + fileSystemAttrs, err := o.fs.readMetaDataForLink(ctx, link) + if err != nil { + return nil, err + } if fileSystemAttrs != nil { o.modTime = fileSystemAttrs.ModificationTime o.originalSize = &fileSystemAttrs.Size @@ -561,23 +557,18 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error o.digests = &fileSystemAttrs.Digests } - return nil + return o, nil } -// Return an Object from a path +// Return an Object from a path only // // If it can't be found it returns the error fs.ErrorObjectNotFound. -func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) { - o := &Object{ - fs: f, - remote: remote, - } - - err := o.readMetaData(ctx, link) +func (f *Fs) newObject(ctx context.Context, remote string) (fs.Object, error) { + link, err := f.getObjectLink(ctx, remote) if err != nil { return nil, err } - return o, nil + return f.newObjectWithLink(ctx, remote, link) } // List the objects and directories in dir into entries. 
The diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 8278d4d08..38c62c811 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -136,6 +136,9 @@ var providerOption = fs.Option{ }, { Value: "Netease", Help: "Netease Object Storage (NOS)", + }, { + Value: "Outscale", + Help: "OUTSCALE Object Storage (OOS)", }, { Value: "Petabox", Help: "Petabox Object Storage", @@ -488,6 +491,26 @@ func init() { Value: "eu-south-2", Help: "Logrono, Spain", }}, + }, { + Name: "region", + Help: "Region where your bucket will be created and your data stored.\n", + Provider: "Outscale", + Examples: []fs.OptionExample{{ + Value: "eu-west-2", + Help: "Paris, France", + }, { + Value: "us-east-2", + Help: "New Jersey, USA", + }, { + Value: "us-west-1", + Help: "California, USA", + }, { + Value: "cloudgouv-eu-west-1", + Help: "SecNumCloud, Paris, France", + }, { + Value: "ap-northeast-1", + Help: "Tokyo, Japan", + }}, }, { Name: "region", Help: "Region where your bucket will be created and your data stored.\n", @@ -1344,6 +1367,26 @@ func init() { Value: "s3.ap-southeast-1.lyvecloud.seagate.com", Help: "Seagate Lyve Cloud AP Southeast 1 (Singapore)", Provider: "LyveCloud", + }, { + Value: "oos.eu-west-2.outscale.com", + Help: "Outscale EU West 2 (Paris)", + Provider: "Outscale", + }, { + Value: "oos.us-east-2.outscale.com", + Help: "Outscale US East 2 (New Jersey)", + Provider: "Outscale", + }, { + Value: "oos.us-west-1.outscale.com", + Help: "Outscale US West 1 (California)", + Provider: "Outscale", + }, { + Value: "oos.cloudgouv-eu-west-1.outscale.com", + Help: "Outscale SecNumCloud (Paris)", + Provider: "Outscale", + }, { + Value: "oos.ap-northeast-1.outscale.com", + Help: "Outscale AP Northeast 1 (Japan)", + Provider: "Outscale", }, { Value: "s3.wasabisys.com", Help: "Wasabi US East 1 (N. Virginia)", @@ -1798,7 +1841,7 @@ func init() { }, { Name: "location_constraint", Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.", - Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox", + Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox", }, { Name: "acl", Help: `Canned ACL used when creating buckets and storing or copying objects. @@ -2606,6 +2649,35 @@ knows about - please make a bug report if not. `, Default: fs.Tristate{}, Advanced: true, + }, { + Name: "directory_bucket", + Help: strings.ReplaceAll(`Set to use AWS Directory Buckets + +If you are using an AWS Directory Bucket then set this flag. + +This will ensure no |Content-Md5| headers are sent and ensure |ETag| +headers are not interpreted as MD5 sums. |X-Amz-Meta-Md5chksum| will +be set on all objects whether single or multipart uploaded. + +This also sets |no_check_bucket = true|. + +Note that Directory Buckets do not support: + +- Versioning +- |Content-Encoding: gzip| + +Rclone limitations with Directory Buckets: + +- rclone does not support creating Directory Buckets with |rclone mkdir| +- ... or removing them with |rclone rmdir| yet +- Directory Buckets do not appear when doing |rclone lsf| at the top level. +- Rclone can't remove auto created directories yet. In theory this should + work with |directory_markers = true| but it doesn't. +- Directories don't seem to appear in recursive (ListR) listings.
+`, "|", "`"), + Default: false, + Advanced: true, + Provider: "AWS", }, { Name: "sdk_log_mode", Help: strings.ReplaceAll(`Set to debug the SDK @@ -2780,6 +2852,7 @@ type Options struct { UseMultipartUploads fs.Tristate `config:"use_multipart_uploads"` UseUnsignedPayload fs.Tristate `config:"use_unsigned_payload"` SDKLogMode sdkLogMode `config:"sdk_log_mode"` + DirectoryBucket bool `config:"directory_bucket"` } // Fs represents a remote s3 server @@ -3052,9 +3125,16 @@ func (s3logger) Logf(classification logging.Classification, format string, v ... func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Client *s3.Client, err error) { ci := fs.GetConfig(ctx) var awsConfig aws.Config + // Make the default static auth + v := aws.Credentials{ + AccessKeyID: opt.AccessKeyID, + SecretAccessKey: opt.SecretAccessKey, + SessionToken: opt.SessionToken, + } + awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v} // Try to fill in the config from the environment if env_auth=true - if opt.EnvAuth { + if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" { configOpts := []func(*awsconfig.LoadOptions) error{} // Set the name of the profile if supplied if opt.Profile != "" { @@ -3079,13 +3159,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli case opt.SecretAccessKey == "": return nil, errors.New("secret_access_key not found") default: - // Make the static auth - v := aws.Credentials{ - AccessKeyID: opt.AccessKeyID, - SecretAccessKey: opt.SecretAccessKey, - SessionToken: opt.SessionToken, - } - awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v} + // static credentials are already set } } @@ -3215,7 +3289,7 @@ func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) { // API to get user region endpoint against the Access Key details: https://www.idrive.com/e2/guides/get_region_endpoint resp, err := client.Post("https://api.idrivee2.com/api/service/get_region_end_point", "application/json", - strings.NewReader(`{"access_key": "`+value+`"}`)) + strings.NewReader(`{"access_key": `+strconv.Quote(value)+`}`)) if err != nil { return } @@ -3328,6 +3402,8 @@ func setQuirks(opt *Options) { urlEncodeListings = false useMultipartEtag = false // untested useAlreadyExists = false // untested + case "Outscale": + virtualHostStyle = false case "RackCorp": // No quirks useMultipartEtag = false // untested @@ -3547,6 +3623,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e // MD5 digest of their object data. f.etagIsNotMD5 = true } + if opt.DirectoryBucket { + // Objects uploaded to directory buckets appear to have random ETags + // + // This doesn't appear to be documented + f.etagIsNotMD5 = true + // The normal API doesn't work for creating directory buckets, so don't try + f.opt.NoCheckBucket = true + } f.setRoot(root) f.features = (&fs.Features{ ReadMimeType: true, @@ -4811,15 +4895,16 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, var commandHelp = []fs.CommandHelp{{ Name: "restore", - Short: "Restore objects from GLACIER to normal storage", - Long: `This command can be used to restore one or more objects from GLACIER -to normal storage. + Short: "Restore objects from GLACIER or INTELLIGENT-TIERING archive tier", + Long: `This command can be used to restore one or more objects from GLACIER to normal storage +or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier. 
Usage Examples: rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS + rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags @@ -4847,14 +4932,14 @@ if not. `, Opts: map[string]string{ "priority": "Priority of restore: Standard|Expedited|Bulk", - "lifetime": "Lifetime of the active copy in days", + "lifetime": "Lifetime of the active copy in days, ignored for INTELLIGENT-TIERING storage", "description": "The optional description for the job.", }, }, { Name: "restore-status", - Short: "Show the restore status for objects being restored from GLACIER to normal storage", - Long: `This command can be used to show the status for objects being restored from GLACIER -to normal storage. + Short: "Show the restore status for objects being restored from GLACIER or INTELLIGENT-TIERING storage", + Long: `This command can be used to show the status for objects being restored from GLACIER to normal storage +or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier. Usage Examples: @@ -4884,6 +4969,15 @@ It returns a list of status dictionaries. "RestoreExpiryDate": "2023-09-06T12:29:19+01:00" }, "StorageClass": "DEEP_ARCHIVE" + }, + { + "Remote": "test.gz", + "VersionID": null, + "RestoreStatus": { + "IsRestoreInProgress": true, + "RestoreExpiryDate": "null" + }, + "StorageClass": "INTELLIGENT_TIERING" } ] `, @@ -5007,11 +5101,11 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str RestoreRequest: &types.RestoreRequest{}, } if lifetime := opt["lifetime"]; lifetime != "" { - ilifetime, err := strconv.ParseInt(lifetime, 10, 64) - ilifetime32 := int32(ilifetime) + ilifetime, err := strconv.ParseInt(lifetime, 10, 32) if err != nil { return nil, fmt.Errorf("bad lifetime: %w", err) } + ilifetime32 := int32(ilifetime) req.RestoreRequest.Days = &ilifetime32 } if priority := opt["priority"]; priority != "" { @@ -5046,12 +5140,15 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str st.Status = "Not an S3 object" return } - if o.storageClass == nil || (*o.storageClass != "GLACIER" && *o.storageClass != "DEEP_ARCHIVE") { - st.Status = "Not GLACIER or DEEP_ARCHIVE storage class" + if o.storageClass == nil || (*o.storageClass != "GLACIER" && *o.storageClass != "DEEP_ARCHIVE" && *o.storageClass != "INTELLIGENT_TIERING") { + st.Status = "Not GLACIER or DEEP_ARCHIVE or INTELLIGENT_TIERING storage class" return } bucket, bucketPath := o.split() reqCopy := req + if *o.storageClass == "INTELLIGENT_TIERING" { + reqCopy.RestoreRequest.Days = nil + } reqCopy.Bucket = &bucket reqCopy.Key = &bucketPath reqCopy.VersionId = o.versionID @@ -5732,7 +5829,7 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options ContentEncoding: header("Content-Encoding"), ContentLanguage: header("Content-Language"), ContentType: header("Content-Type"), - StorageClass: types.StorageClass(*header("X-Amz-Storage-Class")), + StorageClass: types.StorageClass(deref(header("X-Amz-Storage-Class"))), } o.setMetaData(&head) return resp.Body, err @@ -5975,7 +6072,13 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader if do, ok := reader.(pool.DelayAccountinger); ok { // To figure out this number, do a transfer 
and if the accounted size is 0 or a // multiple of what it should be, increase or decrease this number. - do.DelayAccounting(3) + // + // For transfers over https the SDK does not sign the body whereas over http it does + if len(w.f.opt.Endpoint) >= 5 && strings.EqualFold(w.f.opt.Endpoint[:5], "http:") { + do.DelayAccounting(3) + } else { + do.DelayAccounting(2) + } } // create checksum of buffer for integrity checking @@ -6009,6 +6112,10 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader SSECustomerKey: w.multiPartUploadInput.SSECustomerKey, SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5, } + if w.f.opt.DirectoryBucket { + // Directory buckets do not support "Content-Md5" header + uploadPartReq.ContentMD5 = nil + } var uout *s3.UploadPartOutput err = w.f.pacer.Call(func() (bool, error) { // rewind the reader on retry and after reading md5 @@ -6285,7 +6392,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum { // Set the md5sum as metadata on the object if // - a multipart upload - // - the Etag is not an MD5, eg when using SSE/SSE-C + // - the Etag is not an MD5, eg when using SSE/SSE-C or directory buckets // provided checksums aren't disabled ui.req.Metadata[metaMD5Hash] = md5sumBase64 } @@ -6300,7 +6407,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [ if size >= 0 { ui.req.ContentLength = &size } - if md5sumBase64 != "" { + if md5sumBase64 != "" && !o.fs.opt.DirectoryBucket { ui.req.ContentMD5 = &md5sumBase64 } if o.fs.opt.RequesterPays { diff --git a/backend/swift/swift.go b/backend/swift/swift.go index 53aab56d4..452d5f11a 100644 --- a/backend/swift/swift.go +++ b/backend/swift/swift.go @@ -883,7 +883,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( // About gets quota information func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { - var total, objects int64 + var used, objects, total int64 if f.rootContainer != "" { var container swift.Container err = f.pacer.Call(func() (bool, error) { @@ -893,8 +893,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { if err != nil { return nil, fmt.Errorf("container info failed: %w", err) } - total = container.Bytes + used = container.Bytes objects = container.Count + total = container.QuotaBytes } else { var containers []swift.Container err = f.pacer.Call(func() (bool, error) { @@ -905,14 +906,19 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) { return nil, fmt.Errorf("container listing failed: %w", err) } for _, c := range containers { - total += c.Bytes + used += c.Bytes objects += c.Count + total += c.QuotaBytes } } usage = &fs.Usage{ - Used: fs.NewUsageValue(total), // bytes in use + Used: fs.NewUsageValue(used), // bytes in use Objects: fs.NewUsageValue(objects), // objects in use } + if total > 0 { + usage.Total = fs.NewUsageValue(total) + usage.Free = fs.NewUsageValue(total - used) + } return usage, nil } @@ -1410,14 +1416,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read return } -// min returns the smallest of x, y -func min(x, y int64) int64 { - if x < y { - return x - } - return y -} - // Get the segments for a large object // // It returns the names of the segments and the container that they live in diff --git a/backend/webdav/chunking.go b/backend/webdav/chunking.go index 4cea79838..379079cf9 100644 --- 
a/backend/webdav/chunking.go +++ b/backend/webdav/chunking.go @@ -14,21 +14,30 @@ import ( "io" "net/http" "path" + "time" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" ) -func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) { +func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error, sleepTime *time.Duration, wasLocked *bool) (bool, error) { // Not found. Can be returned by NextCloud when merging chunks of an upload. if resp != nil && resp.StatusCode == 404 { + if *wasLocked { + // Assume a 404 error after we've received a 423 error is actually a success + return false, nil + } return true, err } // 423 LOCKED if resp != nil && resp.StatusCode == 423 { - return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err) + *wasLocked = true + fs.Logf(f, "Sleeping for %v to wait for chunks to be merged after 423 error", *sleepTime) + time.Sleep(*sleepTime) + *sleepTime *= 2 + return true, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err) } return f.shouldRetry(ctx, resp, err) @@ -180,9 +189,11 @@ func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs } opts.ExtraHeaders = o.extraHeaders(ctx, src) opts.ExtraHeaders["Destination"] = destinationURL.String() + sleepTime := 5 * time.Second + wasLocked := false err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.Call(ctx, &opts) - return o.fs.shouldRetryChunkMerge(ctx, resp, err) + return o.fs.shouldRetryChunkMerge(ctx, resp, err, &sleepTime, &wasLocked) }) if err != nil { return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err) diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go index 798cccd6c..7171491ea 100644 --- a/backend/yandex/yandex.go +++ b/backend/yandex/yandex.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "log" "net/http" "net/url" "path" @@ -26,6 +25,7 @@ import ( "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/oauthutil" "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/readers" "github.com/rclone/rclone/lib/rest" "golang.org/x/oauth2" @@ -39,6 +39,8 @@ const ( minSleep = 10 * time.Millisecond maxSleep = 2 * time.Second // may need to be increased, testing needed decayConstant = 2 // bigger for slower decay, exponential + + userAgentTemplate = `Yandex.Disk {"os":"windows","dtype":"ydisk3","vsn":"3.2.37.4977","id":"6BD01244C7A94456BBCEE7EEC990AEAD","id2":"0F370CD40C594A4783BC839C846B999C","session_id":"%s"}` ) // Globals @@ -79,15 +81,22 @@ func init() { // it doesn't seem worth making an exception for this Default: (encoder.Display | encoder.EncodeInvalidUtf8), + }, { + Name: "spoof_ua", + Help: "Set the user agent to match an official version of the Yandex Disk client.
May help with upload performance.", + Default: true, + Advanced: true, + Hide: fs.OptionHideConfigurator, }}...), }) } // Options defines the configuration for this backend type Options struct { - Token string `config:"token"` - HardDelete bool `config:"hard_delete"` - Enc encoder.MultiEncoder `config:"encoding"` + Token string `config:"token"` + HardDelete bool `config:"hard_delete"` + Enc encoder.MultiEncoder `config:"encoding"` + SpoofUserAgent bool `config:"spoof_ua"` } // Fs represents a remote yandex @@ -254,6 +263,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e return nil, err } + ctx, ci := fs.AddConfig(ctx) + if fs.ConfigOptionsInfo.Get("user_agent").IsDefault() && opt.SpoofUserAgent { + randomSessionID, _ := random.Password(128) + ci.UserAgent = fmt.Sprintf(userAgentTemplate, randomSessionID) + } + token, err := oauthutil.GetToken(name, m) if err != nil { return nil, fmt.Errorf("couldn't read OAuth token: %w", err) @@ -267,14 +282,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e if err != nil { return nil, fmt.Errorf("couldn't save OAuth token: %w", err) } - log.Printf("Automatically upgraded OAuth config.") + fs.Logf(nil, "Automatically upgraded OAuth config.") } oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { return nil, fmt.Errorf("failed to configure Yandex: %w", err) } - ci := fs.GetConfig(ctx) f := &Fs{ name: name, opt: *opt, diff --git a/backend/zoho/api/types.go b/backend/zoho/api/types.go index 5a516b6fb..efbb4c564 100644 --- a/backend/zoho/api/types.go +++ b/backend/zoho/api/types.go @@ -2,6 +2,8 @@ package api import ( + "encoding/json" + "fmt" "strconv" "time" ) @@ -12,7 +14,12 @@ type Time time.Time // UnmarshalJSON turns JSON into a Time func (t *Time) UnmarshalJSON(data []byte) error { - millis, err := strconv.ParseInt(string(data), 10, 64) + s := string(data) + // If the time is a quoted string, strip quotes + if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' { + s = s[1 : len(s)-1] + } + millis, err := strconv.ParseInt(s, 10, 64) if err != nil { return err } @@ -20,8 +27,8 @@ func (t *Time) UnmarshalJSON(data []byte) error { return nil } -// User is a Zoho user we are only interested in the ZUID here -type User struct { +// OAuthUser is a Zoho user - we are only interested in the ZUID here +type OAuthUser struct { FirstName string `json:"First_Name"` Email string `json:"Email"` LastName string `json:"Last_Name"` @@ -29,12 +36,41 @@ type User struct { ZUID int64 `json:"ZUID"` } -// TeamWorkspace represents a Zoho Team or workspace +// UserInfoResponse is returned by the user info API. +type UserInfoResponse struct { + Data struct { + ID string `json:"id"` + Type string `json:"users"` + Attributes struct { + EmailID string `json:"email_id"` + Edition string `json:"edition"` + } `json:"attributes"` + } `json:"data"` +} + +// PrivateSpaceInfo gives basic information about a user's private folder. +type PrivateSpaceInfo struct { + Data struct { + ID string `json:"id"` + Type string `json:"string"` + } `json:"data"` +} + +// CurrentTeamInfo gives information about the current user in a team.
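The api.Time change above makes Zoho timestamps decode whether the server sends bare or quoted milliseconds. A quick usage sketch; the field name is taken from the TeamWorkspace struct below, the timestamp value is illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/rclone/rclone/backend/zoho/api"
)

func main() {
	for _, payload := range []string{
		`{"created_time_in_millisecond":1717171717000}`,   // bare number
		`{"created_time_in_millisecond":"1717171717000"}`, // quoted string
	} {
		var v struct {
			Created api.Time `json:"created_time_in_millisecond"`
		}
		if err := json.Unmarshal([]byte(payload), &v); err != nil {
			fmt.Println("unmarshal:", err)
			continue
		}
		// both forms decode to the same instant
		fmt.Println(time.Time(v.Created).UTC())
	}
}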
+type CurrentTeamInfo struct { + Data struct { + ID string `json:"id"` + Type string `json:"string"` + } +} + +// TeamWorkspace represents a Zoho Team, Workspace or Private Space // It's actually a VERY large json object that differs between -// Team and Workspace but we are only interested in some fields -// that both of them have so we can use the same struct for both +// Team and Workspace and Private Space but we are only interested in some fields +// that all of them have so we can use the same struct. type TeamWorkspace struct { ID string `json:"id"` + Type string `json:"type"` Attributes struct { Name string `json:"name"` Created Time `json:"created_time_in_millisecond"` @@ -42,7 +78,8 @@ } `json:"attributes"` } -// TeamWorkspaceResponse is the response by the list teams api +// TeamWorkspaceResponse is the response from the list teams API, list workspace API +// or list team private spaces API. type TeamWorkspaceResponse struct { TeamWorkspace []TeamWorkspace `json:"data"` } @@ -84,6 +121,73 @@ type ItemList struct { Items []Item `json:"data"` } +// UploadFileInfo is what the FileInfo field in the UploadInfo struct decodes to +type UploadFileInfo struct { + OrgID string `json:"ORG_ID"` + ResourceID string `json:"RESOURCE_ID"` + LibraryID string `json:"LIBRARY_ID"` + Md5Checksum string `json:"MD5_CHECKSUM"` + ParentModelID string `json:"PARENT_MODEL_ID"` + ParentID string `json:"PARENT_ID"` + ResourceType int `json:"RESOURCE_TYPE"` + WmsSentTime string `json:"WMS_SENT_TIME"` + TabID string `json:"TAB_ID"` + Owner string `json:"OWNER"` + ResourceGroup string `json:"RESOURCE_GROUP"` + ParentModelName string `json:"PARENT_MODEL_NAME"` + Size int64 `json:"size"` + Operation string `json:"OPERATION"` + EventID string `json:"EVENT_ID"` + AuditInfo struct { + VersionInfo struct { + VersionAuthors []string `json:"versionAuthors"` + VersionID string `json:"versionId"` + IsMinorVersion bool `json:"isMinorVersion"` + VersionTime Time `json:"versionTime"` + VersionAuthorZuid []string `json:"versionAuthorZuid"` + VersionNotes string `json:"versionNotes"` + VersionNumber string `json:"versionNumber"` + } `json:"versionInfo"` + Resource struct { + Owner string `json:"owner"` + CreatedTime Time `json:"created_time"` + Creator string `json:"creator"` + ServiceType int `json:"service_type"` + Extension string `json:"extension"` + StatusChangeTime Time `json:"status_change_time"` + ResourceType int `json:"resource_type"` + Name string `json:"name"` + } `json:"resource"` + ParentInfo struct { + ParentName string `json:"parentName"` + ParentID string `json:"parentId"` + ParentType int `json:"parentType"` + } `json:"parentInfo"` + LibraryInfo struct { + LibraryName string `json:"libraryName"` + LibraryID string `json:"libraryId"` + LibraryType int `json:"libraryType"` + } `json:"libraryInfo"` + UpdateType string `json:"updateType"` + StatusCode string `json:"statusCode"` + } `json:"AUDIT_INFO"` + ZUID int64 `json:"ZUID"` + TeamID string `json:"TEAM_ID"` +} + +// GetModTime fetches the modification time of the upload +// +// This tries a few places and if all fail returns the current time +func (ufi *UploadFileInfo) GetModTime() Time { + if t := ufi.AuditInfo.Resource.CreatedTime; !time.Time(t).IsZero() { + return t + } + if t := ufi.AuditInfo.Resource.StatusChangeTime; !time.Time(t).IsZero() { + return t + } + return Time(time.Now()) +} + // UploadInfo is a simplified and slightly different version of // the Item struct only used in the response to uploads type UploadInfo
struct { @@ -91,14 +195,53 @@ type UploadInfo struct { ParentID string `json:"parent_id"` FileName string `json:"notes.txt"` RessourceID string `json:"resource_id"` + Permalink string `json:"Permalink"` + FileInfo string `json:"File INFO"` // JSON encoded UploadFileInfo } `json:"attributes"` } +// GetUploadFileInfo decodes the embedded FileInfo +func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) { + var ufi UploadFileInfo + err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi) + if err != nil { + return nil, fmt.Errorf("failed to decode FileInfo: %w", err) + } + return &ufi, nil +} + +// LargeUploadInfo is once again a slightly different version of UploadInfo +// returned as part of a LargeUploadResponse by the large file upload API. +type LargeUploadInfo struct { + Attributes struct { + ParentID string `json:"parent_id"` + FileName string `json:"file_name"` + RessourceID string `json:"resource_id"` + FileInfo string `json:"file_info"` + } `json:"attributes"` +} + +// GetUploadFileInfo decodes the embedded FileInfo +func (ui *LargeUploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) { + var ufi UploadFileInfo + err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi) + if err != nil { + return nil, fmt.Errorf("failed to decode FileInfo: %w", err) + } + return &ufi, nil +} + // UploadResponse is the response to a file Upload type UploadResponse struct { Uploads []UploadInfo `json:"data"` } +// LargeUploadResponse is the response returned by the large file upload API. +type LargeUploadResponse struct { + Uploads []LargeUploadInfo `json:"data"` + Status string `json:"status"` +} + // WriteMetadataRequest is used to write metadata for a // single item type WriteMetadataRequest struct { diff --git a/backend/zoho/zoho.go b/backend/zoho/zoho.go index e4e69353f..9561f56c0 100644 --- a/backend/zoho/zoho.go +++ b/backend/zoho/zoho.go @@ -14,6 +14,7 @@ import ( "strings" "time" + "github.com/google/uuid" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/pacer" "github.com/rclone/rclone/lib/random" @@ -36,9 +37,11 @@ const ( rcloneClientID = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B" rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA" minSleep = 10 * time.Millisecond - maxSleep = 2 * time.Second + maxSleep = 60 * time.Second decayConstant = 2 // bigger for slower decay, exponential configRootID = "root_folder_id" + + defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB ) // Globals @@ -50,6 +53,7 @@ var ( "WorkDrive.team.READ", "WorkDrive.workspace.READ", "WorkDrive.files.ALL", + "ZohoFiles.files.ALL", }, Endpoint: oauth2.Endpoint{ AuthURL: "https://accounts.zoho.eu/oauth/v2/auth", @@ -61,6 +65,8 @@ var ( RedirectURL: oauthutil.RedirectLocalhostURL, } rootURL = "https://workdrive.zoho.eu/api/v1" + downloadURL = "https://download.zoho.eu/v1/workdrive" + uploadURL = "https://upload.zoho.eu/workdrive-api/v1/" accountsURL = "https://accounts.zoho.eu" ) @@ -79,7 +85,7 @@ func init() { getSrvs := func() (authSrv, apiSrv *rest.Client, err error) { oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig) if err != nil { - return nil, nil, fmt.Errorf("failed to load oAuthClient: %w", err) + return nil, nil, fmt.Errorf("failed to load OAuth client: %w", err) } authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL) apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL) @@ -88,12 +94,12 @@ switch config.State { case "": - return oauthutil.ConfigOut("teams", &oauthutil.Options{ + return
oauthutil.ConfigOut("type", &oauthutil.Options{ OAuth2Config: oauthConfig, // No refresh token unless ApprovalForce is set OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce}, }) - case "teams": + case "type": // We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants // its own custom type token, err := oauthutil.GetToken(name, m) @@ -108,24 +114,43 @@ } } - authSrv, apiSrv, err := getSrvs() + _, apiSrv, err := getSrvs() if err != nil { return nil, err } - // Get the user Info - opts := rest.Opts{ - Method: "GET", - Path: "/oauth/user/info", + userInfo, err := getUserInfo(ctx, apiSrv) + if err != nil { + return nil, err } - var user api.User - _, err = authSrv.CallJSON(ctx, &opts, nil, &user) + // If it is a personal Edition, only one private Space is available, so configure that directly. + if userInfo.Data.Attributes.Edition == "PERSONAL" { + return fs.ConfigResult("private_space", userInfo.Data.ID) + } + // Otherwise go to team selection + return fs.ConfigResult("team", userInfo.Data.ID) + case "private_space": + _, apiSrv, err := getSrvs() + if err != nil { + return nil, err + } + + workspaces, err := getPrivateSpaces(ctx, config.Result, apiSrv) + if err != nil { + return nil, err + } + return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) { + workspace := workspaces[i] + return workspace.ID, workspace.Name + }) + case "team": + _, apiSrv, err := getSrvs() if err != nil { return nil, err } // Get the teams - teams, err := listTeams(ctx, user.ZUID, apiSrv) + teams, err := listTeams(ctx, config.Result, apiSrv) if err != nil { return nil, err } @@ -143,9 +168,19 @@ if err != nil { return nil, err } + currentTeamInfo, err := getCurrentTeamInfo(ctx, teamID, apiSrv) + if err != nil { + return nil, err + } + privateSpaces, err := getPrivateSpaces(ctx, currentTeamInfo.Data.ID, apiSrv) + if err != nil { + return nil, err + } + workspaces = append(workspaces, privateSpaces...)
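The config flow above hits several discovery endpoints with the same pattern: a GET through rclone's rest.Client with an explicit JSON:API Accept header. A condensed sketch of that shared call shape; the helper name and endpoint path are illustrative, the rest.Opts fields and CallJSON signature are as used throughout this diff:

package zohosketch

import (
	"context"
	"net/http"

	"github.com/rclone/rclone/lib/rest"
)

// getJSON is the call shape shared by getUserInfo, getCurrentTeamInfo,
// getPrivateSpaces and listTeams: a GET that decodes a JSON:API response
// into result.
func getJSON(ctx context.Context, srv *rest.Client, path string, result interface{}) (*http.Response, error) {
	opts := rest.Opts{
		Method:       "GET",
		Path:         path,
		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
	}
	return srv.CallJSON(ctx, &opts, nil, result)
}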
+ return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) { workspace := workspaces[i] - return workspace.ID, workspace.Attributes.Name + return workspace.ID, workspace.Name }) case "workspace_end": workspaceID := config.Result @@ -179,7 +214,13 @@ browser.`, }, { Value: "com.au", Help: "Australia", - }}}, { + }}, + }, { + Name: "upload_cutoff", + Help: "Cutoff for switching to large file upload api (>= 10 MiB).", + Default: fs.SizeSuffix(defaultUploadCutoff), + Advanced: true, + }, { Name: config.ConfigEncoding, Help: config.ConfigEncodingHelp, Advanced: true, @@ -193,6 +234,7 @@ browser.`, // Options defines the configuration for this backend type Options struct { + UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` RootFolderID string `config:"root_folder_id"` Region string `config:"region"` Enc encoder.MultiEncoder `config:"encoding"` @@ -200,13 +242,15 @@ type Options struct { // Fs represents a remote workdrive type Fs struct { - name string // name of this remote - root string // the path we are working on - opt Options // parsed options - features *fs.Features // optional features - srv *rest.Client // the connection to the server - dirCache *dircache.DirCache // Map of directory path to directory id - pacer *fs.Pacer // pacer for API calls + name string // name of this remote + root string // the path we are working on + opt Options // parsed options + features *fs.Features // optional features + srv *rest.Client // the connection to the server + downloadsrv *rest.Client // the connection to the download server + uploadsrv *rest.Client // the connection to the upload server + dirCache *dircache.DirCache // Map of directory path to directory id + pacer *fs.Pacer // pacer for API calls } // Object describes a Zoho WorkDrive object @@ -229,6 +273,8 @@ func setupRegion(m configmap.Mapper) error { return errors.New("no region set") } rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region) + downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region) + uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region) accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region) oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region) oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region) @@ -237,11 +283,63 @@ func setupRegion(m configmap.Mapper) error { // ------------------------------------------------------------ -func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWorkspace, error) { +type workspaceInfo struct { + ID string + Name string +} + +func getUserInfo(ctx context.Context, srv *rest.Client) (*api.UserInfoResponse, error) { + var userInfo api.UserInfoResponse + opts := rest.Opts{ + Method: "GET", + Path: "/users/me", + ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, + } + _, err := srv.CallJSON(ctx, &opts, nil, &userInfo) + if err != nil { + return nil, err + } + return &userInfo, nil +} + +func getCurrentTeamInfo(ctx context.Context, teamID string, srv *rest.Client) (*api.CurrentTeamInfo, error) { + var currentTeamInfo api.CurrentTeamInfo + opts := rest.Opts{ + Method: "GET", + Path: "/teams/" + teamID + "/currentuser", + ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, + } + _, err := srv.CallJSON(ctx, &opts, nil, ¤tTeamInfo) + if err != nil { + return nil, err + } + return ¤tTeamInfo, err +} + +func getPrivateSpaces(ctx 
context.Context, teamUserID string, srv *rest.Client) ([]workspaceInfo, error) { + var privateSpaceListResponse api.TeamWorkspaceResponse + opts := rest.Opts{ + Method: "GET", + Path: "/users/" + teamUserID + "/privatespace", + ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, + } + _, err := srv.CallJSON(ctx, &opts, nil, &privateSpaceListResponse) + if err != nil { + return nil, err + } + + workspaceList := make([]workspaceInfo, 0, len(privateSpaceListResponse.TeamWorkspace)) + for _, workspace := range privateSpaceListResponse.TeamWorkspace { + workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: "My Space"}) + } + return workspaceList, err +} + +func listTeams(ctx context.Context, zuid string, srv *rest.Client) ([]api.TeamWorkspace, error) { var teamList api.TeamWorkspaceResponse opts := rest.Opts{ Method: "GET", - Path: "/users/" + strconv.FormatInt(uid, 10) + "/teams", + Path: "/users/" + zuid + "/teams", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } _, err := srv.CallJSON(ctx, &opts, nil, &teamList) @@ -251,18 +349,24 @@ func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWork return teamList.TeamWorkspace, nil } -func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api.TeamWorkspace, error) { - var workspaceList api.TeamWorkspaceResponse +func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]workspaceInfo, error) { + var workspaceListResponse api.TeamWorkspaceResponse opts := rest.Opts{ Method: "GET", Path: "/teams/" + teamID + "/workspaces", ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"}, } - _, err := srv.CallJSON(ctx, &opts, nil, &workspaceList) + _, err := srv.CallJSON(ctx, &opts, nil, &workspaceListResponse) if err != nil { return nil, err } - return workspaceList.TeamWorkspace, nil + + workspaceList := make([]workspaceInfo, 0, len(workspaceListResponse.TeamWorkspace)) + for _, workspace := range workspaceListResponse.TeamWorkspace { + workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: workspace.Attributes.Name}) + } + + return workspaceList, nil } // -------------------------------------------------------------- @@ -285,13 +389,20 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err } authRetry := false + // Bail out early if we are missing OAuth Scopes. + if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Status, "INVALID_OAUTHSCOPE") { + fs.Errorf(nil, "zoho: missing OAuth Scope. Run rclone config reconnect to fix this issue.") + return false, err + } + if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") { authRetry = true fs.Debugf(nil, "Should retry: %v", err) } if resp != nil && resp.StatusCode == 429 { - fs.Errorf(nil, "zoho: rate limit error received, sleeping for 60s: %v", err) - time.Sleep(60 * time.Second) + err = pacer.RetryAfterError(err, 60*time.Second) + fs.Debugf(nil, "Too many requests. 
Trying again in %d seconds.", 60) + return true, err } return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err } @@ -389,6 +500,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e if err := configstruct.Set(m, opt); err != nil { return nil, err } + + if opt.UploadCutoff < defaultUploadCutoff { + return nil, fmt.Errorf("zoho: upload cutoff (%v) must be greater than or equal to %v", opt.UploadCutoff, fs.SizeSuffix(defaultUploadCutoff)) + } + err := setupRegion(m) if err != nil { return nil, err @@ -401,11 +517,13 @@ } f := &Fs{ - name: name, - root: root, - opt: *opt, - srv: rest.NewClient(oAuthClient).SetRoot(rootURL), - pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + name: name, + root: root, + opt: *opt, + srv: rest.NewClient(oAuthClient).SetRoot(rootURL), + downloadsrv: rest.NewClient(oAuthClient).SetRoot(downloadURL), + uploadsrv: rest.NewClient(oAuthClient).SetRoot(uploadURL), + pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), } f.features = (&fs.Features{ CanHaveEmptyDirectories: true, @@ -643,9 +761,61 @@ func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTim return } +func (f *Fs) uploadLargeFile(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) { + opts := rest.Opts{ + Method: "POST", + Path: "/stream/upload", + Body: in, + ContentLength: &size, + ContentType: "application/octet-stream", + Options: options, + ExtraHeaders: map[string]string{ + "x-filename": url.QueryEscape(name), + "x-parent_id": parent, + "override-name-exist": "true", + "upload-id": uuid.New().String(), + "x-streammode": "1", + }, + } + + var err error + var resp *http.Response + var uploadResponse *api.LargeUploadResponse + err = f.pacer.CallNoRetry(func() (bool, error) { + resp, err = f.uploadsrv.CallJSON(ctx, &opts, nil, &uploadResponse) + return shouldRetry(ctx, resp, err) + }) + if err != nil { + return nil, fmt.Errorf("large file upload error: %w", err) + } + if len(uploadResponse.Uploads) != 1 { + return nil, errors.New("upload: invalid response") + } + upload := uploadResponse.Uploads[0] + uploadInfo, err := upload.GetUploadFileInfo() + if err != nil { + return nil, fmt.Errorf("upload error: %w", err) + } + + // Fill in the api.Item from the api.UploadFileInfo + var info api.Item + info.ID = upload.Attributes.RessourceID + info.Attributes.Name = upload.Attributes.FileName + // info.Attributes.Type = not used + info.Attributes.IsFolder = false + // info.Attributes.CreatedTime = not used + info.Attributes.ModifiedTime = uploadInfo.GetModTime() + // info.Attributes.UploadedTime = 0 not used + info.Attributes.StorageInfo.Size = uploadInfo.Size + info.Attributes.StorageInfo.FileCount = 0 + info.Attributes.StorageInfo.FolderCount = 0 + + return &info, nil +} + func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) { params := url.Values{} - params.Set("filename", name) + params.Set("filename", url.QueryEscape(name)) params.Set("parent_id", parent) params.Set("override-name-exist", strconv.FormatBool(true)) formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name) @@ -677,25 +847,26
@@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, if len(uploadResponse.Uploads) != 1 { return nil, errors.New("upload: invalid response") } - // Received meta data is missing size so we have to read it again. - // It doesn't always appear on first read so try again if necessary - var info *api.Item - const maxTries = 10 - sleepTime := 100 * time.Millisecond - for i := 0; i < maxTries; i++ { - info, err = f.readMetaDataForID(ctx, uploadResponse.Uploads[0].Attributes.RessourceID) - if err != nil { - return nil, err - } - if info.Attributes.StorageInfo.Size != 0 || size == 0 { - break - } - fs.Debugf(f, "Size not available yet for %q - try again in %v (try %d/%d)", name, sleepTime, i+1, maxTries) - time.Sleep(sleepTime) - sleepTime *= 2 + upload := uploadResponse.Uploads[0] + uploadInfo, err := upload.GetUploadFileInfo() + if err != nil { + return nil, fmt.Errorf("upload error: %w", err) } - return info, nil + // Fill in the api.Item from the api.UploadFileInfo + var info api.Item + info.ID = upload.Attributes.RessourceID + info.Attributes.Name = upload.Attributes.FileName + // info.Attributes.Type = not used + info.Attributes.IsFolder = false + // info.Attributes.CreatedTime = not used + info.Attributes.ModifiedTime = uploadInfo.GetModTime() + // info.Attributes.UploadedTime = 0 not used + info.Attributes.StorageInfo.Size = uploadInfo.Size + info.Attributes.StorageInfo.FileCount = 0 + info.Attributes.StorageInfo.FolderCount = 0 + + return &info, nil } // Put the object into the container @@ -704,21 +875,40 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, // // The new object may have been created if an error is returned func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { - size := src.Size() - remote := src.Remote() + existingObj, err := f.NewObject(ctx, src.Remote()) + switch err { + case nil: + return existingObj, existingObj.Update(ctx, in, src, options...) + case fs.ErrorObjectNotFound: + size := src.Size() + remote := src.Remote() - // Create the directory for the object if it doesn't exist - leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) - if err != nil { + // Create the directory for the object if it doesn't exist + leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true) + if err != nil { + return nil, err + } + + // use normal upload API for small sizes (<10MiB) + if size < int64(f.opt.UploadCutoff) { + info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...) + if err != nil { + return nil, err + } + + return f.newObjectWithInfo(ctx, remote, info) + } + + // large file API otherwise + info, err := f.uploadLargeFile(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...) + if err != nil { + return nil, err + } + + return f.newObjectWithInfo(ctx, remote, info) + default: return nil, err } - - // Upload the file - info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...) 
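Stepping back to the shouldRetry change above: replacing the in-line time.Sleep with pacer.RetryAfterError lets the shared pacer schedule the retry instead of blocking the caller. A minimal sketch of that shape, with the status-code plumbing simplified; pacer.RetryAfterError and fs.Debugf are used exactly as in the diff:

package retrysketch

import (
	"net/http"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

// shouldRetry tells the pacer to wait out a rate limit rather than
// sleeping in-line, so concurrent calls back off together.
func shouldRetry(resp *http.Response, err error) (bool, error) {
	if resp != nil && resp.StatusCode == http.StatusTooManyRequests {
		fs.Debugf(nil, "rate limited, retrying in 60s")
		return true, pacer.RetryAfterError(err, 60*time.Second)
	}
	return false, err
}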
- if err != nil { - return nil, err - } - return f.newObjectWithInfo(ctx, remote, info) } // Mkdir creates the container if it doesn't exist @@ -1158,7 +1348,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read Options: options, } err = o.fs.pacer.Call(func() (bool, error) { - resp, err = o.fs.srv.Call(ctx, &opts) + resp, err = o.fs.downloadsrv.Call(ctx, &opts) return shouldRetry(ctx, resp, err) }) if err != nil { @@ -1182,11 +1372,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op return err } - // Overwrite the old file - info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...) + // use normal upload API for small sizes (<10MiB) + if size < int64(o.fs.opt.UploadCutoff) { + info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...) + if err != nil { + return err + } + + return o.setMetaData(info) + } + + // large file API otherwise + info, err := o.fs.uploadLargeFile(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...) if err != nil { return err } + return o.setMetaData(info) } diff --git a/backend/zoho/zoho_test.go b/backend/zoho/zoho_test.go index 1314440fa..75eb4a95b 100644 --- a/backend/zoho/zoho_test.go +++ b/backend/zoho/zoho_test.go @@ -11,7 +11,8 @@ import ( // TestIntegration runs integration tests against the remote func TestIntegration(t *testing.T) { fstests.Run(t, &fstests.Opt{ - RemoteName: "TestZoho:", - NilObject: (*zoho.Object)(nil), + RemoteName: "TestZoho:", + SkipInvalidUTF8: true, + NilObject: (*zoho.Object)(nil), }) } diff --git a/bin/make_backend_docs.py b/bin/make_backend_docs.py index 1871cc68b..c600469c0 100755 --- a/bin/make_backend_docs.py +++ b/bin/make_backend_docs.py @@ -21,17 +21,20 @@ def find_backends(): def output_docs(backend, out, cwd): """Output documentation for backend options to out""" out.flush() - subprocess.check_call(["./rclone", "help", "backend", backend], stdout=out) + subprocess.check_call(["./rclone", "--config=/notfound", "help", "backend", backend], stdout=out) def output_backend_tool_docs(backend, out, cwd): """Output documentation for backend tool to out""" out.flush() - subprocess.call(["./rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL) + subprocess.call(["./rclone", "--config=/notfound", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL) def alter_doc(backend): """Alter the documentation for backend""" rclone_bin_dir = Path(sys.path[0]).parent.absolute() doc_file = "docs/content/"+backend+".md" + doc_file2 = "docs/content/"+backend+"/_index.md" + if not os.path.exists(doc_file) and os.path.exists(doc_file2): + doc_file = doc_file2 if not os.path.exists(doc_file): raise ValueError("Didn't find doc file %s" % (doc_file,)) new_file = doc_file+"~new~" diff --git a/bin/make_manual.py b/bin/make_manual.py index 922bd5688..990ab7130 100755 --- a/bin/make_manual.py +++ b/bin/make_manual.py @@ -52,6 +52,7 @@ docs = [ "hidrive.md", "http.md", "imagekit.md", + "iclouddrive.md", "internetarchive.md", "jottacloud.md", "koofr.md", @@ -64,7 +65,7 @@ "azurefiles.md", "onedrive.md", "opendrive.md", - "oracleobjectstorage.md", + "oracleobjectstorage/_index.md", "qingstor.md", "quatrix.md", "sia.md", @@ -81,7 +82,6 @@ "smb.md", "storj.md", "sugarsync.md", - "tardigrade.md", # stub only to redirect to storj.md "ulozto.md", "uptobox.md", "union.md", @@ -159,6 +159,7 @@ def read_doc(doc): def check_docs(docpath): 
"""Check all the docs are in docpath""" files = set(f for f in os.listdir(docpath) if f.endswith(".md")) + files.update(f for f in docs if os.path.exists(os.path.join(docpath,f))) files -= set(ignore_docs) docs_set = set(docs) if files == docs_set: diff --git a/bin/not-in-stable.go b/bin/not-in-stable.go index dc685cb6f..57a025760 100644 --- a/bin/not-in-stable.go +++ b/bin/not-in-stable.go @@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) { cmd := exec.Command("git", "log", "--oneline", from+".."+to) out, err := cmd.Output() if err != nil { - log.Fatalf("failed to run git log %s: %v", from+".."+to, err) + log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log. } logMap = map[string]string{} logs = []string{} @@ -39,7 +39,7 @@ } match := logRe.FindSubmatch(line) if match == nil { - log.Fatalf("failed to parse line: %q", line) + log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log. } var hash, logMessage = string(match[1]), string(match[2]) logMap[logMessage] = hash @@ -52,12 +52,12 @@ func main() { flag.Parse() args := flag.Args() if len(args) != 0 { - log.Fatalf("Syntax: %s", os.Args[0]) + log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log. } // v1.54.0 versionBytes, err := os.ReadFile("VERSION") if err != nil { - log.Fatalf("Failed to read version: %v", err) + log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log. } if versionBytes[0] == 'v' { versionBytes = versionBytes[1:] @@ -65,7 +65,7 @@ versionBytes = bytes.TrimSpace(versionBytes) semver := semver.New(string(versionBytes)) stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1) - log.Printf("Finding commits in %v not in stable %s", semver, stable) + log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log. masterMap, masterLogs := readCommits(stable+".0", "master") stableMap, _ := readCommits(stable+".0", stable+"-stable") for _, logMessage := range masterLogs { diff --git a/bin/rules.go b/bin/rules.go new file mode 100644 index 000000000..05645adf5 --- /dev/null +++ b/bin/rules.go @@ -0,0 +1,51 @@ +// Ruleguard file implementing custom linting rules.
+// +// Note that when used from golangci-lint (using the gocritic linter configured +// with the ruleguard check), because rule files are not handled by +// golangci-lint itself, changes will not invalidate the golangci-lint cache, +// and you must manually clean the cache (golangci-lint cache clean) for them to +// be considered, as explained here: +// https://www.quasilyte.dev/blog/post/ruleguard/#using-from-the-golangci-lint +// +// Note that this file is ignored from build with a build constraint, but using +// a different tag than "ignore" to avoid go mod tidy making dsl an indirect +// dependency, as explained here: +// https://github.com/quasilyte/go-ruleguard?tab=readme-ov-file#troubleshooting + +//go:build ruleguard +// +build ruleguard + +// Package gorules implements custom linting rules using ruleguard +package gorules + +import "github.com/quasilyte/go-ruleguard/dsl" + +// Suggest rewriting "log.(Print|Fatal|Panic)(f|ln)?" to +// "fs.(Printf|Fatalf|Panicf)", and do it if running golangci-lint with +// argument --fix. The suggestion wraps a single non-string argument or +// variadic arguments in fmt.Sprint to be compatible with the format string +// argument of the fs functions. +// +// Caveats: +// - After applying the suggestions, imports may have to be fixed manually, +// removing unused "log", adding "github.com/rclone/rclone/fs" and "fmt", +// and if there was a variable named "fs" or "fmt" in the scope the name +// clash must be fixed. +// - Suggested code is incorrect when within the fs package itself, due to the +// "fs." prefix. Could handle it using the condition +// ".Where(m.File().PkgPath.Matches(`github.com/rclone/rclone/fs`))" +// but not sure how to avoid duplicating all checks with and without this +// condition, so this hasn't been done yet.
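+// +// For example (an illustrative rewrite, not part of the rule file itself), +// running golangci-lint with --fix would change +// log.Printf("copied %d files", n) +// into +// fs.Logf(nil, "copied %d files", n)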
+func useFsLog(m dsl.Matcher) { + m.Match(`log.Print($x)`, `log.Println($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Log(nil, $x)`) + m.Match(`log.Print($*args)`, `log.Println($*args)`).Suggest(`fs.Log(nil, fmt.Sprint($args))`) + m.Match(`log.Printf($*args)`).Suggest(`fs.Logf(nil, $args)`) + + m.Match(`log.Fatal($x)`, `log.Fatalln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Fatal(nil, $x)`) + m.Match(`log.Fatal($*args)`, `log.Fatalln($*args)`).Suggest(`fs.Fatal(nil, fmt.Sprint($args))`) + m.Match(`log.Fatalf($*args)`).Suggest(`fs.Fatalf(nil, $args)`) + + m.Match(`log.Panic($x)`, `log.Panicln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Panic(nil, $x)`) + m.Match(`log.Panic($*args)`, `log.Panicln($*args)`).Suggest(`fs.Panic(nil, fmt.Sprint($args))`) + m.Match(`log.Panicf($*args)`).Suggest(`fs.Panicf(nil, $args)`) +} diff --git a/cmd/bisync/bisync_test.go b/cmd/bisync/bisync_test.go index ba1e313e5..da89c3e20 100644 --- a/cmd/bisync/bisync_test.go +++ b/cmd/bisync/bisync_test.go @@ -10,12 +10,12 @@ import ( "errors" "flag" "fmt" - "log" "os" "path" "path/filepath" "regexp" "runtime" + "slices" "sort" "strconv" "strings" @@ -208,15 +208,16 @@ type bisyncTest struct { parent1 fs.Fs parent2 fs.Fs // global flags - argRemote1 string - argRemote2 string - noCompare bool - noCleanup bool - golden bool - debug bool - stopAt int - TestFn bisync.TestFunc - ignoreModtime bool // ignore modtimes when comparing final listings, for backends without support + argRemote1 string + argRemote2 string + noCompare bool + noCleanup bool + golden bool + debug bool + stopAt int + TestFn bisync.TestFunc + ignoreModtime bool // ignore modtimes when comparing final listings, for backends without support + ignoreBlankHash bool // ignore blank hashes for backends where we allow them to be blank } var color = bisync.Color @@ -232,7 +233,7 @@ func TestBisyncRemoteLocal(t *testing.T) { t.Skip("path1 and path2 are the same remote") } _, remote, cleanup, err := fstest.RandomRemote() - log.Printf("remote: %v", remote) + fs.Logf(nil, "remote: %v", remote) require.NoError(t, err) defer cleanup() testBisync(t, remote, *argRemote2) @@ -244,7 +245,7 @@ func TestBisyncLocalRemote(t *testing.T) { t.Skip("path1 and path2 are the same remote") } _, remote, cleanup, err := fstest.RandomRemote() - log.Printf("remote: %v", remote) + fs.Logf(nil, "remote: %v", remote) require.NoError(t, err) defer cleanup() testBisync(t, *argRemote2, remote) @@ -254,7 +255,7 @@ func TestBisyncLocalRemote(t *testing.T) { // (useful for testing server-side copy/move) func TestBisyncRemoteRemote(t *testing.T) { _, remote, cleanup, err := fstest.RandomRemote() - log.Printf("remote: %v", remote) + fs.Logf(nil, "remote: %v", remote) require.NoError(t, err) defer cleanup() testBisync(t, remote, remote) @@ -450,13 +451,13 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str for _, dir := range srcDirs { dirs = append(dirs, norm.NFC.String(dir.Remote())) } - log.Printf("checking initFs %s", initFs) + fs.Logf(nil, "checking initFs %s", initFs) fstest.CheckListingWithPrecision(b.t, initFs, items, dirs, initFs.Precision()) checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs1, initFs, true), "setting up path1") - log.Printf("checking Path1 %s", b.fs1) + fs.Logf(nil, "checking Path1 %s", b.fs1) fstest.CheckListingWithPrecision(b.t, b.fs1, items, dirs, b.fs1.Precision()) checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs2, initFs, true), "setting up path2") - log.Printf("checking path2 %s", b.fs2) + fs.Logf(nil, "checking path2 
%s", b.fs2) fstest.CheckListingWithPrecision(b.t, b.fs2, items, dirs, b.fs2.Precision()) // Create log file @@ -514,21 +515,21 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str require.NoError(b.t, err, "saving log file %s", savedLog) if b.golden && !b.stopped { - log.Printf("Store results to golden directory") + fs.Logf(nil, "Store results to golden directory") b.storeGolden() return } errorCount := 0 if b.noCompare { - log.Printf("Skip comparing results with golden directory") + fs.Logf(nil, "Skip comparing results with golden directory") errorCount = -2 } else { errorCount = b.compareResults() } if b.noCleanup { - log.Printf("Skip cleanup") + fs.Logf(nil, "Skip cleanup") } else { b.cleanupCase(ctx) } @@ -947,6 +948,10 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con if (!b.fs1.Features().CanHaveEmptyDirectories || !b.fs2.Features().CanHaveEmptyDirectories) && (b.testCase == "createemptysrcdirs" || b.testCase == "rmdirs") { b.t.Skip("skipping test as remote does not support empty dirs") } + ignoreHashBackends := []string{"TestWebdavNextcloud", "TestWebdavOwncloud", "TestAzureFiles"} // backends that support hashes but allow them to be blank + if slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs1.Name(), prefix) }) || slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs2.Name(), prefix) }) { + b.ignoreBlankHash = true + } if b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported { if b.testCase != "nomodtime" { b.t.Skip("skipping test as at least one remote does not support setting modtime") @@ -1383,24 +1388,24 @@ func (b *bisyncTest) compareResults() int { const divider = "----------------------------------------------------------" if goldenNum != resultNum { - log.Print(divider) - log.Print(color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:")) - log.Printf(" Golden count: %d", goldenNum) - log.Printf(" Result count: %d", resultNum) - log.Printf(" Golden files: %s", strings.Join(goldenFiles, ", ")) - log.Printf(" Result files: %s", strings.Join(resultFiles, ", ")) + fs.Log(nil, divider) + fs.Log(nil, color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:")) + fs.Logf(nil, " Golden count: %d", goldenNum) + fs.Logf(nil, " Result count: %d", resultNum) + fs.Logf(nil, " Golden files: %s", strings.Join(goldenFiles, ", ")) + fs.Logf(nil, " Result files: %s", strings.Join(resultFiles, ", ")) } for _, file := range goldenFiles { if !resultSet.Has(file) { errorCount++ - log.Printf(" File found in Golden but not in Results: %s", file) + fs.Logf(nil, " File found in Golden but not in Results: %s", file) } } for _, file := range resultFiles { if !goldenSet.Has(file) { errorCount++ - log.Printf(" File found in Results but not in Golden: %s", file) + fs.Logf(nil, " File found in Results but not in Golden: %s", file) } } @@ -1433,15 +1438,15 @@ func (b *bisyncTest) compareResults() int { text, err := difflib.GetUnifiedDiffString(diff) require.NoError(b.t, err, "diff failed") - log.Print(divider) - log.Printf(color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file) + fs.Log(nil, divider) + fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file) for _, line := range strings.Split(strings.TrimSpace(text), "\n") { - log.Printf("| %s", strings.TrimSpace(line)) + fs.Logf(nil, "| %s", strings.TrimSpace(line)) 
} } if errorCount > 0 { - log.Print(divider) + fs.Log(nil, divider) } if errorCount == 0 && goldenNum != resultNum { return -1 @@ -1464,7 +1469,7 @@ func (b *bisyncTest) storeGolden() { continue } if fileName == "backupdirs" { - log.Printf("skipping: %v", fileName) + fs.Logf(nil, "skipping: %v", fileName) continue } goldName := b.toGolden(fileName) @@ -1489,7 +1494,7 @@ func (b *bisyncTest) storeGolden() { continue } if fileName == "backupdirs" { - log.Printf("skipping: %v", fileName) + fs.Logf(nil, "skipping: %v", fileName) continue } text := b.mangleResult(b.goldenDir, fileName, true) @@ -1552,6 +1557,12 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string { if b.fs1.Hashes() == hash.Set(hash.None) || b.fs2.Hashes() == hash.Set(hash.None) { logReplacements = append(logReplacements, `^.*{hashtype} differ.*$`, dropMe) } + if b.ignoreBlankHash { + logReplacements = append(logReplacements, + `^.*hash is missing.*$`, dropMe, + `^.*not equal on recheck.*$`, dropMe, + ) + } rep := logReplacements if b.testCase == "dry_run" { rep = append(rep, dryrunReplacements...) @@ -1849,7 +1860,7 @@ func fileType(fileName string) string { // logPrintf prints a message to stdout and to the test log func (b *bisyncTest) logPrintf(text string, args ...interface{}) { line := fmt.Sprintf(text, args...) - log.Print(line) + fs.Log(nil, line) if b.logFile != nil { _, err := fmt.Fprintln(b.logFile, line) require.NoError(b.t, err, "writing log file") diff --git a/cmd/bisync/cmd.go b/cmd/bisync/cmd.go index 22a600923..c8b2136c4 100644 --- a/cmd/bisync/cmd.go +++ b/cmd/bisync/cmd.go @@ -20,6 +20,7 @@ import ( "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/filter" + "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/hash" "github.com/spf13/cobra" @@ -193,7 +194,7 @@ var commandDefinition = &cobra.Command{ cmd.Run(false, true, command, func() error { err := Bisync(ctx, fs1, fs2, &opt) if err == ErrBisyncAborted { - os.Exit(2) + return fserrors.FatalError(err) } return err }) diff --git a/cmd/bisync/listing.go b/cmd/bisync/listing.go index 8f6d8eb19..13179b26f 100644 --- a/cmd/bisync/listing.go +++ b/cmd/bisync/listing.go @@ -10,6 +10,7 @@ import ( "io" "os" "regexp" + "slices" "sort" "strconv" "strings" @@ -21,7 +22,6 @@ import ( "github.com/rclone/rclone/fs/filter" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/operations" - "golang.org/x/exp/slices" ) // ListingHeader defines first line of a listing @@ -43,8 +43,10 @@ var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT const timeFormat = "2006-01-02T15:04:05.000000000-0700" // TZ defines time zone used in listings -var TZ = time.UTC -var tzLocal = false +var ( + TZ = time.UTC + tzLocal = false +) // fileInfo describes a file type fileInfo struct { @@ -83,7 +85,7 @@ func (ls *fileList) has(file string) bool { } _, found := ls.info[file] if !found { - //try unquoting + // try unquoting file, _ = strconv.Unquote(`"` + file + `"`) _, found = ls.info[file] } @@ -93,7 +95,7 @@ func (ls *fileList) has(file string) bool { func (ls *fileList) get(file string) *fileInfo { info, found := ls.info[file] if !found { - //try unquoting + // try unquoting file, _ = strconv.Unquote(`"` + file + `"`) info = ls.info[fmt.Sprint(file)] } @@ -420,7 +422,7 @@ func (b *bisyncRun) loadListingNum(listingNum int) (*fileList, error) { func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) { var fulllisting *fileList - var dirsonly = 
newFileList() + dirsonly := newFileList() var err error if !b.opt.CreateEmptySrcDirs { @@ -450,24 +452,6 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) { return dirsonly, err } -// ConvertPrecision returns the Modtime rounded to Dest's precision if lower, otherwise unchanged -// Need to use the other fs's precision (if lower) when copying -// Note: we need to use Truncate rather than Round so that After() is reliable. -// (2023-11-02 20:22:45.552679442 +0000 < UTC 2023-11-02 20:22:45.553 +0000 UTC) -func ConvertPrecision(Modtime time.Time, dst fs.Fs) time.Time { - DestPrecision := dst.Precision() - - // In case it's wrapping an Fs with lower precision, try unwrapping and use the lowest. - if Modtime.Truncate(DestPrecision).After(Modtime.Truncate(fs.UnWrapFs(dst).Precision())) { - DestPrecision = fs.UnWrapFs(dst).Precision() - } - - if Modtime.After(Modtime.Truncate(DestPrecision)) { - return Modtime.Truncate(DestPrecision) - } - return Modtime -} - // modifyListing will modify the listing based on the results of the sync func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) { queue := queues.copy2to1 @@ -533,13 +517,13 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res // build src winners list if result.IsSrc && result.Src != "" && (result.Winner.Err == nil || result.Flags == "d") { - srcWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, src), result.Hash, "-", result.Flags) + srcWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags) prettyprint(result, "winner: copy to src", fs.LogLevelDebug) } // build dst winners list if result.IsWinner && result.Winner.Side != "none" && (result.Winner.Err == nil || result.Flags == "d") { - dstWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, dst), result.Hash, "-", result.Flags) + dstWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags) prettyprint(result, "winner: copy to dst", fs.LogLevelDebug) } @@ -623,7 +607,7 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res } if srcNewName != "" { // if it was renamed and not deleted srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags) - dstList.put(srcNewName, new.size, ConvertPrecision(new.time, src), new.hash, new.id, new.flags) + dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags) } if srcNewName != srcOldName { srcList.remove(srcOldName) diff --git a/cmd/bisync/operations.go b/cmd/bisync/operations.go index b457645fa..987a94e66 100644 --- a/cmd/bisync/operations.go +++ b/cmd/bisync/operations.go @@ -23,7 +23,7 @@ import ( "github.com/rclone/rclone/lib/terminal" ) -// ErrBisyncAborted signals that bisync is aborted and forces exit code 2 +// ErrBisyncAborted signals that bisync is aborted and forces non-zero exit code var ErrBisyncAborted = errors.New("bisync aborted") // bisyncRun keeps bisync runtime state diff --git a/cmd/cat/cat.go b/cmd/cat/cat.go index 9961dc222..c2325dd4c 100644 --- a/cmd/cat/cat.go +++ b/cmd/cat/cat.go @@ -4,11 +4,11 @@ package cat import ( "context" "io" - "log" "os" "strings" "github.com/rclone/rclone/cmd" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" @@ -79,7 +79,7 @@ files, use: usedHead := head > 0 usedTail := tail > 0 if usedHead && usedTail || usedHead && usedOffset || 
usedTail && usedOffset { - log.Fatalf("Can only use one of --head, --tail or --offset with --count") + fs.Fatalf(nil, "Can only use one of --head, --tail or --offset with --count") } if head > 0 { offset = 0 diff --git a/cmd/cmd.go b/cmd/cmd.go index 2aca5341d..8f5f5f093 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -10,7 +10,6 @@ import ( "context" "errors" "fmt" - "log" "os" "os/exec" "path" @@ -51,7 +50,6 @@ var ( version bool // Errors errorCommandNotFound = errors.New("command not found") - errorUncategorized = errors.New("uncategorized error") errorNotEnoughArguments = errors.New("not enough arguments") errorTooManyArguments = errors.New("too many arguments") ) @@ -85,12 +83,13 @@ func ShowVersion() { // It returns a string with the file name if points to a file // otherwise "". func NewFsFile(remote string) (fs.Fs, string) { + ctx := context.Background() _, fsPath, err := fspath.SplitFs(remote) if err != nil { - err = fs.CountError(err) - log.Fatalf("Failed to create file system for %q: %v", remote, err) + err = fs.CountError(ctx, err) + fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err) } - f, err := cache.Get(context.Background(), remote) + f, err := cache.Get(ctx, remote) switch err { case fs.ErrorIsFile: cache.Pin(f) // pin indefinitely since it was on the CLI @@ -99,8 +98,8 @@ func NewFsFile(remote string) (fs.Fs, string) { cache.Pin(f) // pin indefinitely since it was on the CLI return f, "" default: - err = fs.CountError(err) - log.Fatalf("Failed to create file system for %q: %v", remote, err) + err = fs.CountError(ctx, err) + fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err) } return nil, "" } @@ -110,19 +109,20 @@ func NewFsFile(remote string) (fs.Fs, string) { // This works the same as NewFsFile however it adds filters to the Fs // to limit it to a single file if the remote pointed to a file. 
func newFsFileAddFilter(remote string) (fs.Fs, string) { - fi := filter.GetConfig(context.Background()) + ctx := context.Background() + fi := filter.GetConfig(ctx) f, fileName := NewFsFile(remote) if fileName != "" { if !fi.InActive() { err := fmt.Errorf("can't limit to single files when using filters: %v", remote) - err = fs.CountError(err) - log.Fatal(err.Error()) + err = fs.CountError(ctx, err) + fs.Fatal(nil, err.Error()) } // Limit transfers to this file err := fi.AddFile(fileName) if err != nil { - err = fs.CountError(err) - log.Fatalf("Failed to limit to single file %q: %v", remote, err) + err = fs.CountError(ctx, err) + fs.Fatalf(nil, "Failed to limit to single file %q: %v", remote, err) } } return f, fileName @@ -141,10 +141,11 @@ func NewFsSrc(args []string) fs.Fs { // // This must point to a directory func newFsDir(remote string) fs.Fs { - f, err := cache.Get(context.Background(), remote) + ctx := context.Background() + f, err := cache.Get(ctx, remote) if err != nil { - err = fs.CountError(err) - log.Fatalf("Failed to create file system for %q: %v", remote, err) + err = fs.CountError(ctx, err) + fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err) } cache.Pin(f) // pin indefinitely since it was on the CLI return f @@ -177,6 +178,7 @@ func NewFsSrcFileDst(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs) // NewFsSrcDstFiles creates a new src and dst fs from the arguments // If src is a file then srcFileName and dstFileName will be non-empty func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs, dstFileName string) { + ctx := context.Background() fsrc, srcFileName = newFsFileAddFilter(args[0]) // If copying a file... dstRemote := args[1] @@ -186,24 +188,24 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs var err error dstRemote, dstFileName, err = fspath.Split(dstRemote) if err != nil { - log.Fatalf("Parsing %q failed: %v", args[1], err) + fs.Fatalf(nil, "Parsing %q failed: %v", args[1], err) } if dstRemote == "" { dstRemote = "." } if dstFileName == "" { - log.Fatalf("%q is a directory", args[1]) + fs.Fatalf(nil, "%q is a directory", args[1]) } } - fdst, err := cache.Get(context.Background(), dstRemote) + fdst, err := cache.Get(ctx, dstRemote) switch err { case fs.ErrorIsFile: - _ = fs.CountError(err) - log.Fatalf("Source doesn't exist or is a directory and destination is a file") + _ = fs.CountError(ctx, err) + fs.Fatalf(nil, "Source doesn't exist or is a directory and destination is a file") case nil: default: - _ = fs.CountError(err) - log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err) + _ = fs.CountError(ctx, err) + fs.Fatalf(nil, "Failed to create file system for destination %q: %v", dstRemote, err) } cache.Pin(fdst) // pin indefinitely since it was on the CLI return @@ -213,13 +215,13 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) { dstRemote, dstFileName, err := fspath.Split(args[0]) if err != nil { - log.Fatalf("Parsing %q failed: %v", args[0], err) + fs.Fatalf(nil, "Parsing %q failed: %v", args[0], err) } if dstRemote == "" { dstRemote = "." 
} if dstFileName == "" { - log.Fatalf("%q is a directory", args[0]) + fs.Fatalf(nil, "%q is a directory", args[0]) } fdst = newFsDir(dstRemote) return @@ -236,7 +238,8 @@ func ShowStats() bool { // Run the function with stats and retries if required func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) { - ci := fs.GetConfig(context.Background()) + ctx := context.Background() + ci := fs.GetConfig(ctx) var cmdErr error stopStats := func() {} if !showStats && ShowStats() { @@ -250,7 +253,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) { SigInfoHandler() for try := 1; try <= ci.Retries; try++ { cmdErr = f() - cmdErr = fs.CountError(cmdErr) + cmdErr = fs.CountError(ctx, cmdErr) lastErr := accounting.GlobalStats().GetLastError() if cmdErr == nil { cmdErr = lastErr @@ -328,9 +331,9 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) { if cmdErr != nil { nerrs := accounting.GlobalStats().GetErrors() if nerrs <= 1 { - log.Printf("Failed to %s: %v", cmd.Name(), cmdErr) + fs.Logf(nil, "Failed to %s: %v", cmd.Name(), cmdErr) } else { - log.Printf("Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, cmdErr) + fs.Logf(nil, "Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, cmdErr) } } resolveExitCode(cmdErr) @@ -383,7 +386,7 @@ func initConfig() { // Set the global options from the flags err := fs.GlobalOptionsInit() if err != nil { - log.Fatalf("Failed to initialise global options: %v", err) + fs.Fatalf(nil, "Failed to initialise global options: %v", err) } ctx := context.Background() @@ -421,9 +424,16 @@ func initConfig() { } // Start the remote control server if configured - _, err = rcserver.Start(context.Background(), &rc.Opt) + _, err = rcserver.Start(ctx, &rc.Opt) if err != nil { - log.Fatalf("Failed to start remote control: %v", err) + fs.Fatalf(nil, "Failed to start remote control: %v", err) + } + + // Start the metrics server if configured + _, err = rcserver.MetricsStart(ctx, &rc.Opt) + if err != nil { + fs.Fatalf(nil, "Failed to start metrics server: %v", err) + } // Setup CPU profiling if desired @@ -431,20 +441,20 @@ func initConfig() { fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile) f, err := os.Create(*cpuProfile) if err != nil { - err = fs.CountError(err) - log.Fatal(err) + err = fs.CountError(ctx, err) + fs.Fatal(nil, fmt.Sprint(err)) } err = pprof.StartCPUProfile(f) if err != nil { - err = fs.CountError(err) - log.Fatal(err) + err = fs.CountError(ctx, err) + fs.Fatal(nil, fmt.Sprint(err)) } atexit.Register(func() { pprof.StopCPUProfile() err := f.Close() if err != nil { - err = fs.CountError(err) - log.Fatal(err) + err = fs.CountError(ctx, err) + fs.Fatal(nil, fmt.Sprint(err)) } }) } @@ -455,25 +465,26 @@ func initConfig() { fs.Infof(nil, "Saving Memory profile %q\n", *memProfile) f, err := os.Create(*memProfile) if err != nil { - err = fs.CountError(err) - log.Fatal(err) + err = fs.CountError(ctx, err) + fs.Fatal(nil, fmt.Sprint(err)) } err = pprof.WriteHeapProfile(f) if err != nil { - err = fs.CountError(err) - log.Fatal(err) + err = fs.CountError(ctx, err) + fs.Fatal(nil, fmt.Sprint(err)) } err = f.Close() if err != nil { - err = fs.CountError(err) - log.Fatal(err) + err = fs.CountError(ctx, err) + fs.Fatal(nil, fmt.Sprint(err)) } }) } } func resolveExitCode(err error) { - ci := fs.GetConfig(context.Background()) + ctx := context.Background() + ci := fs.GetConfig(ctx) atexit.Run() if err == nil { if ci.ErrorOnNoTransfer { @@ -489,8 +500,6 @@ func resolveExitCode(err 
error) { os.Exit(exitcode.DirNotFound) case errors.Is(err, fs.ErrorObjectNotFound): os.Exit(exitcode.FileNotFound) - case errors.Is(err, errorUncategorized): - os.Exit(exitcode.UncategorizedError) case errors.Is(err, accounting.ErrorMaxTransferLimitReached): os.Exit(exitcode.TransferExceeded) case errors.Is(err, fssync.ErrorMaxDurationReached): @@ -501,8 +510,10 @@ func resolveExitCode(err error) { os.Exit(exitcode.NoRetryError) case fserrors.IsFatalError(err): os.Exit(exitcode.FatalError) - default: + case errors.Is(err, errorCommandNotFound), errors.Is(err, errorNotEnoughArguments), errors.Is(err, errorTooManyArguments): os.Exit(exitcode.UsageError) + default: + os.Exit(exitcode.UncategorizedError) } } @@ -530,6 +541,7 @@ func Main() { if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled { Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath()) } - log.Fatalf("Fatal error: %v", err) + fs.Logf(nil, "Fatal error: %v", err) + os.Exit(exitcode.UsageError) } } diff --git a/cmd/config/config.go b/cmd/config/config.go index 0300fd76e..4aefe62c0 100644 --- a/cmd/config/config.go +++ b/cmd/config/config.go @@ -36,6 +36,7 @@ func init() { configCommand.AddCommand(configReconnectCommand) configCommand.AddCommand(configDisconnectCommand) configCommand.AddCommand(configUserInfoCommand) + configCommand.AddCommand(configEncryptionCommand) } var configCommand = &cobra.Command{ @@ -518,3 +519,91 @@ system. return nil }, } + +func init() { + configEncryptionCommand.AddCommand(configEncryptionSetCommand) + configEncryptionCommand.AddCommand(configEncryptionRemoveCommand) + configEncryptionCommand.AddCommand(configEncryptionCheckCommand) +} + +var configEncryptionCommand = &cobra.Command{ + Use: "encryption", + Short: `set, remove and check the encryption for the config file`, + Long: `This command sets, clears and checks the encryption for the config file using +the subcommands below. +`, +} + +var configEncryptionSetCommand = &cobra.Command{ + Use: "set", + Short: `Set or change the config file encryption password`, + Long: strings.ReplaceAll(`This command sets or changes the config file encryption password. + +If there was no config password set then it sets a new one, otherwise +it changes the existing config password. + +Note that if you are changing an encryption password using +|--password-command| then this will be called once to decrypt the +config using the old password and then again to read the new +password to re-encrypt the config. + +When |--password-command| is called to change the password then the +environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if +changing passwords programmatically you can use the environment +variable to distinguish which password you must supply. + +Alternatively you can remove the password first (with |rclone config +encryption remove|), then set it again with this command, which may be +easier if you don't mind the unencrypted config file being on the disk +briefly. +`, "|", "`"), + RunE: func(command *cobra.Command, args []string) error { + cmd.CheckArgs(0, 0, command, args) + config.LoadedData() + config.ChangeConfigPasswordAndSave() + return nil + }, +} + +var configEncryptionRemoveCommand = &cobra.Command{ + Use: "remove", + Short: `Remove the config file encryption password`, + Long: strings.ReplaceAll(`Remove the config file encryption password + +This removes the config file encryption, returning it to unencrypted.
+ +If |--password-command| is in use, this will be called to supply the old config +password. + +If the config was not encrypted then no error will be returned and +this command will do nothing. +`, "|", "`"), + RunE: func(command *cobra.Command, args []string) error { + cmd.CheckArgs(0, 0, command, args) + config.LoadedData() + config.RemoveConfigPasswordAndSave() + return nil + }, +} + +var configEncryptionCheckCommand = &cobra.Command{ + Use: "check", + Short: `Check that the config file is encrypted`, + Long: strings.ReplaceAll(`This checks that the config file is encrypted and that you can decrypt it. + +It will attempt to decrypt the config using the password you supply. + +If decryption fails it will return a non-zero exit code if using +|--password-command|, otherwise it will prompt again for the password. + +If the config file is not encrypted it will return a non-zero exit code. +`, "|", "`"), + RunE: func(command *cobra.Command, args []string) error { + cmd.CheckArgs(0, 0, command, args) + config.LoadedData() + if !config.IsEncrypted() { + return errors.New("config file is NOT encrypted") + } + return nil + }, +} diff --git a/cmd/dedupe/dedupe.go b/cmd/dedupe/dedupe.go index 38c6e41a0..e280306ec 100644 --- a/cmd/dedupe/dedupe.go +++ b/cmd/dedupe/dedupe.go @@ -3,7 +3,7 @@ package dedupe import ( "context" - "log" + "fmt" "github.com/rclone/rclone/cmd" "github.com/rclone/rclone/fs" @@ -142,7 +142,7 @@ Or if len(args) > 1 { err := dedupeMode.Set(args[0]) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } args = args[1:] } diff --git a/cmd/genautocomplete/genautocomplete_bash.go b/cmd/genautocomplete/genautocomplete_bash.go index f9eb83a68..e4c08d662 100644 --- a/cmd/genautocomplete/genautocomplete_bash.go +++ b/cmd/genautocomplete/genautocomplete_bash.go @@ -1,10 +1,11 @@ package genautocomplete import ( - "log" + "fmt" "os" "github.com/rclone/rclone/cmd" + "github.com/rclone/rclone/fs" "github.com/spf13/cobra" ) @@ -50,7 +51,7 @@ current shell. if args[0] == "-" { err := cmd.Root.GenBashCompletionV2(os.Stdout, false) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } return } @@ -58,7 +59,7 @@ current shell. } err := cmd.Root.GenBashCompletionFileV2(out, false) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } }, } diff --git a/cmd/genautocomplete/genautocomplete_fish.go b/cmd/genautocomplete/genautocomplete_fish.go index 8a419fb91..e8a8014e2 100644 --- a/cmd/genautocomplete/genautocomplete_fish.go +++ b/cmd/genautocomplete/genautocomplete_fish.go @@ -1,10 +1,11 @@ package genautocomplete import ( - "log" + "fmt" "os" "github.com/rclone/rclone/cmd" + "github.com/rclone/rclone/fs" "github.com/spf13/cobra" ) @@ -39,7 +40,7 @@ If output_file is "-", then the output will be written to stdout. if args[0] == "-" { err := cmd.Root.GenFishCompletion(os.Stdout, true) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } return } @@ -47,7 +48,7 @@ If output_file is "-", then the output will be written to stdout.
} err := cmd.Root.GenFishCompletionFile(out, true) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } }, } diff --git a/cmd/genautocomplete/genautocomplete_powershell.go b/cmd/genautocomplete/genautocomplete_powershell.go index e07bd70b6..0fda7d730 100644 --- a/cmd/genautocomplete/genautocomplete_powershell.go +++ b/cmd/genautocomplete/genautocomplete_powershell.go @@ -1,10 +1,11 @@ package genautocomplete import ( - "log" + "fmt" "os" "github.com/rclone/rclone/cmd" + "github.com/rclone/rclone/fs" "github.com/spf13/cobra" ) @@ -31,13 +32,13 @@ If output_file is "-" or missing, then the output will be written to stdout. if len(args) == 0 || (len(args) > 0 && args[0] == "-") { err := cmd.Root.GenPowerShellCompletion(os.Stdout) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } return } err := cmd.Root.GenPowerShellCompletionFile(args[0]) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } }, } diff --git a/cmd/genautocomplete/genautocomplete_zsh.go b/cmd/genautocomplete/genautocomplete_zsh.go index 23333fb7c..ef80254e8 100644 --- a/cmd/genautocomplete/genautocomplete_zsh.go +++ b/cmd/genautocomplete/genautocomplete_zsh.go @@ -1,10 +1,11 @@ package genautocomplete import ( - "log" + "fmt" "os" "github.com/rclone/rclone/cmd" + "github.com/rclone/rclone/fs" "github.com/spf13/cobra" ) @@ -39,7 +40,7 @@ If output_file is "-", then the output will be written to stdout. if args[0] == "-" { err := cmd.Root.GenZshCompletion(os.Stdout) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } return } @@ -47,12 +48,12 @@ If output_file is "-", then the output will be written to stdout. } outFile, err := os.Create(out) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } defer func() { _ = outFile.Close() }() err = cmd.Root.GenZshCompletion(outFile) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } }, } diff --git a/cmd/gendocs/gendocs.go b/cmd/gendocs/gendocs.go index 5864283db..ffd6e23fd 100644 --- a/cmd/gendocs/gendocs.go +++ b/cmd/gendocs/gendocs.go @@ -4,7 +4,6 @@ package gendocs import ( "bytes" "fmt" - "log" "os" "path" "path/filepath" @@ -14,6 +13,7 @@ import ( "time" "github.com/rclone/rclone/cmd" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/lib/file" "github.com/spf13/cobra" @@ -144,7 +144,7 @@ rclone.org website.`, var buf bytes.Buffer err := frontmatterTemplate.Execute(&buf, data) if err != nil { - log.Fatalf("Failed to render frontmatter template: %v", err) + fs.Fatalf(nil, "Failed to render frontmatter template: %v", err) } return buf.String() } diff --git a/cmd/help.go b/cmd/help.go index 94f399793..c9906d684 100644 --- a/cmd/help.go +++ b/cmd/help.go @@ -3,7 +3,6 @@ package cmd import ( "context" "fmt" - "log" "os" "regexp" "sort" @@ -76,7 +75,7 @@ var helpFlags = &cobra.Command{ if len(args) > 0 { re, err := filter.GlobStringToRegexp(args[0], false, true) if err != nil { - log.Fatalf("Invalid flag filter: %v", err) + fs.Fatalf(nil, "Invalid flag filter: %v", err) } fs.Debugf(nil, "Flag filter: %s", re.String()) filterFlagsRe = re @@ -244,6 +243,7 @@ var filterFlagsMultiGroupTemplate = `{{range flagGroups .}}{{if .Flags.HasFlags} var docFlagsTemplate = `--- title: "Global Flags" description: "Rclone Global Flags" +# autogenerated - DO NOT EDIT --- # Global Flags @@ -285,7 +285,7 @@ func quoteString(v interface{}) string { func showBackend(name string) { backend, err := fs.Find(name) if err != nil { - log.Fatal(err) + fs.Fatal(nil, 
fmt.Sprint(err)) } var standardOptions, advancedOptions fs.Options done := map[string]struct{}{} diff --git a/cmd/lsjson/lsjson.go b/cmd/lsjson/lsjson.go index 91af96d6f..51a551d90 100644 --- a/cmd/lsjson/lsjson.go +++ b/cmd/lsjson/lsjson.go @@ -41,7 +41,7 @@ var commandDefinition = &cobra.Command{ Short: `List directories and objects in the path in JSON format.`, Long: `List directories and objects in the path in JSON format. -The output is an array of Items, where each Item looks like this +The output is an array of Items, where each Item looks like this: { "Hashes" : { @@ -63,44 +63,50 @@ The output is an array of Items, where each Item looks like this "Tier" : "hot", } -If ` + "`--hash`" + ` is not specified, the Hashes property will be omitted. The -types of hash can be specified with the ` + "`--hash-type`" + ` parameter (which -may be repeated). If ` + "`--hash-type`" + ` is set then it implies ` + "`--hash`" + `. +The exact set of properties included depends on the backend: -If ` + "`--no-modtime`" + ` is specified then ModTime will be blank. This can -speed things up on remotes where reading the ModTime takes an extra -request (e.g. s3, swift). +- The property IsBucket will only be included for bucket-based remotes, and only + for directories that are buckets. It will always be omitted when value is not true. +- Properties Encrypted and EncryptedPath will only be included for encrypted + remotes, and (as mentioned below) only if the ` + "`--encrypted`" + ` option is set. -If ` + "`--no-mimetype`" + ` is specified then MimeType will be blank. This can -speed things up on remotes where reading the MimeType takes an extra -request (e.g. s3, swift). +Different options may also affect which properties are included: -If ` + "`--encrypted`" + ` is not specified the Encrypted will be omitted. +- If ` + "`--hash`" + ` is not specified, the Hashes property will be omitted. The + types of hash can be specified with the ` + "`--hash-type`" + ` parameter (which + may be repeated). If ` + "`--hash-type`" + ` is set then it implies ` + "`--hash`" + `. +- If ` + "`--no-modtime`" + ` is specified then ModTime will be blank. This can + speed things up on remotes where reading the ModTime takes an extra + request (e.g. s3, swift). +- If ` + "`--no-mimetype`" + ` is specified then MimeType will be blank. This can + speed things up on remotes where reading the MimeType takes an extra + request (e.g. s3, swift). +- If ` + "`--encrypted`" + ` is not specified the Encrypted and EncryptedPath + properties will be omitted - even for encrypted remotes. +- If ` + "`--metadata`" + ` is set then an additional Metadata property will be + returned. This will have [metadata](/docs/#metadata) in rclone standard format + as a JSON object. -If ` + "`--dirs-only`" + ` is not specified files in addition to directories are -returned +The default is to list directories and files/objects, but this can be changed +with the following options: -If ` + "`--files-only`" + ` is not specified directories in addition to the files -will be returned. +- If ` + "`--dirs-only`" + ` is specified then directories will be returned + only, no files/objects. +- If ` + "`--files-only`" + ` is specified then files will be returned only, + no directories. -If ` + "`--metadata`" + ` is set then an additional Metadata key will be returned. -This will have metadata in rclone standard format as a JSON object. - -if ` + "`--stat`" + ` is set then a single JSON blob will be returned about the -item pointed to. 
This will return an error if the item isn't found. -However on bucket based backends (like s3, gcs, b2, azureblob etc) if -the item isn't found it will return an empty directory as it isn't -possible to tell empty directories from missing directories there. +If ` + "`--stat`" + ` is set then the output is not an array of items, +but instead a single JSON blob will be returned about the item pointed to. +This will return an error if the item isn't found, however on bucket-based +backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will +return an empty directory, as it isn't possible to tell empty directories +from missing directories there. The Path field will only show folders below the remote path being listed. If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt" will be "subfolder/file.txt", not "remote:path/subfolder/file.txt". When used without ` + "`--recursive`" + ` the Path will always be the same as Name. -If the directory is a bucket in a bucket-based backend, then -"IsBucket" will be set to true. This key won't be present unless it is -"true". - The time is in RFC3339 format with up to nanosecond precision. The number of decimal digits in the seconds will depend on the precision that the remote can hold the times, so if times are accurate to the @@ -110,7 +116,8 @@ accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits will be shown ("2017-05-31T16:15:57+01:00"). The whole output can be processed as a JSON blob, or alternatively it -can be processed line by line as each item is written one to a line. +can be processed line by line as each item is written on its own line +(except with ` + "`--stat`" + `). ` + lshelp.Help, Annotations: map[string]string{ "versionIntroduced": "v1.37", }, diff --git a/cmd/mount2/mount.go b/cmd/mount2/mount.go index 4548705a5..012142a29 100644 --- a/cmd/mount2/mount.go +++ b/cmd/mount2/mount.go @@ -5,7 +5,6 @@ package mount2 import ( "fmt" - "log" "runtime" "time" @@ -150,7 +149,7 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou opts = append(opts, "ro") } if fsys.opt.WritebackCache { - log.Printf("FIXME --write-back-cache not supported") + fs.Printf(nil, "FIXME --write-back-cache not supported") // FIXME opts = append(opts,fuse.WritebackCache()) } // Some OS X only options diff --git a/cmd/mountlib/mount.go b/cmd/mountlib/mount.go index 2331feff4..38a32ac65 100644 --- a/cmd/mountlib/mount.go +++ b/cmd/mountlib/mount.go @@ -5,7 +5,6 @@ import ( "context" _ "embed" "fmt" - "log" "os" "runtime" "strings" @@ -312,7 +311,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm err = mnt.Wait() } if err != nil { - log.Fatalf("Fatal error: %v", err) + fs.Fatalf(nil, "Fatal error: %v", err) } return } @@ -340,7 +339,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm atexit.Unregister(handle) } if err != nil { - log.Fatalf("Fatal error: %v", err) + fs.Fatalf(nil, "Fatal error: %v", err) } }, } diff --git a/cmd/mountlib/mount.md b/cmd/mountlib/mount.md index bd8bd3bd7..30bcee983 100644 --- a/cmd/mountlib/mount.md +++ b/cmd/mountlib/mount.md @@ -42,7 +42,9 @@ When running in background mode the user will have to stop the mount manually: # Linux fusermount -u /path/to/local/mount - # OS X + #... or on some systems + fusermount3 -u /path/to/local/mount + # OS X or Linux when using nfsmount umount /path/to/local/mount The umount operation can fail, for example when the mountpoint is busy.
@@ -386,9 +388,9 @@ Note that systemd runs mount units without any environment variables including `PATH` or `HOME`. This means that tilde (`~`) expansion will not work and you should provide `--config` and `--cache-dir` explicitly as absolute paths via rclone arguments. -Since mounting requires the `fusermount` program, rclone will use the fallback -PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount` -is present on this PATH. +Since mounting requires the `fusermount` or `fusermount3` program, +rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario. +Please ensure that `fusermount`/`fusermount3` is present on this PATH. ### Rclone as Unix mount helper diff --git a/cmd/mountlib/rc.go b/cmd/mountlib/rc.go index a896e8c5f..8c5c94d5d 100644 --- a/cmd/mountlib/rc.go +++ b/cmd/mountlib/rc.go @@ -3,7 +3,6 @@ package mountlib import ( "context" "errors" - "log" "sort" "sync" "time" @@ -123,12 +122,12 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) { mnt := NewMountPoint(mountFn, mountPoint, fdst, &mountOpt, &vfsOpt) _, err = mnt.Mount() if err != nil { - log.Printf("mount FAILED: %v", err) + fs.Logf(nil, "mount FAILED: %v", err) return nil, err } go func() { if err = mnt.Wait(); err != nil { - log.Printf("unmount FAILED: %v", err) + fs.Logf(nil, "unmount FAILED: %v", err) return } mountMu.Lock() diff --git a/cmd/ncdu/ncdu.go b/cmd/ncdu/ncdu.go index f326e8ad2..42117ab0d 100644 --- a/cmd/ncdu/ncdu.go +++ b/cmd/ncdu/ncdu.go @@ -929,23 +929,23 @@ func (u *UI) Run() error { return fmt.Errorf("screen init: %w", err) } - // Hijack fs.LogPrint so that it doesn't corrupt the screen. - if logPrint := fs.LogPrint; !log.Redirected() { + // Hijack fs.LogOutput so that it doesn't corrupt the screen. + if logOutput := fs.LogOutput; !log.Redirected() { type log struct { text string level fs.LogLevel } var logs []log - fs.LogPrint = func(level fs.LogLevel, text string) { + fs.LogOutput = func(level fs.LogLevel, text string) { if len(logs) > 100 { logs = logs[len(logs)-100:] } logs = append(logs, log{level: level, text: text}) } defer func() { - fs.LogPrint = logPrint + fs.LogOutput = logOutput for i := range logs { - logPrint(logs[i].level, logs[i].text) + logOutput(logs[i].level, logs[i].text) } }() } diff --git a/cmd/nfsmount/nfsmount.go b/cmd/nfsmount/nfsmount.go index b21ffdd3e..2931cdac0 100644 --- a/cmd/nfsmount/nfsmount.go +++ b/cmd/nfsmount/nfsmount.go @@ -21,8 +21,7 @@ import ( ) var ( - sudo = false - nfsServerOpt nfs.Options + sudo = false ) func init() { @@ -33,11 +32,11 @@ func init() { mountlib.AddRc(name, mount) cmdFlags := cmd.Flags() flags.BoolVarP(cmdFlags, &sudo, "sudo", "", sudo, "Use sudo to run the mount/umount commands as root.", "") - nfs.AddFlags(cmdFlags, &nfsServerOpt) + nfs.AddFlags(cmdFlags) } func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (asyncerrors <-chan error, unmount func() error, err error) { - s, err := nfs.NewServer(context.Background(), VFS, &nfsServerOpt) + s, err := nfs.NewServer(context.Background(), VFS, &nfs.Opt) if err != nil { return } diff --git a/cmd/nfsmount/nfsmount_test.go b/cmd/nfsmount/nfsmount_test.go index 78990b572..8412cea71 100644 --- a/cmd/nfsmount/nfsmount_test.go +++ b/cmd/nfsmount/nfsmount_test.go @@ -7,6 +7,7 @@ import ( "runtime" "testing" + "github.com/rclone/rclone/cmd/serve/nfs" "github.com/rclone/rclone/vfs/vfscommon" "github.com/rclone/rclone/vfs/vfstest" "github.com/stretchr/testify/require" @@ -29,7 +30,7 @@ func TestMount(t *testing.T) { } sudo = true } - 
nfsServerOpt.HandleCacheDir = t.TempDir() - require.NoError(t, nfsServerOpt.HandleCache.Set("disk")) + nfs.Opt.HandleCacheDir = t.TempDir() + require.NoError(t, nfs.Opt.HandleCache.Set("disk")) vfstest.RunTests(t, false, vfscommon.CacheModeWrites, false, mount) } diff --git a/cmd/progress.go b/cmd/progress.go index fbdcd19a1..679c1d30d 100644 --- a/cmd/progress.go +++ b/cmd/progress.go @@ -28,12 +28,12 @@ const ( // It returns a func which should be called to stop the stats. func startProgress() func() { stopStats := make(chan struct{}) - oldLogPrint := fs.LogPrint + oldLogOutput := fs.LogOutput oldSyncPrint := operations.SyncPrintf if !log.Redirected() { // Intercept the log calls if not logging to file or syslog - fs.LogPrint = func(level fs.LogLevel, text string) { + fs.LogOutput = func(level fs.LogLevel, text string) { printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text)) } @@ -60,7 +60,7 @@ func startProgress() func() { case <-stopStats: ticker.Stop() printProgress("") - fs.LogPrint = oldLogPrint + fs.LogOutput = oldLogOutput operations.SyncPrintf = oldSyncPrint fmt.Println("") return diff --git a/cmd/rcat/rcat.go b/cmd/rcat/rcat.go index bb53c2a23..3d767d115 100644 --- a/cmd/rcat/rcat.go +++ b/cmd/rcat/rcat.go @@ -3,11 +3,11 @@ package rcat import ( "context" - "log" "os" "time" "github.com/rclone/rclone/cmd" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/flags" "github.com/rclone/rclone/fs/operations" "github.com/spf13/cobra" @@ -64,7 +64,7 @@ destination which can use retries.`, stat, _ := os.Stdin.Stat() if (stat.Mode() & os.ModeCharDevice) != 0 { - log.Fatalf("nothing to read from standard input (stdin).") + fs.Fatalf(nil, "nothing to read from standard input (stdin).") } fdst, dstFileName := cmd.NewFsDstFile(args) diff --git a/cmd/rcd/rcd.go b/cmd/rcd/rcd.go index 1dc875d28..391a63321 100644 --- a/cmd/rcd/rcd.go +++ b/cmd/rcd/rcd.go @@ -3,9 +3,9 @@ package rcd import ( "context" - "log" "github.com/rclone/rclone/cmd" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc/rcflags" "github.com/rclone/rclone/fs/rc/rcserver" @@ -39,7 +39,7 @@ See the [rc documentation](/rc/) for more info on the rc flags. Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(0, 1, command, args) if rc.Opt.Enabled { - log.Fatalf("Don't supply --rc flag when using rcd") + fs.Fatalf(nil, "Don't supply --rc flag when using rcd") } // Start the rc @@ -50,10 +50,10 @@ See the [rc documentation](/rc/) for more info on the rc flags. 
s, err := rcserver.Start(context.Background(), &rc.Opt) if err != nil { - log.Fatalf("Failed to start remote control: %v", err) + fs.Fatalf(nil, "Failed to start remote control: %v", err) } if s == nil { - log.Fatal("rc server not configured") + fs.Fatal(nil, "rc server not configured") } // Notify stopping on exit diff --git a/cmd/selfupdate/selfupdate.go b/cmd/selfupdate/selfupdate.go index 6e0b61e8d..cca72a81b 100644 --- a/cmd/selfupdate/selfupdate.go +++ b/cmd/selfupdate/selfupdate.go @@ -14,7 +14,6 @@ import ( "errors" "fmt" "io" - "log" "net/http" "os" "os/exec" @@ -83,19 +82,19 @@ var cmdSelfUpdate = &cobra.Command{ } if Opt.Package != "zip" { if Opt.Package != "deb" && Opt.Package != "rpm" { - log.Fatalf("--package should be one of zip|deb|rpm") + fs.Fatalf(nil, "--package should be one of zip|deb|rpm") } if runtime.GOOS != "linux" { - log.Fatalf(".deb and .rpm packages are supported only on Linux") + fs.Fatalf(nil, ".deb and .rpm packages are supported only on Linux") } else if os.Geteuid() != 0 && !Opt.Check { - log.Fatalf(".deb and .rpm must be installed by root") + fs.Fatalf(nil, ".deb and .rpm must be installed by root") } if Opt.Output != "" && !Opt.Check { fmt.Println("Warning: --output is ignored with --package deb|rpm") } } if err := InstallUpdate(context.Background(), &Opt); err != nil { - log.Fatalf("Error: %v", err) + fs.Fatalf(nil, "Error: %v", err) } }, } diff --git a/cmd/serve/dlna/cds.go b/cmd/serve/dlna/cds.go index 94b37bf18..2e2752c73 100644 --- a/cmd/serve/dlna/cds.go +++ b/cmd/serve/dlna/cds.go @@ -5,7 +5,6 @@ import ( "encoding/xml" "errors" "fmt" - "log" "net/http" "net/url" "os" @@ -360,7 +359,7 @@ func (o *object) FilePath() string { // Returns the ObjectID for the object. This is used in various ContentDirectory actions. func (o object) ID() string { if !path.IsAbs(o.Path) { - log.Panicf("Relative object path: %s", o.Path) + fs.Panicf(nil, "Relative object path: %s", o.Path) } if len(o.Path) == 1 { return "0" diff --git a/cmd/serve/dlna/dlna.go b/cmd/serve/dlna/dlna.go index 29af87b4a..199350877 100644 --- a/cmd/serve/dlna/dlna.go +++ b/cmd/serve/dlna/dlna.go @@ -190,16 +190,17 @@ func (s *server) ModelNumber() string { // Renders the root device descriptor. func (s *server) rootDescHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() tmpl, err := data.GetTemplate() if err != nil { - serveError(s, w, "Failed to load root descriptor template", err) + serveError(ctx, s, w, "Failed to load root descriptor template", err) return } buffer := new(bytes.Buffer) err = tmpl.Execute(buffer, s) if err != nil { - serveError(s, w, "Failed to render root descriptor XML", err) + serveError(ctx, s, w, "Failed to render root descriptor XML", err) return } @@ -215,15 +216,16 @@ func (s *server) rootDescHandler(w http.ResponseWriter, r *http.Request) { // Handle a service control HTTP request. 
func (s *server) serviceControlHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() soapActionString := r.Header.Get("SOAPACTION") soapAction, err := upnp.ParseActionHTTPHeader(soapActionString) if err != nil { - serveError(s, w, "Could not parse SOAPACTION header", err) + serveError(ctx, s, w, "Could not parse SOAPACTION header", err) return } var env soap.Envelope if err := xml.NewDecoder(r.Body).Decode(&env); err != nil { - serveError(s, w, "Could not parse SOAP request body", err) + serveError(ctx, s, w, "Could not parse SOAP request body", err) return } @@ -257,6 +259,7 @@ func (s *server) soapActionResponse(sa upnp.SoapAction, actionRequestXML []byte, // Serves actual resources (media files). func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() remotePath := r.URL.Path node, err := s.vfs.Stat(r.URL.Path) if err != nil { @@ -277,7 +280,7 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) { file := node.(*vfs.File) in, err := file.Open(os.O_RDONLY) if err != nil { - serveError(node, w, "Could not open resource", err) + serveError(ctx, node, w, "Could not open resource", err) return } defer fs.CheckClose(in, &err) diff --git a/cmd/serve/dlna/dlna_util.go b/cmd/serve/dlna/dlna_util.go index d54dfea5b..ca61488af 100644 --- a/cmd/serve/dlna/dlna_util.go +++ b/cmd/serve/dlna/dlna_util.go @@ -1,11 +1,11 @@ package dlna import ( + "context" "crypto/md5" "encoding/xml" "fmt" "io" - "log" "net" "net/http" "net/http/httptest" @@ -31,7 +31,7 @@ func makeDefaultFriendlyName() string { func makeDeviceUUID(unique string) string { h := md5.New() if _, err := io.WriteString(h, unique); err != nil { - log.Panicf("makeDeviceUUID write failed: %s", err) + fs.Panicf(nil, "makeDeviceUUID write failed: %s", err) } buf := h.Sum(nil) return upnp.FormatUUID(buf) @@ -41,7 +41,7 @@ func makeDeviceUUID(unique string) string { func listInterfaces() []net.Interface { ifs, err := net.Interfaces() if err != nil { - log.Printf("list network interfaces: %v", err) + fs.Logf(nil, "list network interfaces: %v", err) return []net.Interface{} } @@ -71,7 +71,7 @@ func didlLite(chardata string) string { func mustMarshalXML(value interface{}) []byte { ret, err := xml.MarshalIndent(value, "", " ") if err != nil { - log.Panicf("mustMarshalXML failed to marshal %v: %s", value, err) + fs.Panicf(nil, "mustMarshalXML failed to marshal %v: %s", value, err) } return ret } @@ -143,9 +143,10 @@ func logging(next http.Handler) http.Handler { // Error recovery and general request logging are left to logging(). 
func traceLogging(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() dump, err := httputil.DumpRequest(r, true) if err != nil { - serveError(nil, w, "error dumping request", err) + serveError(ctx, nil, w, "error dumping request", err) return } fs.Debugf(nil, "%s", dump) @@ -183,8 +184,8 @@ func withHeader(name string, value string, next http.Handler) http.Handler { } // serveError returns an http.StatusInternalServerError and logs the error -func serveError(what interface{}, w http.ResponseWriter, text string, err error) { - err = fs.CountError(err) +func serveError(ctx context.Context, what interface{}, w http.ResponseWriter, text string, err error) { + err = fs.CountError(ctx, err) fs.Errorf(what, "%s: %v", text, err) http.Error(w, text+".", http.StatusInternalServerError) } diff --git a/cmd/serve/docker/options.go b/cmd/serve/docker/options.go index 4f0d0ddd9..f2d65a43f 100644 --- a/cmd/serve/docker/options.go +++ b/cmd/serve/docker/options.go @@ -2,6 +2,7 @@ package docker import ( "fmt" + "math" "strings" "github.com/rclone/rclone/cmd/mountlib" @@ -270,7 +271,13 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool err = getFVarP(&vfsOpt.DiskSpaceTotalSize, opt, key) case "vfs-read-chunk-streams": intVal, err = opt.GetInt64(key) - vfsOpt.ChunkStreams = int(intVal) + if err == nil { + if intVal >= 0 && intVal <= math.MaxInt { + vfsOpt.ChunkStreams = int(intVal) + } else { + err = fmt.Errorf("key %q (%v) overflows int", key, intVal) + } + } // unprefixed vfs options case "no-modtime": @@ -295,10 +302,22 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool err = getFVarP(&vfsOpt.Umask, opt, key) case "uid": intVal, err = opt.GetInt64(key) - vfsOpt.UID = uint32(intVal) + if err == nil { + if intVal >= 0 && intVal <= math.MaxUint32 { + vfsOpt.UID = uint32(intVal) + } else { + err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal) + } + } case "gid": intVal, err = opt.GetInt64(key) - vfsOpt.GID = uint32(intVal) + if err == nil { + if intVal >= 0 && intVal <= math.MaxUint32 { + vfsOpt.GID = uint32(intVal) + } else { + err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal) + } + } // non-vfs options default: diff --git a/cmd/serve/http/http.go b/cmd/serve/http/http.go index 5ffa2d0a3..7aba1e598 100644 --- a/cmd/serve/http/http.go +++ b/cmd/serve/http/http.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "io" - "log" "net/http" "os" "path" @@ -92,7 +91,7 @@ control the stats printing.
cmd.Run(false, true, command, func() error { s, err := run(context.Background(), f, Opt) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } defer systemd.Notify()() @@ -187,6 +186,7 @@ func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) { // serveDir serves a directory index at dirRemote func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) { + ctx := r.Context() VFS, err := s.getVFS(r.Context()) if err != nil { http.Error(w, "Root directory not found", http.StatusNotFound) @@ -199,7 +199,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string http.Error(w, "Directory not found", http.StatusNotFound) return } else if err != nil { - serve.Error(dirRemote, w, "Failed to list directory", err) + serve.Error(ctx, dirRemote, w, "Failed to list directory", err) return } if !node.IsDir() { @@ -209,7 +209,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string dir := node.(*vfs.Dir) dirEntries, err := dir.ReadDirAll() if err != nil { - serve.Error(dirRemote, w, "Failed to list directory", err) + serve.Error(ctx, dirRemote, w, "Failed to list directory", err) return } @@ -235,6 +235,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string // serveFile serves a file object at remote func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string) { + ctx := r.Context() VFS, err := s.getVFS(r.Context()) if err != nil { http.Error(w, "File not found", http.StatusNotFound) @@ -248,7 +249,7 @@ func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string) http.Error(w, "File not found", http.StatusNotFound) return } else if err != nil { - serve.Error(remote, w, "Failed to find file", err) + serve.Error(ctx, remote, w, "Failed to find file", err) return } if !node.IsFile() { @@ -288,7 +289,7 @@ func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string) // open the object in, err := file.Open(os.O_RDONLY) if err != nil { - serve.Error(remote, w, "Failed to open file", err) + serve.Error(ctx, remote, w, "Failed to open file", err) return } defer func() { diff --git a/cmd/serve/nfs/filesystem.go b/cmd/serve/nfs/filesystem.go index 17e044856..db918d895 100644 --- a/cmd/serve/nfs/filesystem.go +++ b/cmd/serve/nfs/filesystem.go @@ -3,6 +3,7 @@ package nfs import ( + "math" "os" "path" "strings" @@ -13,8 +14,34 @@ import ( "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" + "github.com/willscott/go-nfs/file" ) +// setSys sets the Sys() call up for the vfs.Node passed in +// +// The billy abstraction layer does not extend to exposing `uid` and `gid` +// ownership of files. If ownership is important to your file system, you +// will need to ensure that the `os.FileInfo` meets additional constraints. +// In particular, the `Sys()` escape hatch is queried by this library, and +// if your file system populates a [`syscall.Stat_t`](https://golang.org/pkg/syscall/#Stat_t) +// concrete struct, the ownership specified in that object will be used. +// It can also return a file.FileInfo which is easier to manage cross platform +func setSys(fi os.FileInfo) { + node, ok := fi.(vfs.Node) + if !ok { + fs.Errorf(fi, "internal error: %T is not a vfs.Node", fi) + } + vfs := node.VFS() + // Set the UID and GID for the node passed in from the VFS defaults. 
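+ // Illustrative note (not in the upstream source): go-nfs reads this struct + // back via fi.Sys() when marshalling file attributes; a populated + // syscall.Stat_t would also be used, as described above, but file.FileInfo + // is easier to keep portable across platforms.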
+ stat := file.FileInfo{ + Nlink: 1, + UID: vfs.Opt.UID, + GID: vfs.Opt.GID, + Fileid: math.MaxUint64, // without this mounting doesn't work on Linux + } + node.SetSys(&stat) +} + // FS is our wrapper around the VFS to properly support billy.Filesystem interface type FS struct { vfs *vfs.VFS @@ -23,7 +50,14 @@ type FS struct { // ReadDir implements read dir func (f *FS) ReadDir(path string) (dir []os.FileInfo, err error) { defer log.Trace(path, "")("items=%d, err=%v", &dir, &err) - return f.vfs.ReadDir(path) + dir, err = f.vfs.ReadDir(path) + if err != nil { + return nil, err + } + for _, fi := range dir { + setSys(fi) + } + return dir, nil } // Create implements creating new files @@ -47,7 +81,12 @@ func (f *FS) OpenFile(filename string, flag int, perm os.FileMode) (node billy.F // Stat gets the file stat func (f *FS) Stat(filename string) (fi os.FileInfo, err error) { defer log.Trace(filename, "")("fi=%v, err=%v", &fi, &err) - return f.vfs.Stat(filename) + fi, err = f.vfs.Stat(filename) + if err != nil { + return nil, err + } + setSys(fi) + return fi, nil } // Rename renames a file @@ -95,7 +134,12 @@ func (f *FS) MkdirAll(filename string, perm os.FileMode) (err error) { // Lstat gets the stats for symlink func (f *FS) Lstat(filename string) (fi os.FileInfo, err error) { defer log.Trace(filename, "")("fi=%v, err=%v", &fi, &err) - return f.vfs.Stat(filename) + fi, err = f.vfs.Stat(filename) + if err != nil { + return nil, err + } + setSys(fi) + return fi, nil } // Symlink is not supported over NFS diff --git a/cmd/serve/nfs/handler.go b/cmd/serve/nfs/handler.go index de419d4f9..921bd89dc 100644 --- a/cmd/serve/nfs/handler.go +++ b/cmd/serve/nfs/handler.go @@ -24,7 +24,8 @@ type Handler struct { } // NewHandler creates a handler for the provided filesystem -func NewHandler(vfs *vfs.VFS, opt *Options) (handler nfs.Handler, err error) { +func NewHandler(ctx context.Context, vfs *vfs.VFS, opt *Options) (handler nfs.Handler, err error) { + ci := fs.GetConfig(ctx) h := &Handler{ vfs: vfs, opt: *opt, @@ -35,7 +36,20 @@ func NewHandler(vfs *vfs.VFS, opt *Options) (handler nfs.Handler, err error) { if err != nil { return nil, fmt.Errorf("failed to make cache: %w", err) } - nfs.SetLogger(&logIntercepter{Level: nfs.DebugLevel}) + var level nfs.LogLevel + switch { + case ci.LogLevel >= fs.LogLevelDebug: // Debug level, needs -vv + level = nfs.TraceLevel + case ci.LogLevel >= fs.LogLevelInfo: // Transfers, needs -v + level = nfs.InfoLevel + case ci.LogLevel >= fs.LogLevelNotice: // Normal logging, -q suppresses + level = nfs.WarnLevel + case ci.LogLevel >= fs.LogLevelError: // Error - can't be suppressed + level = nfs.ErrorLevel + default: + level = nfs.WarnLevel + } + nfs.SetLogger(&logger{level: level}) return h, nil } @@ -108,120 +122,167 @@ func onUnmount() { } } -// logIntercepter intercepts noisy go-nfs logs and reroutes them to DEBUG -type logIntercepter struct { - Level nfs.LogLevel +// logger handles go-nfs logs and reroutes them to rclone's logging system +type logger struct { + level nfs.LogLevel } -// Intercept intercepts go-nfs logs and calls fs.Debugf instead -func (l *logIntercepter) Intercept(args ...interface{}) { - args = append([]interface{}{"[NFS DEBUG] "}, args...) - argsS := fmt.Sprint(args...) 
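Aside: the setSys change above works because Go's os.FileInfo exposes a Sys() escape hatch that a server can type-assert back to a concrete struct. A self-contained sketch of both sides of that handshake, using invented ownerInfo/statFileInfo types rather than the real go-nfs file.FileInfo:

```
package main

import (
	"fmt"
	"io/fs"
	"time"
)

// ownerInfo is a hypothetical stand-in for go-nfs's file.FileInfo: a
// concrete struct handed back via Sys() so a server can read ownership.
type ownerInfo struct {
	UID, GID uint32
	Fileid   uint64
}

// statFileInfo wraps a name and an ownerInfo behind the fs.FileInfo interface.
type statFileInfo struct {
	name string
	sys  *ownerInfo
}

func (f *statFileInfo) Name() string       { return f.name }
func (f *statFileInfo) Size() int64        { return 0 }
func (f *statFileInfo) Mode() fs.FileMode  { return 0644 }
func (f *statFileInfo) ModTime() time.Time { return time.Time{} }
func (f *statFileInfo) IsDir() bool        { return false }
func (f *statFileInfo) Sys() any           { return f.sys }

func main() {
	var fi fs.FileInfo = &statFileInfo{name: "a.txt", sys: &ownerInfo{UID: 1000, GID: 1000, Fileid: 1}}
	// Consumer side of the escape hatch: type-assert Sys() back to the
	// concrete struct to recover uid/gid.
	if s, ok := fi.Sys().(*ownerInfo); ok {
		fmt.Printf("%s owned by %d:%d\n", fi.Name(), s.UID, s.GID)
	}
}
```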
- fs.Debugf(nil, "%v", argsS) +// logPrint intercepts go-nfs logs and calls rclone's log system instead +func (l *logger) logPrint(level fs.LogLevel, args ...interface{}) { + fs.LogPrintf(level, "nfs", "%s", fmt.Sprint(args...)) } -// Interceptf intercepts go-nfs logs and calls fs.Debugf instead -func (l *logIntercepter) Interceptf(format string, args ...interface{}) { - argsS := fmt.Sprint(args...) - // bit of a workaround... the real fix is probably https://github.com/willscott/go-nfs/pull/28 - if strings.Contains(argsS, "mount.Umnt") { - onUnmount() - } - - fs.Debugf(nil, "[NFS DEBUG] "+format, args...) +// logPrintf intercepts go-nfs logs and calls rclone's log system instead +func (l *logger) logPrintf(level fs.LogLevel, format string, args ...interface{}) { + fs.LogPrintf(level, "nfs", format, args...) } // Debug reroutes go-nfs Debug messages to Intercept -func (l *logIntercepter) Debug(args ...interface{}) { - l.Intercept(args...) +func (l *logger) Debug(args ...interface{}) { + if l.level < nfs.DebugLevel { + return + } + l.logPrint(fs.LogLevelDebug, args...) } -// Debugf reroutes go-nfs Debugf messages to Interceptf -func (l *logIntercepter) Debugf(format string, args ...interface{}) { - l.Interceptf(format, args...) +// Debugf reroutes go-nfs Debugf messages to logPrintf +func (l *logger) Debugf(format string, args ...interface{}) { + if l.level < nfs.DebugLevel { + return + } + l.logPrintf(fs.LogLevelDebug, format, args...) } // Error reroutes go-nfs Error messages to Intercept -func (l *logIntercepter) Error(args ...interface{}) { - l.Intercept(args...) +func (l *logger) Error(args ...interface{}) { + if l.level < nfs.ErrorLevel { + return + } + l.logPrint(fs.LogLevelError, args...) } -// Errorf reroutes go-nfs Errorf messages to Interceptf -func (l *logIntercepter) Errorf(format string, args ...interface{}) { - l.Interceptf(format, args...) +// Errorf reroutes go-nfs Errorf messages to logPrintf +func (l *logger) Errorf(format string, args ...interface{}) { + if l.level < nfs.ErrorLevel { + return + } + l.logPrintf(fs.LogLevelError, format, args...) } // Fatal reroutes go-nfs Fatal messages to Intercept -func (l *logIntercepter) Fatal(args ...interface{}) { - l.Intercept(args...) +func (l *logger) Fatal(args ...interface{}) { + if l.level < nfs.FatalLevel { + return + } + l.logPrint(fs.LogLevelError, args...) } -// Fatalf reroutes go-nfs Fatalf messages to Interceptf -func (l *logIntercepter) Fatalf(format string, args ...interface{}) { - l.Interceptf(format, args...) +// Fatalf reroutes go-nfs Fatalf messages to logPrintf +func (l *logger) Fatalf(format string, args ...interface{}) { + if l.level < nfs.FatalLevel { + return + } + l.logPrintf(fs.LogLevelError, format, args...) } // GetLevel returns the nfs.LogLevel -func (l *logIntercepter) GetLevel() nfs.LogLevel { - return l.Level +func (l *logger) GetLevel() nfs.LogLevel { + return l.level } // Info reroutes go-nfs Info messages to Intercept -func (l *logIntercepter) Info(args ...interface{}) { - l.Intercept(args...) +func (l *logger) Info(args ...interface{}) { + if l.level < nfs.InfoLevel { + return + } + l.logPrint(fs.LogLevelInfo, args...) } -// Infof reroutes go-nfs Infof messages to Interceptf -func (l *logIntercepter) Infof(format string, args ...interface{}) { - l.Interceptf(format, args...) +// Infof reroutes go-nfs Infof messages to logPrintf +func (l *logger) Infof(format string, args ...interface{}) { + if l.level < nfs.InfoLevel { + return + } + l.logPrintf(fs.LogLevelInfo, format, args...) 
} // Panic reroutes go-nfs Panic messages to Intercept -func (l *logIntercepter) Panic(args ...interface{}) { - l.Intercept(args...) +func (l *logger) Panic(args ...interface{}) { + if l.level < nfs.PanicLevel { + return + } + l.logPrint(fs.LogLevelError, args...) } -// Panicf reroutes go-nfs Panicf messages to Interceptf -func (l *logIntercepter) Panicf(format string, args ...interface{}) { - l.Interceptf(format, args...) +// Panicf reroutes go-nfs Panicf messages to logPrintf +func (l *logger) Panicf(format string, args ...interface{}) { + if l.level < nfs.PanicLevel { + return + } + l.logPrintf(fs.LogLevelError, format, args...) } // ParseLevel parses the nfs.LogLevel -func (l *logIntercepter) ParseLevel(level string) (nfs.LogLevel, error) { +func (l *logger) ParseLevel(level string) (nfs.LogLevel, error) { return nfs.Log.ParseLevel(level) } // Print reroutes go-nfs Print messages to Intercept -func (l *logIntercepter) Print(args ...interface{}) { - l.Intercept(args...) +func (l *logger) Print(args ...interface{}) { + if l.level < nfs.InfoLevel { + return + } + l.logPrint(fs.LogLevelInfo, args...) } // Printf reroutes go-nfs Printf messages to Intercept -func (l *logIntercepter) Printf(format string, args ...interface{}) { - l.Interceptf(format, args...) +func (l *logger) Printf(format string, args ...interface{}) { + if l.level < nfs.InfoLevel { + return + } + l.logPrintf(fs.LogLevelInfo, format, args...) } // SetLevel sets the nfs.LogLevel -func (l *logIntercepter) SetLevel(level nfs.LogLevel) { - l.Level = level +func (l *logger) SetLevel(level nfs.LogLevel) { + l.level = level } // Trace reroutes go-nfs Trace messages to Intercept -func (l *logIntercepter) Trace(args ...interface{}) { - l.Intercept(args...) +func (l *logger) Trace(args ...interface{}) { + if l.level < nfs.DebugLevel { + return + } + l.logPrint(fs.LogLevelDebug, args...) } -// Tracef reroutes go-nfs Tracef messages to Interceptf -func (l *logIntercepter) Tracef(format string, args ...interface{}) { - l.Interceptf(format, args...) +// Tracef reroutes go-nfs Tracef messages to logPrintf +func (l *logger) Tracef(format string, args ...interface{}) { + // FIXME BODGE ... the real fix is probably https://github.com/willscott/go-nfs/pull/28 + // This comes from `Log.Tracef("request: %v", w.req)` in conn.go + // DEBUG : nfs: request: RPC #3285799202 (mount.Umnt) + argsS := fmt.Sprint(args...) + if strings.Contains(argsS, "mount.Umnt") { + onUnmount() + } + if l.level < nfs.DebugLevel { + return + } + l.logPrintf(fs.LogLevelDebug, format, args...) } // Warn reroutes go-nfs Warn messages to Intercept -func (l *logIntercepter) Warn(args ...interface{}) { - l.Intercept(args...) +func (l *logger) Warn(args ...interface{}) { + if l.level < nfs.WarnLevel { + return + } + l.logPrint(fs.LogLevelNotice, args...) } -// Warnf reroutes go-nfs Warnf messages to Interceptf -func (l *logIntercepter) Warnf(format string, args ...interface{}) { - l.Interceptf(format, args...) +// Warnf reroutes go-nfs Warnf messages to logPrintf +func (l *logger) Warnf(format string, args ...interface{}) { + if l.level < nfs.WarnLevel { + return + } + l.logPrintf(fs.LogLevelNotice, format, args...) 
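Aside: every method of the rewritten logger above uses the same early-return gate, comparing the message's level against the configured threshold before any work is done. A compact sketch of that gating pattern with made-up level constants (not the go-nfs ones):

```
package main

import "fmt"

type level int

const (
	errorLevel level = iota
	warnLevel
	infoLevel
	debugLevel
)

type gatedLogger struct{ threshold level }

// logf drops any message noisier than the configured threshold before the
// formatting work happens, and forwards the rest.
func (l *gatedLogger) logf(msgLevel level, format string, args ...any) {
	if l.threshold < msgLevel {
		return
	}
	fmt.Printf(format+"\n", args...)
}

func main() {
	l := &gatedLogger{threshold: infoLevel}
	l.logf(infoLevel, "served %d requests", 42)    // printed
	l.logf(debugLevel, "raw request: %q", "GET /") // dropped: below threshold
}
```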
} diff --git a/cmd/serve/nfs/nfs.go b/cmd/serve/nfs/nfs.go index c30b3b755..b6819b52d 100644 --- a/cmd/serve/nfs/nfs.go +++ b/cmd/serve/nfs/nfs.go @@ -43,7 +43,7 @@ var OptionsInfo = fs.Options{{ }} func init() { - fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "nfs", Opt: &opt, Options: OptionsInfo}) + fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "nfs", Opt: &Opt, Options: OptionsInfo}) } type handleCache = fs.Enum[handleCacheChoices] @@ -72,16 +72,17 @@ type Options struct { HandleCacheDir string `config:"nfs_cache_dir"` // where the handle cache should be stored } -var opt Options +// Opt is the default set of serve nfs options +var Opt Options // AddFlags adds flags for serve nfs (and nfsmount) -func AddFlags(flagSet *pflag.FlagSet, Opt *Options) { +func AddFlags(flagSet *pflag.FlagSet) { flags.AddFlagsFromOptions(flagSet, "", OptionsInfo) } func init() { vfsflags.AddFlags(Command.Flags()) - AddFlags(Command.Flags(), &opt) + AddFlags(Command.Flags()) } // Run the command @@ -90,7 +91,7 @@ func Run(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1, command, args) f = cmd.NewFsSrc(args) cmd.Run(false, true, command, func() error { - s, err := NewServer(context.Background(), vfs.New(f, &vfscommon.Opt), &opt) + s, err := NewServer(context.Background(), vfs.New(f, &vfscommon.Opt), &Opt) if err != nil { return err } diff --git a/cmd/serve/nfs/server.go b/cmd/serve/nfs/server.go index baa8acdf6..47584ecd0 100644 --- a/cmd/serve/nfs/server.go +++ b/cmd/serve/nfs/server.go @@ -37,7 +37,7 @@ func NewServer(ctx context.Context, vfs *vfs.VFS, opt *Options) (s *Server, err ctx: ctx, opt: *opt, } - s.handler, err = NewHandler(vfs, opt) + s.handler, err = NewHandler(ctx, vfs, opt) if err != nil { return nil, fmt.Errorf("failed to make NFS handler: %w", err) } diff --git a/cmd/serve/proxy/proxy_test.go b/cmd/serve/proxy/proxy_test.go index 1488e7406..539468d44 100644 --- a/cmd/serve/proxy/proxy_test.go +++ b/cmd/serve/proxy/proxy_test.go @@ -6,11 +6,11 @@ import ( "crypto/rsa" "crypto/sha256" "encoding/base64" - "log" "strings" "testing" _ "github.com/rclone/rclone/backend/local" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/obscure" "github.com/stretchr/testify/assert" @@ -149,11 +149,11 @@ func TestRun(t *testing.T) { privateKey, privateKeyErr := rsa.GenerateKey(rand.Reader, 2048) if privateKeyErr != nil { - log.Fatal("error generating test private key " + privateKeyErr.Error()) + fs.Fatal(nil, "error generating test private key "+privateKeyErr.Error()) } publicKey, publicKeyError := ssh.NewPublicKey(&privateKey.PublicKey) if publicKeyError != nil { - log.Fatal("error generating test public key " + publicKeyError.Error()) + fs.Fatal(nil, "error generating test public key "+publicKeyError.Error()) } publicKeyString := base64.StdEncoding.EncodeToString(publicKey.Marshal()) diff --git a/cmd/serve/sftp/server.go b/cmd/serve/sftp/server.go index 96ab94aa0..843b3c640 100644 --- a/cmd/serve/sftp/server.go +++ b/cmd/serve/sftp/server.go @@ -27,6 +27,7 @@ import ( "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/lib/env" "github.com/rclone/rclone/lib/file" + sdActivation "github.com/rclone/rclone/lib/sdactivation" "github.com/rclone/rclone/vfs" "github.com/rclone/rclone/vfs/vfscommon" "golang.org/x/crypto/ssh" @@ -266,10 +267,27 @@ func (s *server) serve() (err error) { // Once a ServerConfig has been configured, connections can be // accepted. 
- s.listener, err = net.Listen("tcp", s.opt.ListenAddr) + var listener net.Listener + + // In case we run in a socket-activated environment, listen on (the first) + // passed FD. + sdListeners, err := sdActivation.Listeners() if err != nil { - return fmt.Errorf("failed to listen for connection: %w", err) + return fmt.Errorf("unable to acquire listeners: %w", err) } + + if len(sdListeners) > 0 { + if len(sdListeners) > 1 { + fs.LogPrintf(fs.LogLevelWarning, nil, "more than one listener passed, ignoring all but the first.\n") + } + listener = sdListeners[0] + } else { + listener, err = net.Listen("tcp", s.opt.ListenAddr) + if err != nil { + return fmt.Errorf("failed to listen for connection: %w", err) + } + } + s.listener = listener fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr()) go s.acceptConnections() diff --git a/cmd/serve/sftp/sftp.go b/cmd/serve/sftp/sftp.go index 238e4c31e..328e7b561 100644 --- a/cmd/serve/sftp/sftp.go +++ b/cmd/serve/sftp/sftp.go @@ -115,6 +115,17 @@ directory. By default the server binds to localhost:2022 - if you want it to be reachable externally then supply ` + "`--addr :2022`" + ` for example. +This also supports being run with socket activation, in which case it will +listen on the first passed FD. +It can be configured with .socket and .service unit files as described in +https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html + +Socket activation can be tested ad-hoc with the ` + "`systemd-socket-activate`" + ` command: + + systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/ + +This will socket-activate rclone on the first connection to port 2222 over TCP. + Note that the default of ` + "`--vfs-cache-mode off`" + ` is fine for the rclone sftp backend, but it may not be with other SFTP clients. diff --git a/cmd/serve/webdav/webdav.go b/cmd/serve/webdav/webdav.go index 622575e8d..0c5c16416 100644 --- a/cmd/serve/webdav/webdav.go +++ b/cmd/serve/webdav/webdav.go @@ -349,6 +349,7 @@ func (w *WebDAV) ServeHTTP(rw http.ResponseWriter, r *http.Request) { // serveDir serves a directory index at dirRemote // This is similar to serveDir in serve http. 
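Aside: the listener logic above prefers sockets inherited from systemd and only falls back to net.Listen. For orientation, a simplified sketch of the sd_listen_fds(3) convention that lib/sdactivation wraps (inherited sockets start at file descriptor 3); this is an illustration, not rclone's actual implementation:

```
package main

import (
	"fmt"
	"net"
	"os"
	"strconv"
)

// inheritedListeners implements the core of the sd_listen_fds(3) protocol:
// systemd sets LISTEN_PID/LISTEN_FDS and passes sockets starting at fd 3.
func inheritedListeners() ([]net.Listener, error) {
	if os.Getenv("LISTEN_PID") != strconv.Itoa(os.Getpid()) {
		return nil, nil // the fds were not meant for this process
	}
	n, err := strconv.Atoi(os.Getenv("LISTEN_FDS"))
	if err != nil {
		return nil, nil // not socket activated
	}
	var listeners []net.Listener
	for fd := 3; fd < 3+n; fd++ {
		f := os.NewFile(uintptr(fd), "listener")
		l, err := net.FileListener(f)
		if err != nil {
			return nil, fmt.Errorf("fd %d is not a listening socket: %w", fd, err)
		}
		listeners = append(listeners, l)
	}
	return listeners, nil
}

func main() {
	ls, err := inheritedListeners()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var l net.Listener
	if len(ls) > 0 {
		l = ls[0] // socket activated: use the first passed fd
	} else {
		l, err = net.Listen("tcp", "localhost:2022") // fall back to a normal listen
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
	}
	fmt.Println("listening on", l.Addr())
}
```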
func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) { + ctx := r.Context() VFS, err := w.getVFS(r.Context()) if err != nil { http.Error(rw, "Root directory not found", http.StatusNotFound) @@ -361,7 +362,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str http.Error(rw, "Directory not found", http.StatusNotFound) return } else if err != nil { - serve.Error(dirRemote, rw, "Failed to list directory", err) + serve.Error(ctx, dirRemote, rw, "Failed to list directory", err) return } if !node.IsDir() { @@ -372,7 +373,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str dirEntries, err := dir.ReadDirAll() if err != nil { - serve.Error(dirRemote, rw, "Failed to list directory", err) + serve.Error(ctx, dirRemote, rw, "Failed to list directory", err) return } diff --git a/cmd/siginfo_bsd.go b/cmd/siginfo_bsd.go index 2f5798f53..1144459d6 100644 --- a/cmd/siginfo_bsd.go +++ b/cmd/siginfo_bsd.go @@ -3,11 +3,11 @@ package cmd import ( - "log" "os" "os/signal" "syscall" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/accounting" ) @@ -17,7 +17,7 @@ func SigInfoHandler() { signal.Notify(signals, syscall.SIGINFO) go func() { for range signals { - log.Printf("%v\n", accounting.GlobalStats()) + fs.Printf(nil, "%v\n", accounting.GlobalStats()) } }() } diff --git a/cmd/test/info/base32768.go b/cmd/test/info/base32768.go index 8f3f61343..23949a2e1 100644 --- a/cmd/test/info/base32768.go +++ b/cmd/test/info/base32768.go @@ -5,7 +5,6 @@ package info import ( "context" "fmt" - "log" "os" "path/filepath" "strings" @@ -25,7 +24,7 @@ func (r *results) checkBase32768() { n := 0 dir, err := os.MkdirTemp("", "rclone-base32768-files") if err != nil { - log.Printf("Failed to make temp dir: %v", err) + fs.Logf(nil, "Failed to make temp dir: %v", err) return } defer func() { @@ -41,7 +40,7 @@ func (r *results) checkBase32768() { fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String())) err = os.WriteFile(fileName, []byte(fileName), 0666) if err != nil { - log.Printf("write %q failed: %v", fileName, err) + fs.Logf(nil, "write %q failed: %v", fileName, err) return } n++ @@ -50,7 +49,7 @@ func (r *results) checkBase32768() { // Make a local fs fLocal, err := fs.NewFs(ctx, dir) if err != nil { - log.Printf("Failed to make local fs: %v", err) + fs.Logf(nil, "Failed to make local fs: %v", err) return } @@ -61,14 +60,14 @@ func (r *results) checkBase32768() { s = fspath.JoinRootPath(s, testDir) fRemote, err := fs.NewFs(ctx, s) if err != nil { - log.Printf("Failed to make remote fs: %v", err) + fs.Logf(nil, "Failed to make remote fs: %v", err) return } defer func() { err := operations.Purge(ctx, r.f, testDir) if err != nil { - log.Printf("Failed to purge test directory: %v", err) + fs.Logf(nil, "Failed to purge test directory: %v", err) return } }() @@ -76,7 +75,7 @@ func (r *results) checkBase32768() { // Sync local to remote err = sync.Sync(ctx, fRemote, fLocal, false) if err != nil { - log.Printf("Failed to sync remote fs: %v", err) + fs.Logf(nil, "Failed to sync remote fs: %v", err) return } @@ -86,7 +85,7 @@ func (r *results) checkBase32768() { Fsrc: fLocal, }) if err != nil { - log.Printf("Failed to check remote fs: %v", err) + fs.Logf(nil, "Failed to check remote fs: %v", err) return } diff --git a/cmd/test/info/info.go b/cmd/test/info/info.go index c2ac0977d..83bcdfee8 100644 --- a/cmd/test/info/info.go +++ b/cmd/test/info/info.go @@ -10,7 +10,6 @@ import ( "encoding/json" "fmt" "io" - "log" 
"os" "path" "regexp" @@ -77,7 +76,7 @@ code for each one. Run: func(command *cobra.Command, args []string) { cmd.CheckArgs(1, 1e6, command, args) if !checkNormalization && !checkControl && !checkLength && !checkStreaming && !checkBase32768 && !all { - log.Fatalf("no tests selected - select a test or use --all") + fs.Fatalf(nil, "no tests selected - select a test or use --all") } if all { checkNormalization = true @@ -93,7 +92,7 @@ code for each one. fs.Infof(f, "Created temporary directory for test files: %s", tempDirPath) err := f.Mkdir(context.Background(), "") if err != nil { - log.Fatalf("couldn't create temporary directory: %v", err) + fs.Fatalf(nil, "couldn't create temporary directory: %v", err) } cmd.Run(false, false, command, func() error { diff --git a/cmd/test/info/internal/build_csv/main.go b/cmd/test/info/internal/build_csv/main.go index fbabe5b85..fa4b15b53 100644 --- a/cmd/test/info/internal/build_csv/main.go +++ b/cmd/test/info/internal/build_csv/main.go @@ -7,12 +7,12 @@ import ( "flag" "fmt" "io" - "log" "os" "sort" "strconv" "github.com/rclone/rclone/cmd/test/info/internal" + "github.com/rclone/rclone/fs" ) func main() { @@ -24,21 +24,21 @@ func main() { for _, fn := range args { f, err := os.Open(fn) if err != nil { - log.Fatalf("Unable to open %q: %s", fn, err) + fs.Fatalf(nil, "Unable to open %q: %s", fn, err) } var remote internal.InfoReport dec := json.NewDecoder(f) err = dec.Decode(&remote) if err != nil { - log.Fatalf("Unable to decode %q: %s", fn, err) + fs.Fatalf(nil, "Unable to decode %q: %s", fn, err) } if remote.ControlCharacters == nil { - log.Printf("Skipping remote %s: no ControlCharacters", remote.Remote) + fs.Logf(nil, "Skipping remote %s: no ControlCharacters", remote.Remote) } else { remotes = append(remotes, remote) } if err := f.Close(); err != nil { - log.Fatalf("Closing %q failed: %s", fn, err) + fs.Fatalf(nil, "Closing %q failed: %s", fn, err) } } @@ -117,11 +117,11 @@ func main() { } else { f, err := os.Create(*fOut) if err != nil { - log.Fatalf("Unable to create %q: %s", *fOut, err) + fs.Fatalf(nil, "Unable to create %q: %s", *fOut, err) } defer func() { if err := f.Close(); err != nil { - log.Fatalln("Error writing csv:", err) + fs.Fatal(nil, fmt.Sprint("Error writing csv:", err)) } }() writer = f @@ -130,9 +130,9 @@ func main() { w := csv.NewWriter(writer) err := w.WriteAll(records) if err != nil { - log.Fatalln("Error writing csv:", err) + fs.Fatal(nil, fmt.Sprint("Error writing csv:", err)) } else if err := w.Error(); err != nil { - log.Fatalln("Error writing csv:", err) + fs.Fatal(nil, fmt.Sprint("Error writing csv:", err)) } } diff --git a/cmd/test/makefiles/makefiles.go b/cmd/test/makefiles/makefiles.go index f48778cb0..3027d78ae 100644 --- a/cmd/test/makefiles/makefiles.go +++ b/cmd/test/makefiles/makefiles.go @@ -4,7 +4,6 @@ package makefiles import ( "io" - "log" "math" "math/rand" "os" @@ -117,7 +116,7 @@ var makefileCmd = &cobra.Command{ var size fs.SizeSuffix err := size.Set(args[0]) if err != nil { - log.Fatalf("Failed to parse size %q: %v", args[0], err) + fs.Fatalf(nil, "Failed to parse size %q: %v", args[0], err) } start := time.Now() fs.Logf(nil, "Creating %d files of size %v.", len(args[1:]), size) @@ -148,7 +147,7 @@ func commonInit() { } randSource = rand.New(rand.NewSource(seed)) if bool2int(zero)+bool2int(sparse)+bool2int(ascii)+bool2int(pattern)+bool2int(chargen) > 1 { - log.Fatal("Can only supply one of --zero, --sparse, --ascii, --pattern or --chargen") + fs.Fatal(nil, "Can only supply one of --zero, --sparse, --ascii, 
--pattern or --chargen") } switch { case zero, sparse: @@ -276,12 +275,12 @@ func (d *dir) list(path string, output []string) []string { func writeFile(dir, name string, size int64) { err := file.MkdirAll(dir, 0777) if err != nil { - log.Fatalf("Failed to make directory %q: %v", dir, err) + fs.Fatalf(nil, "Failed to make directory %q: %v", dir, err) } path := filepath.Join(dir, name) fd, err := os.Create(path) if err != nil { - log.Fatalf("Failed to open file %q: %v", path, err) + fs.Fatalf(nil, "Failed to open file %q: %v", path, err) } if sparse { err = fd.Truncate(size) @@ -289,11 +288,11 @@ func writeFile(dir, name string, size int64) { _, err = io.CopyN(fd, source, size) } if err != nil { - log.Fatalf("Failed to write %v bytes to file %q: %v", size, path, err) + fs.Fatalf(nil, "Failed to write %v bytes to file %q: %v", size, path, err) } err = fd.Close() if err != nil { - log.Fatalf("Failed to close file %q: %v", path, err) + fs.Fatalf(nil, "Failed to close file %q: %v", path, err) } fs.Infof(path, "Written file size %v", fs.SizeSuffix(size)) } diff --git a/cmd/touch/touch.go b/cmd/touch/touch.go index 4290c47bd..913d052a8 100644 --- a/cmd/touch/touch.go +++ b/cmd/touch/touch.go @@ -6,7 +6,6 @@ import ( "context" "errors" "fmt" - "log" "time" "github.com/rclone/rclone/cmd" @@ -86,7 +85,7 @@ then add the ` + "`--localtime`" + ` flag. func newFsDst(args []string) (f fs.Fs, remote string) { root, remote, err := fspath.Split(args[0]) if err != nil { - log.Fatalf("Parsing %q failed: %v", args[0], err) + fs.Fatalf(nil, "Parsing %q failed: %v", args[0], err) } if root == "" { root = "." diff --git a/cmdtest/cmdtest_test.go b/cmdtest/cmdtest_test.go index 659838959..3f65284b3 100644 --- a/cmdtest/cmdtest_test.go +++ b/cmdtest/cmdtest_test.go @@ -6,13 +6,13 @@ package cmdtest import ( - "log" "os" "os/exec" "path/filepath" "strings" "testing" + "github.com/rclone/rclone/fs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -26,14 +26,14 @@ func TestMain(m *testing.M) { // started by Go test => execute tests err := os.Setenv(rcloneTestMain, "true") if err != nil { - log.Fatalf("Unable to set %s: %s", rcloneTestMain, err.Error()) + fs.Fatalf(nil, "Unable to set %s: %s", rcloneTestMain, err.Error()) } os.Exit(m.Run()) } else { // started by func rcloneExecMain => call rclone main in cmdtest.go err := os.Unsetenv(rcloneTestMain) if err != nil { - log.Fatalf("Unable to unset %s: %s", rcloneTestMain, err.Error()) + fs.Fatalf(nil, "Unable to unset %s: %s", rcloneTestMain, err.Error()) } main() } @@ -47,7 +47,7 @@ const rcloneTestMain = "RCLONE_TEST_MAIN" func rcloneExecMain(env string, args ...string) (string, error) { _, found := os.LookupEnv(rcloneTestMain) if !found { - log.Fatalf("Unexpected execution path: %s is missing.", rcloneTestMain) + fs.Fatalf(nil, "Unexpected execution path: %s is missing.", rcloneTestMain) } // make a call to self to execute rclone main in a predefined environment (enters TestMain above) command := exec.Command(os.Args[0], args...) 
@@ -174,7 +174,7 @@ func TestCmdTest(t *testing.T) { // Test error and error output out, err = rclone("version", "--provoke-an-error") if assert.Error(t, err) { - assert.Contains(t, err.Error(), "exit status 1") + assert.Contains(t, err.Error(), "exit status 2") assert.Contains(t, out, "Error: unknown flag") } diff --git a/cmdtest/environment_test.go b/cmdtest/environment_test.go index 3f1ad8035..2c161bf49 100644 --- a/cmdtest/environment_test.go +++ b/cmdtest/environment_test.go @@ -6,7 +6,9 @@ package cmdtest import ( "os" + "regexp" "runtime" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -344,4 +346,42 @@ func TestEnvironmentVariables(t *testing.T) { env = "" out, err = rcloneEnv(env, "version", "-vv", "--use-json-log") jsonLogOK() + + // Find all the File filter lines in out and return them + parseFileFilters := func(out string) (extensions []string) { + // Match: - (^|/)[^/]*\.jpg$ + find := regexp.MustCompile(`^- \(\^\|\/\)\[\^\/\]\*\\\.(.*?)\$$`) + for _, line := range strings.Split(out, "\n") { + if m := find.FindStringSubmatch(line); m != nil { + extensions = append(extensions, m[1]) + } + } + return extensions + } + + // Make sure that multiple valued (stringArray) environment variables are handled properly + env = `` + out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif", "--exclude", "*.tif") + require.NoError(t, err) + assert.Equal(t, []string{"gif", "tif"}, parseFileFilters(out)) + + env = `RCLONE_EXCLUDE=*.jpg` + out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif") + require.NoError(t, err) + assert.Equal(t, []string{"jpg", "gif"}, parseFileFilters(out)) + + env = `RCLONE_EXCLUDE=*.jpg,*.png` + out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters", "--exclude", "*.gif", "--exclude", "*.tif") + require.NoError(t, err) + assert.Equal(t, []string{"jpg", "png", "gif", "tif"}, parseFileFilters(out)) + + env = `RCLONE_EXCLUDE="*.jpg","*.png"` + out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters") + require.NoError(t, err) + assert.Equal(t, []string{"jpg", "png"}, parseFileFilters(out)) + + env = `RCLONE_EXCLUDE="*.,,,","*.png"` + out, err = rcloneEnv(env, "version", "-vv", "--dump", "filters") + require.NoError(t, err) + assert.Equal(t, []string{",,,", "png"}, parseFileFilters(out)) } diff --git a/docs/content/_index.md b/docs/content/_index.md index 2fa03fb06..45abcfef0 100644 --- a/docs/content/_index.md +++ b/docs/content/_index.md @@ -132,6 +132,7 @@ WebDAV or S3, that work out of the box.) {{< provider name="Hetzner Storage Box" home="https://www.hetzner.com/storage/storage-box" config="/sftp/#hetzner-storage-box" >}} {{< provider name="HiDrive" home="https://www.strato.de/cloud-speicher/" config="/hidrive/" >}} {{< provider name="HTTP" home="https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol" config="/http/" >}} +{{< provider name="iCloud Drive" home="https://icloud.com/" config="/iclouddrive/" >}} {{< provider name="ImageKit" home="https://imagekit.io" config="/imagekit/" >}} {{< provider name="Internet Archive" home="https://archive.org/" config="/internetarchive/" >}} {{< provider name="Jottacloud" home="https://www.jottacloud.com/en/" config="/jottacloud/" >}} @@ -159,6 +160,7 @@ WebDAV or S3, that work out of the box.) 
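Aside: the new environment-variable tests above rely on multi-valued (stringArray) settings being split with CSV-style quoting, which is why the `"*.,,,","*.png"` case keeps its literal commas. A rough sketch of such splitting using the standard library, assuming CSV semantics as the tests suggest (this is an illustration, not rclone's parser):

```
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

// splitEnvList splits a multi-valued environment variable using CSV rules,
// so quoted items may contain commas: `"*.,,,","*.png"` -> [`*.,,,` `*.png`].
func splitEnvList(val string) ([]string, error) {
	return csv.NewReader(strings.NewReader(val)).Read()
}

func main() {
	for _, v := range []string{`*.jpg,*.png`, `"*.,,,","*.png"`} {
		items, err := splitEnvList(v)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		fmt.Printf("%q -> %q\n", v, items)
	}
}
```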
{{< provider name="OpenStack Swift" home="https://docs.openstack.org/swift/latest/" config="/swift/" >}} {{< provider name="Oracle Cloud Storage Swift" home="https://docs.oracle.com/en-us/iaas/integration/doc/configure-object-storage.html" config="/swift/" >}} {{< provider name="Oracle Object Storage" home="https://www.oracle.com/cloud/storage/object-storage" config="/oracleobjectstorage/" >}} +{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}} {{< provider name="ownCloud" home="https://owncloud.org/" config="/webdav/#owncloud" >}} {{< provider name="pCloud" home="https://www.pcloud.com/" config="/pcloud/" >}} {{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}} diff --git a/docs/content/alias.md b/docs/content/alias.md index b0fbcc32a..eb0f9ec15 100644 --- a/docs/content/alias.md +++ b/docs/content/alias.md @@ -56,10 +56,11 @@ Remote or path to alias. Can be "myremote:path/to/dir", "myremote:bucket", "myremote:" or "/local/path". remote> /mnt/storage/backup Remote config --------------------- -[remote] -remote = /mnt/storage/backup --------------------- +Configuration complete. +Options: +- type: alias +- remote: /mnt/storage/backup +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/authors.md b/docs/content/authors.md index 70d161f00..beac7491e 100644 --- a/docs/content/authors.md +++ b/docs/content/authors.md @@ -878,3 +878,25 @@ put them back in again.` >}} * Will Miles * David Seifert <16636962+SoapGentoo@users.noreply.github.com> * Fornax + * Sam Harrison + * Péter Bozsó <3806723+peterbozso@users.noreply.github.com> + * Georg Welzel + * John Oxley + * Pawel Palucha + * crystalstall + * nipil + * yuval-cloudinary <46710068+yuval-cloudinary@users.noreply.github.com> + * Mathieu Moreau + * fsantagostinobietti <6057026+fsantagostinobietti@users.noreply.github.com> + * Oleg Kunitsyn <114359669+hiddenmarten@users.noreply.github.com> + * Divyam <47589864+divyam234@users.noreply.github.com> + * ttionya + * quiescens + * rishi.sridhar + * Lawrence Murray + * Leandro Piccilli + * Benjamin Legrand + * Noam Ross + * lostb1t + * Matthias Gatto + * André Tran diff --git a/docs/content/azureblob.md b/docs/content/azureblob.md index 58c3f1f05..f95f1299a 100644 --- a/docs/content/azureblob.md +++ b/docs/content/azureblob.md @@ -40,12 +40,13 @@ key> base64encodedkey== Endpoint for the service - leave blank normally. endpoint> Remote config --------------------- -[remote] -account = account_name -key = base64encodedkey== -endpoint = --------------------- +Configuration complete. +Options: +- type: azureblob +- account: account_name +- key: base64encodedkey== +- endpoint: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -179,6 +180,13 @@ If the resource has multiple user-assigned identities you will need to unset `env_auth` and set `use_msi` instead. See the [`use_msi` section](#use_msi). +If you are operating in disconnected clouds, or private clouds such as +Azure Stack you may want to set `disable_instance_discovery = true`. +This determines whether rclone requests Microsoft Entra instance +metadata from `https://login.microsoft.com/` before authenticating. +Setting this to `true` will skip this request, making you responsible +for ensuring the configured authority is valid and trustworthy. + ##### Env Auth: 3. 
Azure CLI credentials (as used by the az tool) Credentials created with the `az` tool can be picked up using `env_auth`. @@ -289,6 +297,16 @@ be explicitly specified using exactly one of the `msi_object_id`, If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is set, this is equivalent to using `env_auth`. +#### Azure CLI tool `az` {#use_az} + +Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/) +as the sole means of authentication. + +Setting this can be useful if you wish to use the `az` CLI on a host with +a System Managed Identity that you do not want to use. + +Don't set `env_auth` at the same time. + #### Anonymous {#anonymous} If you want to access resources with public anonymous access then set diff --git a/docs/content/b2.md b/docs/content/b2.md index 28f31c240..6f73e0219 100644 --- a/docs/content/b2.md +++ b/docs/content/b2.md @@ -43,12 +43,13 @@ key> 0123456789abcdef0123456789abcdef0123456789 Endpoint for the service - leave blank normally. endpoint> Remote config --------------------- -[remote] -account = 123456789abc -key = 0123456789abcdef0123456789abcdef0123456789 -endpoint = --------------------- +Configuration complete. +Options: +- type: b2 +- account: 123456789abc +- key: 0123456789abcdef0123456789abcdef0123456789 +- endpoint: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/bisync.md b/docs/content/bisync.md index c51030df1..94a51fd9e 100644 --- a/docs/content/bisync.md +++ b/docs/content/bisync.md @@ -968,12 +968,15 @@ that while concurrent bisync runs are allowed, _be very cautious_ that there is no overlap in the trees being synched between concurrent runs, lest there be replicated files, deleted files and general mayhem. -### Return codes +### Exit codes `rclone bisync` returns the following codes to the calling program: - `0` on a successful run, - `1` for a non-critical failing run (a rerun may be successful), -- `2` for a critically aborted run (requires a `--resync` to recover). +- `2` on syntax or usage error, +- `7` for a critically aborted run (requires a `--resync` to recover). + +See also the section about [exit codes](/docs/#exit-code) in the main docs. ### Graceful Shutdown @@ -1812,6 +1815,12 @@ about _Unison_ and synchronization in general. ## Changelog +### `v1.68` +* Fixed an issue affecting backends that round modtimes to a lower precision. + +### `v1.67` +* Added integration tests against all backends. + ### `v1.66` * Copies and deletes are now handled in one operation instead of two * `--track-renames` and `--backup-dir` are now supported diff --git a/docs/content/box.md b/docs/content/box.md index 512ff59bd..2c70d4c4d 100644 --- a/docs/content/box.md +++ b/docs/content/box.md @@ -68,12 +68,13 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -client_id = -client_secret = -token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"XXX"} --------------------- +Configuration complete. +Options: +- type: box +- client_id: +- client_secret: +- token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"XXX"} +Keep this "remote" remote? 
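Aside: since the bisync exit codes documented above are aimed at calling programs, here is a sketch of how a wrapper could branch on them, e.g. scheduling a `--resync` after a critical abort (the paths and command line are placeholders for illustration):

```
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

// runBisync runs rclone bisync and maps its exit code to a follow-up action.
func runBisync(path1, path2 string) {
	err := exec.Command("rclone", "bisync", path1, path2).Run()
	var exitErr *exec.ExitError
	switch {
	case err == nil: // 0: successful run
		fmt.Println("bisync succeeded")
	case errors.As(err, &exitErr):
		switch exitErr.ExitCode() {
		case 1: // non-critical failure: a rerun may succeed
			fmt.Println("non-critical failure, retrying later")
		case 2: // syntax or usage error: fix the invocation
			fmt.Println("usage error, check the command line")
		case 7: // critical abort: a --resync is required to recover
			fmt.Println("critical abort, run with --resync")
		default:
			fmt.Println("unexpected exit code:", exitErr.ExitCode())
		}
	default:
		fmt.Println("failed to start rclone:", err)
	}
}

func main() {
	runBisync("remote1:path1", "remote2:path2")
}
```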
y) Yes this is OK e) Edit this remote d) Delete this remote @@ -156,11 +157,11 @@ e/n/d/r/c/s/q> e Choose a number from below, or type in an existing value 1 > remote remote> remote --------------------- -[remote] -type = box -token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2017-07-08T23:40:08.059167677+01:00"} --------------------- +Configuration complete. +Options: +- type: box +- token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2017-07-08T23:40:08.059167677+01:00"} +Keep this "remote" remote? Edit remote Value "client_id" = "" Edit? (y/n)> @@ -188,11 +189,11 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = box -token = {"access_token":"YYY","token_type":"bearer","refresh_token":"YYY","expiry":"2017-07-23T12:22:29.259137901+01:00"} --------------------- +Configuration complete. +Options: +- type: box +- token: {"access_token":"YYY","token_type":"bearer","refresh_token":"YYY","expiry":"2017-07-23T12:22:29.259137901+01:00"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/changelog.md b/docs/content/changelog.md index 094baf605..3f04c6746 100644 --- a/docs/content/changelog.md +++ b/docs/content/changelog.md @@ -5,6 +5,146 @@ description: "Rclone Changelog" # Changelog +## v1.68.1 - 2024-09-24 + +[See commits](https://github.com/rclone/rclone/compare/v1.68.0...v1.68.1) + +* Bug Fixes + * build: Fix docker release build (ttionya) + * doc fixes (Nick Craig-Wood, Pawel Palucha) + * fs + * Fix `--dump filters` not always appearing (Nick Craig-Wood) + * Fix setting `stringArray` config values from environment variables (Nick Craig-Wood) + * rc: Fix default value of `--metrics-addr` (Nick Craig-Wood) + * serve docker: Add missing `vfs-read-chunk-streams` option in docker volume driver (Divyam) +* Onedrive + * Fix spurious "Couldn't decode error response: EOF" DEBUG (Nick Craig-Wood) +* Pikpak + * Fix login issue where token retrieval fails (wiserain) +* S3 + * Fix rclone ignoring static credentials when `env_auth=true` (Nick Craig-Wood) + +## v1.68.0 - 2024-09-08 + +[See commits](https://github.com/rclone/rclone/compare/v1.67.0...v1.68.0) + +* New backends + * [Files.com](/filescom) (Sam Harrison) + * [Gofile](/gofile/) (Nick Craig-Wood) + * [Pixeldrain](/pixeldrain/) (Fornax) +* Changed backends + * [S3](/s3/) backend updated to use [AWS SDKv2](https://github.com/aws/aws-sdk-go-v2) as v1 is now unsupported. + * The matrix of providers and auth methods is huge and there could be problems with obscure combinations. + * Please report problems in a [new issue](https://github.com/rclone/rclone/issues/new/choose) on Github. 
+* New commands + * [config encryption](/commands/rclone_config_encryption/): set, remove and check to manage config file encryption (Nick Craig-Wood) +* New Features + * build + * Update to go1.23 and make go1.21 the minimum required version (Nick Craig-Wood) + * Update all dependencies (Nick Craig-Wood) + * Disable wasm/js build due to [go bug #64856](https://github.com/golang/go/issues/64856) (Nick Craig-Wood) + * Enable custom linting rules with ruleguard via gocritic (albertony) + * Update logging statements to make `--use-json-log` work always (albertony) + * Adding new code quality tests and fixing the fallout (albertony) + * config + * Internal config re-organised to be more consistent and make it available from the rc (Nick Craig-Wood) + * Avoid remotes with empty names from the environment (albertony) + * Make listing of remotes more consistent (albertony) + * Make getting config values more consistent (albertony) + * Use `--password-command` to set config file password if supplied (Nick Craig-Wood) + * doc fixes (albertony, crystalstall, David Seifert, Eng Zer Jun, Ernie Hershey, Florian Klink, John Oxley, kapitainsky, Mathieu Moreau, Nick Craig-Wood, nipil, Pétr Bozsó, Russ Bubley, Sam Harrison, Thearas, URenko, Will Miles, yuval-cloudinary) + * fs: Allow semicolons as well as spaces in `--bwlimit` timetable parsing (Kyle Reynolds) + * help + * Global flags help command now takes glob filter (albertony) + * Make help command output less distracting (albertony) + * lib/encoder: Add Raw encoding for use where no encoding at all is required, eg `--local-encoding Raw` (URenko) + * listremotes: Added options for filtering, ordering and json output (albertony) + * nfsmount + * Make the `--sudo` flag work for umount as well as mount (Nick Craig-Wood) + * Add `-o tcp` option to NFS mount options to fix mounting under Linux (Nick Craig-Wood) + * operations: copy: generate stable partial suffix (Georg Welzel) + * rc + * Add [options/info](/rc/#options-info) call to enumerate options (Nick Craig-Wood) + * Add option blocks parameter to [options/get](/rc/#options-get) and [options/info](/rc/#options-info) (Nick Craig-Wood) + * Add [vfs/queue](/rc/#vfs-queue) to show the status of the upload queue (Nick Craig-Wood) + * Add [vfs/queue-set-expiry](/rc/#vfs-queue-set-expiry) to adjust expiry of items in the VFS queue (Nick Craig-Wood) + * Add `--unix-socket` option to `rc` command (Florian Klink) + * Prevent unmount rc command from sending a `STOPPING=1` sd-notify message (AThePeanut4) + * rcserver: Implement [prometheus metrics](/docs/#metrics) on a dedicated port (Oleg Kunitsyn) + * serve dlna + * Also look at "Subs" subdirectory (Florian Klink) + * Don't swallow `video.{idx,sub}` (Florian Klink) + * Set more correct mime type (Florian Klink) + * serve nfs + * Implement on disk cache for file handles selected with `--nfs-cache-type` (Nick Craig-Wood) + * Add tracing to filesystem calls (Nick Craig-Wood) + * Mask unimplemented error from chmod (Nick Craig-Wood) + * Unify the nfs library logging with rclone's logging better (Nick Craig-Wood) + * Fix incorrect user id and group id exported to NFS (Nick Craig-Wood) + * serve s3 + * Implement `--auth-proxy` (Sawjan Gurung) + * Update to AWS SDKv2 by updating `github.com/rclone/gofakes3` (Nick Craig-Wood) +* Bug Fixes + * bisync: Fix sync time problems with backends that round time (eg Dropbox) (nielash) + * serve dlna: Fix panic: invalid argument to Int63n (Nick Craig-Wood) +* VFS + * Add 
[--vfs-read-chunk-streams](/commands/rclone_mount/#vfs-read-chunk-streams-0-1) to parallel read chunks from files (Nick Craig-Wood) + * This can increase mount performance on high bandwidth or large latency links + * Fix cache encoding with special characters (URenko) +* Local + * Fix encoding of root path fix (URenko) + * Add server-side copy (using clone) with xattrs on macOS (nielash) + * `--local-no-clone` flag to disable cloning for server-side copies (nielash) + * Support setting custom `--metadata` during server-side Copy (nielash) +* Azure Blob + * Allow anonymous access for public resources (Nick Craig-Wood) +* B2 + * Include custom upload headers in large file info (Pat Patterson) +* Drive + * Fix copying Google Docs to a backend which only supports SHA1 (Nick Craig-Wood) +* Fichier + * Fix detection of Flood Detected error (Nick Craig-Wood) + * Fix server side move (Nick Craig-Wood) +* HTTP + * Reload client certificates on expiry (Saleh Dindar) + * Support listening on passed FDs (Florian Klink) +* Jottacloud + * Fix setting of metadata on server side move (albertony) +* Onedrive + * Fix nil pointer error when uploading small files (Nick Craig-Wood) +* Pcloud + * Implement `SetModTime` (Georg Welzel) + * Implement `OpenWriterAt` feature to enable multipart uploads (Georg Welzel) +* Pikpak + * Improve data consistency by ensuring async tasks complete (wiserain) + * Implement custom hash to replace wrong sha1 (wiserain) + * Fix error with `copyto` command (wiserain) + * Optimize file move by removing unnecessary `readMetaData()` call (wiserain) + * Non-buffered hash calculation for local source files (wiserain) + * Optimize upload by pre-fetching gcid from API (wiserain) + * Correct file transfer progress for uploads by hash (wiserain) + * Update to using AWS SDK v2 (wiserain) +* S3 + * Update to using AWS SDK v2 (Nick Craig-Wood) + * Add `--s3-sdk-log-mode` to control SDKv2 debugging (Nick Craig-Wood) + * Fix incorrect region for Magalu provider (Filipe Herculano) + * Allow restoring from intelligent-tiering storage class (Pawel Palucha) +* SFTP + * Use `uint32` for mtime to save memory (Tomasz Melcer) + * Ignore useless errors when closing the connection pool (Nick Craig-Wood) + * Support listening on passed FDs (Florian Klink) +* Swift + * Add workarounds for bad listings in Ceph RGW (Paul Collins) + * Add total/free space info in `about` command. (fsantagostinobietti) +* Ulozto + * Fix upload of > 2GB files on 32 bit platforms (Tobias Markus) +* WebDAV + * Add `--webdav-unix-socket-path` to connect to a unix socket (Florian Klink) +* Yandex + * Implement custom user agent to help with upload speeds (Sebastian Bünger) +* Zoho + * Fix inefficiencies uploading with new API to avoid throttling (Nick Craig-Wood) + ## v1.67.0 - 2024-06-14 [See commits](https://github.com/rclone/rclone/compare/v1.66.0...v1.67.0) diff --git a/docs/content/combine.md b/docs/content/combine.md index 15d0154e4..d6f05ca79 100644 --- a/docs/content/combine.md +++ b/docs/content/combine.md @@ -84,11 +84,11 @@ Embedded spaces can be added using quotes "dir=remote:path with space" "dir2=remote2:path with space" Enter a fs.SpaceSepList value. upstreams> images=s3:imagesbucket files=drive:important/files --------------------- -[remote] -type = combine -upstreams = images=s3:imagesbucket files=drive:important/files --------------------- +Configuration complete. +Options: +- type: combine +- upstreams: images=s3:imagesbucket files=drive:important/files +Keep this "remote" remote? 
y) Yes this is OK (default) e) Edit this remote d) Delete this remote diff --git a/docs/content/commands/rclone.md b/docs/content/commands/rclone.md index c73ff6f5e..2a39a39f8 100644 --- a/docs/content/commands/rclone.md +++ b/docs/content/commands/rclone.md @@ -3,12 +3,11 @@ title: "rclone" description: "Show help for rclone commands, flags and backends." # autogenerated - DO NOT EDIT, instead edit the source code in cmd/ and as part of making a release run "make commanddocs" --- -## rclone +# rclone Show help for rclone commands, flags and backends. -### Synopsis - +## Synopsis Rclone syncs files to and from cloud storage providers as well as mounting them, listing them in lots of different ways. @@ -22,7 +21,7 @@ documentation, changelog and configuration walkthroughs. rclone [flags] ``` -### Options +## Options ``` --alias-description string Description of the remote @@ -123,7 +122,7 @@ rclone [flags] --box-token-url string Token server url --box-upload-cutoff SizeSuffix Cutoff for switching to multipart upload (>= 50 MiB) (default 50Mi) --buffer-size SizeSuffix In memory buffer size when reading files for each --transfer (default 16Mi) - --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable. + --bwlimit BwTimetable Bandwidth limit in KiB/s, or use suffix B|K|M|G|T|P or a full timetable --bwlimit-file BwTimetable Bandwidth limit per file in KiB/s, or use suffix B|K|M|G|T|P or a full timetable --ca-cert stringArray CA certificate used to verify servers --cache-chunk-clean-interval Duration How often should the cache perform cleanups of the chunk storage (default 1m0s) @@ -150,7 +149,7 @@ rclone [flags] --cache-writes Cache file data on writes through the FS --check-first Do all the checks before starting transfers --checkers int Number of checkers to run in parallel (default 8) - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) --chunker-chunk-size SizeSuffix Files larger than chunk size will be split in chunks (default 2Gi) --chunker-description string Description of the remote --chunker-fail-hard Choose how chunker should handle files with missing or invalid chunks @@ -161,7 +160,7 @@ rclone [flags] --color AUTO|NEVER|ALWAYS When to show colors (and other ANSI codes) AUTO|NEVER|ALWAYS (default AUTO) --combine-description string Description of the remote --combine-upstreams SpaceSepList Upstreams for combining - --compare-dest stringArray Include additional comma separated server-side paths during comparison + --compare-dest stringArray Include additional server-side paths during comparison --compress-description string Description of the remote --compress-level int GZIP compression level (-2 to 9) (default -1) --compress-mode string Compression mode (default "gzip") @@ -192,7 +191,7 @@ rclone [flags] --delete-during When synchronizing, delete files during transfer --delete-excluded Delete files on dest excluded from sync --disable string Disable a comma separated list of features (use --disable help to see a list) - --disable-http-keep-alives Disable HTTP keep-alives and use each connection once. 
+ --disable-http-keep-alives Disable HTTP keep-alives and use each connection once --disable-http2 Disable HTTP/2 in the global transport --drive-acknowledge-abuse Set to allow files which return cannotDownloadAbusiveFile to be downloaded --drive-allow-import-name-change Allow the filetype to change when uploading Google docs @@ -288,6 +287,12 @@ rclone [flags] --filefabric-version string Version read from the file fabric --files-from stringArray Read list of source-file names from file (use - to read from stdin) --files-from-raw stringArray Read list of source-file names from file without any processing of lines (use - to read from stdin) + --filescom-api-key string The API key used to authenticate with Files.com + --filescom-description string Description of the remote + --filescom-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) + --filescom-password string The password used to authenticate with Files.com (obscured) + --filescom-site string Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com) + --filescom-username string The username used to authenticate with Files.com -f, --filter stringArray Add a file filtering rule --filter-from stringArray Read file filtering patterns from a file (use - to read from stdin) --fix-case Force rename of case insensitive dest to match source @@ -336,6 +341,12 @@ rclone [flags] --gcs-token string OAuth Access Token as a JSON blob --gcs-token-url string Token server url --gcs-user-project string User project + --gofile-access-token string API Access token + --gofile-account-id string Account ID + --gofile-description string Description of the remote + --gofile-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftPeriod,RightPeriod,InvalidUtf8,Dot,Exclamation) + --gofile-list-chunk int Number of items to list in each call (default 1000) + --gofile-root-folder-id string ID of the root folder --gphotos-auth-url string Auth server URL --gphotos-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) --gphotos-batch-mode string Upload file batching sync|async|off (default "sync") @@ -445,6 +456,7 @@ rclone [flags] --local-description string Description of the remote --local-encoding Encoding The encoding for the backend (default Slash,Dot) --local-no-check-updated Don't check to see if the files change during upload + --local-no-clone Disable reflink cloning for server-side copies --local-no-preallocate Disable preallocation of disk space for transferred files --local-no-set-modtime Disable setting modtime --local-no-sparse Disable sparse files for multi-thread downloads @@ -498,6 +510,22 @@ rclone [flags] --metadata-include-from stringArray Read metadata include patterns from file (use - to read from stdin) --metadata-mapper SpaceSepList Program to run to transforming metadata before upload --metadata-set stringArray Add metadata key=value when uploading + --metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""]) + --metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from + --metrics-baseurl string Prefix for URLs - leave blank for root + --metrics-cert string TLS PEM key (concatenation of certificate and CA certificate) + --metrics-client-ca string Client certificate authority to verify clients with + --metrics-htpasswd string A htpasswd file - if not provided no 
authentication is done + --metrics-key string TLS PEM Private key + --metrics-max-header-bytes int Maximum size of request header (default 4096) + --metrics-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0") + --metrics-pass string Password for authentication + --metrics-realm string Realm for authentication + --metrics-salt string Password hashing salt (default "dlPL2MqE") + --metrics-server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --metrics-server-write-timeout Duration Timeout for server writing data (default 1h0m0s) + --metrics-template string User-specified template + --metrics-user string User name for authentication --min-age Duration Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y (default off) --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) --modify-window Duration Max time diff to be considered the same (default 1ns) @@ -603,6 +631,10 @@ rclone [flags] --pikpak-upload-concurrency int Concurrency for multipart uploads (default 5) --pikpak-use-trash Send files to the trash instead of deleting permanently (default true) --pikpak-user string Pikpak username + --pixeldrain-api-key string API key for your pixeldrain account + --pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api") + --pixeldrain-description string Description of the remote + --pixeldrain-root-folder-id string Root of the filesystem to use (default "me") --premiumizeme-auth-url string Auth server URL --premiumizeme-client-id string OAuth Client Id --premiumizeme-client-secret string OAuth Client Secret @@ -651,12 +683,12 @@ rclone [flags] --quatrix-skip-project-folders Skip project folders in operations -q, --quiet Print as little stuff as possible --rc Enable the remote control server - --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572]) + --rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"]) --rc-allow-origin string Origin which cross-domain request (CORS) can be executed from --rc-baseurl string Prefix for URLs - leave blank for root --rc-cert string TLS PEM key (concatenation of certificate and CA certificate) --rc-client-ca string Client certificate authority to verify clients with - --rc-enable-metrics Enable prometheus metrics on /metrics + --rc-enable-metrics Enable the Prometheus metrics path at the remote control server --rc-files string Path to local files to serve on the HTTP server --rc-htpasswd string A htpasswd file - if not provided no authentication is done --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s) @@ -712,6 +744,7 @@ rclone [flags] --s3-provider string Choose your S3 provider --s3-region string Region to connect to --s3-requester-pays Enables requester pays option when interacting with S3 bucket + --s3-sdk-log-mode Bits Set to debug the SDK (default Off) --s3-secret-access-key string AWS Secret Access Key (password) --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 --s3-session-token string An AWS session token @@ -722,7 +755,6 @@ rclone [flags] --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key --s3-storage-class string The storage class to use when 
storing new objects in S3 - --s3-sts-endpoint string Endpoint for STS --s3-upload-concurrency int Concurrency for multipart uploads and copies (default 4) --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint @@ -732,6 +764,7 @@ rclone [flags] --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) --s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset) --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads + --s3-use-unsigned-payload Tristate Whether to use an unsigned payload in PutObject (default unset) --s3-v2-auth If true use v2 authentication --s3-version-at Time Show file versions as they were at the specified time (default off) --s3-version-deleted Show deleted file markers when using versions @@ -852,10 +885,12 @@ rclone [flags] --swift-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8) --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") --swift-env-auth Get swift credentials from environment variables in standard OpenStack form + --swift-fetch-until-empty-page When paginating, always fetch unless we received an empty page --swift-key string API key or password (OS_PASSWORD) --swift-leave-parts-on-error If true avoid calling abort upload on a failure --swift-no-chunk Don't chunk files during streaming upload --swift-no-large-objects Disable support for static and dynamic large objects + --swift-partial-page-fetch-threshold int When paginating, fetch if the current page is within this percentage of the limit --swift-region string Region name - optional (OS_REGION_NAME) --swift-storage-policy string The storage policy to use when creating a new container --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) @@ -866,7 +901,7 @@ rclone [flags] --swift-user string User name to log in (OS_USERNAME) --swift-user-id string User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID) --syslog Use Syslog for logging - --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON") + --syslog-facility string Facility for syslog, e.g. KERN,USER (default "DAEMON") --temp-dir string Directory rclone will use for temporary files (default "/tmp") --timeout Duration IO idle timeout (default 5m0s) --tpslimit float Limit HTTP transactions per second to this @@ -897,7 +932,7 @@ rclone [flags] --use-json-log Use json log format --use-mmap Use mmap allocator (see docs) --use-server-modtime Use server modified time instead of object metadata - --user-agent string Set the user-agent to a specified string (default "rclone/v1.67.0") + --user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0") -v, --verbose count Print lots more stuff (repeat for more) -V, --version Print the version number --webdav-bearer-token string Bearer token instead of user/pass (e.g. 
a Macaroon) @@ -910,6 +945,7 @@ rclone [flags] --webdav-owncloud-exclude-shares Exclude ownCloud shares --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms) --webdav-pass string Password (obscured) + --webdav-unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly --webdav-url string URL of http host to connect to --webdav-user string User name --webdav-vendor string Name of the WebDAV site/service/software you are using @@ -919,6 +955,7 @@ rclone [flags] --yandex-description string Description of the remote --yandex-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) --yandex-hard-delete Delete files permanently rather than putting them into the trash + --yandex-spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance (default true) --yandex-token string OAuth Access Token as a JSON blob --yandex-token-url string Token server url --zoho-auth-url string Auth server URL @@ -931,7 +968,7 @@ rclone [flags] --zoho-token-url string Token server url ``` -### SEE ALSO +## See Also * [rclone about](/commands/rclone_about/) - Get quota information from the remote. * [rclone authorize](/commands/rclone_authorize/) - Remote authorization. diff --git a/docs/content/commands/rclone_about.md b/docs/content/commands/rclone_about.md index bee45538d..5ef4d9234 100644 --- a/docs/content/commands/rclone_about.md +++ b/docs/content/commands/rclone_about.md @@ -10,8 +10,7 @@ Get quota information from the remote. ## Synopsis - -`rclone about` prints quota information about a remote to standard +Prints quota information about a remote to standard output. The output is typically used, free, quota and trash contents. E.g. Typical output from `rclone about remote:` is: @@ -70,10 +69,9 @@ rclone about remote: [flags] --json Format output as JSON ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_authorize.md b/docs/content/commands/rclone_authorize.md index f13052c95..abdaff9d2 100644 --- a/docs/content/commands/rclone_authorize.md +++ b/docs/content/commands/rclone_authorize.md @@ -10,7 +10,6 @@ Remote authorization. ## Synopsis - Remote authorization. Used to authorize a remote or headless rclone from a machine with a browser - use as instructed by rclone config. @@ -32,10 +31,9 @@ rclone authorize [flags] --template string The path to a custom Go template for generating HTML responses ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_backend.md b/docs/content/commands/rclone_backend.md index def1f8bba..6708ef634 100644 --- a/docs/content/commands/rclone_backend.md +++ b/docs/content/commands/rclone_backend.md @@ -10,7 +10,6 @@ Run a backend-specific command. ## Synopsis - This runs a backend-specific command. The commands themselves (except for "help" and "features") are defined by the backends and you should see the backend docs for definitions. @@ -50,10 +49,12 @@ rclone backend remote:path [opts] [flags] -o, --option stringArray Option in the form name=value or name ``` +Options shared with other commands are described next. 
+See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -61,9 +62,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_bisync.md b/docs/content/commands/rclone_bisync.md index 838d3501c..55fdfdea6 100644 --- a/docs/content/commands/rclone_bisync.md +++ b/docs/content/commands/rclone_bisync.md @@ -63,15 +63,17 @@ rclone bisync remote1:path1 remote2:path2 [flags] --workdir string Use custom working dir - useful for testing. (default: {WORKDIR}) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -103,9 +105,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -113,9 +115,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -142,9 +144,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_cat.md b/docs/content/commands/rclone_cat.md index 3f194f108..6ec5666a0 100644 --- a/docs/content/commands/rclone_cat.md +++ b/docs/content/commands/rclone_cat.md @@ -10,8 +10,7 @@ Concatenates any files and sends them to stdout. ## Synopsis - -rclone cat sends any files to standard output. +Sends any files to standard output. You can use it like this to output a single file @@ -59,10 +58,12 @@ rclone cat remote:path [flags] --tail int Only print the last N characters ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. 
-## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -89,18 +90,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_check.md b/docs/content/commands/rclone_check.md index 66f517ef9..9647280b5 100644 --- a/docs/content/commands/rclone_check.md +++ b/docs/content/commands/rclone_check.md @@ -9,7 +9,6 @@ Checks the files in the source and destination match. ## Synopsis - Checks the files in the source and destination match. It compares sizes and hashes (MD5 or SHA1) and logs a report of files that don't match. It doesn't alter the source or destination. @@ -73,18 +72,20 @@ rclone check source:path dest:path [flags] --one-way Check one way only, source files must exist on remote ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Check Options +### Check Options -Flags used for `rclone check`. +Flags used for check commands ``` --max-backlog int Maximum number of objects in sync or check backlog (default 10000) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -111,18 +112,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_checksum.md b/docs/content/commands/rclone_checksum.md index 0bb9963fe..9f6eaab39 100644 --- a/docs/content/commands/rclone_checksum.md +++ b/docs/content/commands/rclone_checksum.md @@ -10,7 +10,6 @@ Checks the files in the destination against a SUM file. ## Synopsis - Checks that hashsums of destination files match the SUM file. It compares hashes (MD5, SHA1, etc) and logs a report of files which don't match. It doesn't alter the file system. @@ -67,10 +66,12 @@ rclone checksum sumfile dst:path [flags] --one-way Check one way only, source files must exist on remote ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. 
+Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -97,18 +98,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_cleanup.md b/docs/content/commands/rclone_cleanup.md index e4a8b9e8b..8502d3f72 100644 --- a/docs/content/commands/rclone_cleanup.md +++ b/docs/content/commands/rclone_cleanup.md @@ -10,7 +10,6 @@ Clean up the remote if possible. ## Synopsis - Clean up the remote if possible. Empty the trash or delete old file versions. Not supported by all remotes. @@ -25,10 +24,12 @@ rclone cleanup remote:path [flags] -h, --help help for cleanup ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -36,9 +37,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_completion.md b/docs/content/commands/rclone_completion.md index 32394e0a1..d9b7e605d 100644 --- a/docs/content/commands/rclone_completion.md +++ b/docs/content/commands/rclone_completion.md @@ -12,7 +12,6 @@ Output completion script for a given shell. ## Synopsis - Generates a shell completion script for rclone. Run with `--help` to list the supported shells. @@ -23,10 +22,9 @@ Run with `--help` to list the supported shells. -h, --help help for completion ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone completion bash](/commands/rclone_completion_bash/) - Output bash completion script for rclone. diff --git a/docs/content/commands/rclone_completion_bash.md b/docs/content/commands/rclone_completion_bash.md index 4b1cf3fb3..54af5149c 100644 --- a/docs/content/commands/rclone_completion_bash.md +++ b/docs/content/commands/rclone_completion_bash.md @@ -11,12 +11,11 @@ Output bash completion script for rclone. ## Synopsis - Generates a bash shell autocompletion script for rclone. By default, when run without any arguments, - rclone genautocomplete bash + rclone completion bash the generated script will be written to @@ -51,10 +50,9 @@ rclone completion bash [output_file] [flags] -h, --help help for bash ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell. 
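To illustrate the renamed completion command documented above (`rclone completion bash` replacing `rclone genautocomplete bash`), a minimal sketch; the install path shown is the conventional bash-completion directory and is an assumption that may differ per distribution:

```
# Write the completion script to a local file using the documented
# optional output_file argument
rclone completion bash rclone.bash

# Install it system-wide (assumed conventional bash-completion
# location), then start a new shell to pick it up
sudo mv rclone.bash /etc/bash_completion.d/rclone
```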
diff --git a/docs/content/commands/rclone_completion_fish.md b/docs/content/commands/rclone_completion_fish.md index 6b5ed8b64..59dfa52ad 100644 --- a/docs/content/commands/rclone_completion_fish.md +++ b/docs/content/commands/rclone_completion_fish.md @@ -11,13 +11,12 @@ Output fish completion script for rclone. ## Synopsis - Generates a fish autocompletion script for rclone. This writes to /etc/fish/completions/rclone.fish by default so will probably need to be run with sudo or as root, e.g. - sudo rclone genautocomplete fish + sudo rclone completion fish Logout and login again to use the autocompletion scripts, or source them directly @@ -40,10 +39,9 @@ rclone completion fish [output_file] [flags] -h, --help help for fish ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell. diff --git a/docs/content/commands/rclone_completion_powershell.md b/docs/content/commands/rclone_completion_powershell.md index 57b5a0615..f872531a3 100644 --- a/docs/content/commands/rclone_completion_powershell.md +++ b/docs/content/commands/rclone_completion_powershell.md @@ -11,7 +11,6 @@ Output powershell completion script for rclone. ## Synopsis - Generate the autocompletion script for powershell. To load completions in your current shell session: @@ -34,10 +33,9 @@ rclone completion powershell [output_file] [flags] -h, --help help for powershell ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell. diff --git a/docs/content/commands/rclone_completion_zsh.md b/docs/content/commands/rclone_completion_zsh.md index eb23fa995..a12f3aa84 100644 --- a/docs/content/commands/rclone_completion_zsh.md +++ b/docs/content/commands/rclone_completion_zsh.md @@ -11,13 +11,12 @@ Output zsh completion script for rclone. ## Synopsis - Generates a zsh autocompletion script for rclone. This writes to /usr/share/zsh/vendor-completions/_rclone by default so will probably need to be run with sudo or as root, e.g. - sudo rclone genautocomplete zsh + sudo rclone completion zsh Logout and login again to use the autocompletion scripts, or source them directly @@ -40,10 +39,9 @@ rclone completion zsh [output_file] [flags] -h, --help help for zsh ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone completion](/commands/rclone_completion/) - Output completion script for a given shell. diff --git a/docs/content/commands/rclone_config.md b/docs/content/commands/rclone_config.md index e79798f8b..91b717cbe 100644 --- a/docs/content/commands/rclone_config.md +++ b/docs/content/commands/rclone_config.md @@ -25,10 +25,9 @@ rclone config [flags] -h, --help help for config ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone config create](/commands/rclone_config_create/) - Create a new remote with name, type and options. @@ -36,6 +35,7 @@ See the [global flags page](/flags/) for global options not listed here. * [rclone config disconnect](/commands/rclone_config_disconnect/) - Disconnects user from remote * [rclone config dump](/commands/rclone_config_dump/) - Dump the config file as JSON. 
* [rclone config edit](/commands/rclone_config_edit/) - Enter an interactive configuration session. +* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file * [rclone config file](/commands/rclone_config_file/) - Show path of configuration file in use. * [rclone config password](/commands/rclone_config_password/) - Update password in an existing remote. * [rclone config paths](/commands/rclone_config_paths/) - Show paths used for configuration, cache, temp etc. diff --git a/docs/content/commands/rclone_config_create.md b/docs/content/commands/rclone_config_create.md index 9a52085da..2832b70a8 100644 --- a/docs/content/commands/rclone_config_create.md +++ b/docs/content/commands/rclone_config_create.md @@ -10,7 +10,6 @@ Create a new remote with name, type and options. ## Synopsis - Create a new remote of `name` with `type` and options. The options should be passed in pairs of `key` `value` or as `key=value`. @@ -130,10 +129,9 @@ rclone config create name type [key value]* [flags] --state string State - use with --continue ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_delete.md b/docs/content/commands/rclone_config_delete.md index a1e740dd1..8ef2f744a 100644 --- a/docs/content/commands/rclone_config_delete.md +++ b/docs/content/commands/rclone_config_delete.md @@ -18,10 +18,9 @@ rclone config delete name [flags] -h, --help help for delete ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_disconnect.md b/docs/content/commands/rclone_config_disconnect.md index 1acdb209a..044842043 100644 --- a/docs/content/commands/rclone_config_disconnect.md +++ b/docs/content/commands/rclone_config_disconnect.md @@ -9,7 +9,6 @@ Disconnects user from remote ## Synopsis - This disconnects the remote: passed in to the cloud storage system. This normally means revoking the oauth token. @@ -27,10 +26,9 @@ rclone config disconnect remote: [flags] -h, --help help for disconnect ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_dump.md b/docs/content/commands/rclone_config_dump.md index d7f8f55d3..7a204b3ee 100644 --- a/docs/content/commands/rclone_config_dump.md +++ b/docs/content/commands/rclone_config_dump.md @@ -18,10 +18,9 @@ rclone config dump [flags] -h, --help help for dump ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_edit.md b/docs/content/commands/rclone_config_edit.md index 4fbd49124..0e988af6d 100644 --- a/docs/content/commands/rclone_config_edit.md +++ b/docs/content/commands/rclone_config_edit.md @@ -25,10 +25,9 @@ rclone config edit [flags] -h, --help help for edit ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. 
diff --git a/docs/content/commands/rclone_config_encryption.md b/docs/content/commands/rclone_config_encryption.md new file mode 100644 index 000000000..b7c552ee6 --- /dev/null +++ b/docs/content/commands/rclone_config_encryption.md @@ -0,0 +1,30 @@ +--- +title: "rclone config encryption" +description: "set, remove and check the encryption for the config file" +# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/encryption/ and as part of making a release run "make commanddocs" +--- +# rclone config encryption + +set, remove and check the encryption for the config file + +## Synopsis + +This command sets, clears and checks the encryption for the config file using +the subcommands below. + + +## Options + +``` + -h, --help help for encryption +``` + +See the [global flags page](/flags/) for global options not listed here. + +## See Also + +* [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. +* [rclone config encryption check](/commands/rclone_config_encryption_check/) - Check that the config file is encrypted +* [rclone config encryption remove](/commands/rclone_config_encryption_remove/) - Remove the config file encryption password +* [rclone config encryption set](/commands/rclone_config_encryption_set/) - Set or change the config file encryption password + diff --git a/docs/content/commands/rclone_config_encryption_check.md b/docs/content/commands/rclone_config_encryption_check.md new file mode 100644 index 000000000..f64c265f6 --- /dev/null +++ b/docs/content/commands/rclone_config_encryption_check.md @@ -0,0 +1,37 @@ +--- +title: "rclone config encryption check" +description: "Check that the config file is encrypted" +# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/encryption/check/ and as part of making a release run "make commanddocs" +--- +# rclone config encryption check + +Check that the config file is encrypted + +## Synopsis + +This checks that the config file is encrypted and that you can decrypt it. + +It will attempt to decrypt the config using the password you supply. + +If decryption fails it will return a non-zero exit code if using +`--password-command`, otherwise it will prompt again for the password. + +If the config file is not encrypted it will return a non-zero exit code. + + +``` +rclone config encryption check [flags] +``` + +## Options + +``` + -h, --help help for check +``` + +See the [global flags page](/flags/) for global options not listed here. + +## See Also + +* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file + diff --git a/docs/content/commands/rclone_config_encryption_remove.md b/docs/content/commands/rclone_config_encryption_remove.md new file mode 100644 index 000000000..fa78458e2 --- /dev/null +++ b/docs/content/commands/rclone_config_encryption_remove.md @@ -0,0 +1,38 @@ +--- +title: "rclone config encryption remove" +description: "Remove the config file encryption password" +# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/encryption/remove/ and as part of making a release run "make commanddocs" +--- +# rclone config encryption remove + +Remove the config file encryption password + +## Synopsis + +Remove the config file encryption password + +This removes the config file encryption, leaving the file unencrypted. + +If `--password-command` is in use, this will be called to supply the old config +password.
+ +If the config was not encrypted then no error will be returned and +this command will do nothing. + + +``` +rclone config encryption remove [flags] +``` + +## Options + +``` + -h, --help help for remove +``` + +See the [global flags page](/flags/) for global options not listed here. + +## See Also + +* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file + diff --git a/docs/content/commands/rclone_config_encryption_set.md b/docs/content/commands/rclone_config_encryption_set.md new file mode 100644 index 000000000..b02dff900 --- /dev/null +++ b/docs/content/commands/rclone_config_encryption_set.md @@ -0,0 +1,48 @@ +--- +title: "rclone config encryption set" +description: "Set or change the config file encryption password" +# autogenerated - DO NOT EDIT, instead edit the source code in cmd/config/encryption/set/ and as part of making a release run "make commanddocs" +--- +# rclone config encryption set + +Set or change the config file encryption password + +## Synopsis + +This command sets or changes the config file encryption password. + +If there was no config password set then it sets a new one, otherwise +it changes the existing config password. + +Note that if you are changing an encryption password using +`--password-command` then this will be called once to decrypt the +config using the old password and then again to read the new +password to re-encrypt the config. + +When `--password-command` is called to change the password then the +environment variable `RCLONE_PASSWORD_CHANGE=1` will be set. So if +changing passwords programmatically you can use the environment +variable to distinguish which password you must supply. + +Alternatively you can remove the password first (with `rclone config +encryption remove`), then set it again with this command, which may be +easier if you don't mind the unencrypted config file being on the disk +briefly. + + +``` +rclone config encryption set [flags] +``` + +## Options + +``` + -h, --help help for set +``` + +See the [global flags page](/flags/) for global options not listed here. + +## See Also + +* [rclone config encryption](/commands/rclone_config_encryption/) - set, remove and check the encryption for the config file + diff --git a/docs/content/commands/rclone_config_file.md b/docs/content/commands/rclone_config_file.md index 73a8d6e0b..68b4f3158 100644 --- a/docs/content/commands/rclone_config_file.md +++ b/docs/content/commands/rclone_config_file.md @@ -18,10 +18,9 @@ rclone config file [flags] -h, --help help for file ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_password.md b/docs/content/commands/rclone_config_password.md index 7d7cf73fd..6a9909ad1 100644 --- a/docs/content/commands/rclone_config_password.md +++ b/docs/content/commands/rclone_config_password.md @@ -10,7 +10,6 @@ Update password in an existing remote. ## Synopsis - Update an existing remote's password. The password should be passed in pairs of `key` `password` or as `key=password`. The `password` should be passed in in clear (unobscured). @@ -34,10 +33,9 @@ rclone config password name [key value]+ [flags] -h, --help help for password ``` - See the [global flags page](/flags/) for global options not listed here.
-# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_paths.md b/docs/content/commands/rclone_config_paths.md index ef72f97ab..807d40259 100644 --- a/docs/content/commands/rclone_config_paths.md +++ b/docs/content/commands/rclone_config_paths.md @@ -18,10 +18,9 @@ rclone config paths [flags] -h, --help help for paths ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_providers.md b/docs/content/commands/rclone_config_providers.md index d8e636b32..d18c663ad 100644 --- a/docs/content/commands/rclone_config_providers.md +++ b/docs/content/commands/rclone_config_providers.md @@ -18,10 +18,9 @@ rclone config providers [flags] -h, --help help for providers ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_reconnect.md b/docs/content/commands/rclone_config_reconnect.md index 97265f46b..0237850d8 100644 --- a/docs/content/commands/rclone_config_reconnect.md +++ b/docs/content/commands/rclone_config_reconnect.md @@ -9,7 +9,6 @@ Re-authenticates user with remote. ## Synopsis - This reconnects remote: passed in to the cloud storage system. To disconnect the remote use "rclone config disconnect". @@ -27,10 +26,9 @@ rclone config reconnect remote: [flags] -h, --help help for reconnect ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_redacted.md b/docs/content/commands/rclone_config_redacted.md index 58221968e..e37f5d4ef 100644 --- a/docs/content/commands/rclone_config_redacted.md +++ b/docs/content/commands/rclone_config_redacted.md @@ -32,10 +32,9 @@ rclone config redacted [] [flags] -h, --help help for redacted ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_show.md b/docs/content/commands/rclone_config_show.md index 37a6f3c5c..eb1897105 100644 --- a/docs/content/commands/rclone_config_show.md +++ b/docs/content/commands/rclone_config_show.md @@ -18,10 +18,9 @@ rclone config show [] [flags] -h, --help help for show ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_touch.md b/docs/content/commands/rclone_config_touch.md index 913d895f7..8fd7a0028 100644 --- a/docs/content/commands/rclone_config_touch.md +++ b/docs/content/commands/rclone_config_touch.md @@ -18,10 +18,9 @@ rclone config touch [flags] -h, --help help for touch ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. 
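Taken together, the new `rclone config encryption` subcommands introduced above support a round trip like the following sketch; the `pass show rclone` password command is purely illustrative:

```
# Set (or change) the config file encryption password interactively
rclone config encryption set

# Verify the config is encrypted and that the password can decrypt it;
# with --password-command a failure yields a non-zero exit code
rclone config encryption check --password-command "pass show rclone"

# Remove the encryption again, leaving the config file unencrypted
rclone config encryption remove
```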
diff --git a/docs/content/commands/rclone_config_update.md b/docs/content/commands/rclone_config_update.md index 3efc6219a..e4a160b6a 100644 --- a/docs/content/commands/rclone_config_update.md +++ b/docs/content/commands/rclone_config_update.md @@ -10,7 +10,6 @@ Update options in an existing remote. ## Synopsis - Update an existing remote's options. The options should be passed in pairs of `key` `value` or as `key=value`. @@ -130,10 +129,9 @@ rclone config update name [key value]+ [flags] --state string State - use with --continue ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_config_userinfo.md b/docs/content/commands/rclone_config_userinfo.md index 87018428f..cd6a04cdf 100644 --- a/docs/content/commands/rclone_config_userinfo.md +++ b/docs/content/commands/rclone_config_userinfo.md @@ -9,7 +9,6 @@ Prints info about logged in user of remote. ## Synopsis - This prints the details of the person logged in to the cloud storage system. @@ -25,10 +24,9 @@ rclone config userinfo remote: [flags] --json Format output as JSON ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone config](/commands/rclone_config/) - Enter an interactive configuration session. diff --git a/docs/content/commands/rclone_copy.md b/docs/content/commands/rclone_copy.md index ccb85397a..a243c2e16 100644 --- a/docs/content/commands/rclone_copy.md +++ b/docs/content/commands/rclone_copy.md @@ -9,7 +9,6 @@ Copy files from source to dest, skipping identical files. ## Synopsis - Copy the source to the destination. Does not transfer files that are identical on source and destination, testing by size and modification time or MD5SUM. Doesn't delete files from the destination. If you @@ -87,15 +86,17 @@ rclone copy source:path dest:path [flags] -h, --help help for copy ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -127,9 +128,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -137,9 +138,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. 
+Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -166,18 +167,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_copyto.md b/docs/content/commands/rclone_copyto.md index 22479d13f..e2d4b05bf 100644 --- a/docs/content/commands/rclone_copyto.md +++ b/docs/content/commands/rclone_copyto.md @@ -10,7 +10,6 @@ Copy files from source to dest, skipping identical files. ## Synopsis - If source:path is a file or directory then it copies it to a file or directory named dest:path. @@ -50,15 +49,17 @@ rclone copyto source:path dest:path [flags] -h, --help help for copyto ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -90,9 +91,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -100,9 +101,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -129,18 +130,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. 
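As a sketch of how the copy flags listed above combine (remote and path names are illustrative):

```
# Trial-run a copy, comparing by size and checksum, and skipping files
# that already exist under the additional server-side path remote:archive
rclone copy source:path remote:current \
    --dry-run --checksum --compare-dest remote:archive
```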
diff --git a/docs/content/commands/rclone_copyurl.md b/docs/content/commands/rclone_copyurl.md index 78d4bc6b7..061644fa3 100644 --- a/docs/content/commands/rclone_copyurl.md +++ b/docs/content/commands/rclone_copyurl.md @@ -10,7 +10,6 @@ Copy the contents of the URL supplied content to dest:path. ## Synopsis - Download a URL's content and copy it to the destination without saving it in temporary storage. @@ -56,10 +55,12 @@ rclone copyurl https://example.com dest:path [flags] --stdout Write the output to stdout rather than a file ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -67,9 +68,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_cryptcheck.md b/docs/content/commands/rclone_cryptcheck.md index bbc961449..1f3a916bd 100644 --- a/docs/content/commands/rclone_cryptcheck.md +++ b/docs/content/commands/rclone_cryptcheck.md @@ -10,10 +10,9 @@ Cryptcheck checks the integrity of an encrypted remote. ## Synopsis - -rclone cryptcheck checks a remote against a [crypted](/crypt/) remote. -This is the equivalent of running rclone [check](/commands/rclone_check/), -but able to check the checksums of the encrypted remote. +Checks a remote against a [crypted](/crypt/) remote. This is the equivalent +of running rclone [check](/commands/rclone_check/), but able to check the +checksums of the encrypted remote. For it to work the underlying remote of the cryptedremote must support some kind of checksum. @@ -76,18 +75,20 @@ rclone cryptcheck remote:path cryptedremote:path [flags] --one-way Check one way only, source files must exist on remote ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Check Options +### Check Options -Flags used for `rclone check`. +Flags used for check commands ``` --max-backlog int Maximum number of objects in sync or check backlog (default 10000) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -114,18 +115,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. 
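A sketch of the cryptcheck usage documented above (remote names are illustrative):

```
# Verify an unencrypted remote against its encrypted counterpart without
# altering either side; --one-way only requires that files present on
# the source also exist on the encrypted remote
rclone cryptcheck remote:path cryptedremote:path --one-way
```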
diff --git a/docs/content/commands/rclone_cryptdecode.md b/docs/content/commands/rclone_cryptdecode.md index 8964268fe..42691cd70 100644 --- a/docs/content/commands/rclone_cryptdecode.md +++ b/docs/content/commands/rclone_cryptdecode.md @@ -10,9 +10,8 @@ Cryptdecode returns unencrypted file names. ## Synopsis - -rclone cryptdecode returns unencrypted file names when provided with -a list of encrypted file names. List limit is 10 items. +Returns unencrypted file names when provided with a list of encrypted file +names. List limit is 10 items. If you supply the `--reverse` flag, it will return encrypted file names. @@ -37,10 +36,9 @@ rclone cryptdecode encryptedremote: encryptedfilename [flags] --reverse Reverse cryptdecode, encrypts filenames ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_dedupe.md b/docs/content/commands/rclone_dedupe.md index 8126fb600..477da82bb 100644 --- a/docs/content/commands/rclone_dedupe.md +++ b/docs/content/commands/rclone_dedupe.md @@ -10,8 +10,6 @@ Interactively find duplicate filenames and delete/rename them. ## Synopsis - - By default `dedupe` interactively finds files with duplicate names and offers to delete all but one or rename them to be different. This is known as deduping by name. @@ -130,10 +128,12 @@ rclone dedupe [mode] remote:path [flags] -h, --help help for dedupe ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -141,9 +141,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_delete.md b/docs/content/commands/rclone_delete.md index 17a326e53..542cfcd16 100644 --- a/docs/content/commands/rclone_delete.md +++ b/docs/content/commands/rclone_delete.md @@ -10,7 +10,6 @@ Remove the files in path. ## Synopsis - Remove the files in path. Unlike [purge](/commands/rclone_purge/) it obeys include/exclude filters so can be used to selectively delete files. @@ -50,10 +49,12 @@ rclone delete remote:path [flags] --rmdirs rmdirs removes empty directories but leaves root intact ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -61,9 +62,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -90,18 +91,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. 
+Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_deletefile.md b/docs/content/commands/rclone_deletefile.md index 37ffc0d11..17fb064d1 100644 --- a/docs/content/commands/rclone_deletefile.md +++ b/docs/content/commands/rclone_deletefile.md @@ -10,7 +10,6 @@ Remove a single file from remote. ## Synopsis - Remove a single file from remote. Unlike `delete` it cannot be used to remove a directory and it doesn't obey include/exclude filters - if the specified file exists, it will always be removed. @@ -26,10 +25,12 @@ rclone deletefile remote:path [flags] -h, --help help for deletefile ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -37,9 +38,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_gendocs.md b/docs/content/commands/rclone_gendocs.md index 43b4ffdf0..3b7bd9aaf 100644 --- a/docs/content/commands/rclone_gendocs.md +++ b/docs/content/commands/rclone_gendocs.md @@ -10,7 +10,6 @@ Output markdown docs for rclone to the directory supplied. ## Synopsis - This produces markdown docs for the rclone commands to the directory supplied. These are in a format suitable for hugo to render into the rclone.org website. @@ -25,10 +24,9 @@ rclone gendocs output_directory [flags] -h, --help help for gendocs ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_gitannex.md b/docs/content/commands/rclone_gitannex.md index bc8d096ec..39410238e 100644 --- a/docs/content/commands/rclone_gitannex.md +++ b/docs/content/commands/rclone_gitannex.md @@ -1,6 +1,8 @@ --- title: "rclone gitannex" description: "Speaks with git-annex over stdin/stdout." +aliases: + - /commands/rclone_git-annex-remote-rclone-builtin/ versionIntroduced: v1.67.0 # autogenerated - DO NOT EDIT, instead edit the source code in cmd/gitannex/ and as part of making a release run "make commanddocs" --- @@ -91,10 +93,9 @@ rclone gitannex [flags] -h, --help help for gitannex ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_hashsum.md b/docs/content/commands/rclone_hashsum.md index 440e7b57d..4ca4f30a2 100644 --- a/docs/content/commands/rclone_hashsum.md +++ b/docs/content/commands/rclone_hashsum.md @@ -10,7 +10,6 @@ Produces a hashsum file for all the objects in the path. 
## Synopsis - Produces a hash file for all the objects in the path using the hash named. The output is in the same format as the standard md5sum/sha1sum tool. @@ -59,10 +58,12 @@ rclone hashsum [<hash> remote:path] [flags] --output-file string Output hashsums to a file rather than the terminal ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -89,18 +90,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_link.md b/docs/content/commands/rclone_link.md index 65d0b2fb8..c07e2e221 100644 --- a/docs/content/commands/rclone_link.md +++ b/docs/content/commands/rclone_link.md @@ -10,8 +10,7 @@ Generate public link to file/folder. ## Synopsis -rclone link will create, retrieve or remove a public link to the given -file or folder. +Create, retrieve or remove a public link to the given file or folder. rclone link remote:path/to/file rclone link remote:path/to/folder/ @@ -45,10 +44,9 @@ rclone link remote:path [flags] --unlink Remove existing public link to file/folder ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_listremotes.md b/docs/content/commands/rclone_listremotes.md index 6e14a8115..77361752b 100644 --- a/docs/content/commands/rclone_listremotes.md +++ b/docs/content/commands/rclone_listremotes.md @@ -11,26 +11,40 @@ List all the remotes in the config file and defined in environment variables. ## Synopsis -rclone listremotes lists all the available remotes from the config file. +Lists all the available remotes from the config file, or the remotes matching +an optional filter. -When used with the `--long` flag it lists the types and the descriptions too. +Prints the result in human-readable format by default: a simple list of +remote names, or, if used with the `--long` flag, a tabular format including +the remote names, types and descriptions. Using flag `--json` produces +machine-readable output instead, which always includes all attributes, including +the source (file or environment). + +The result can be filtered by a filter argument, which applies to all attributes, +and/or by filter flags specific to each attribute. The values must be specified +according to regular rclone filtering pattern syntax.
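A sketch of the listremotes filtering and ordering described above (remote names and patterns are illustrative):

```
# Human-readable tabular listing with names, types and descriptions
rclone listremotes --long

# Machine-readable output restricted to remotes of type drive
rclone listremotes --json --type drive

# Filter argument applying to all attributes, ordered by type then name
rclone listremotes "backup*" --order-by type,name
```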
``` -rclone listremotes [flags] +rclone listremotes [] [flags] ``` ## Options ``` - -h, --help help for listremotes - --long Show the type and the description as well as names + --description string Filter remotes by description + -h, --help help for listremotes + --json Format output as JSON + --long Show type and description in addition to name + --name string Filter remotes by name + --order-by string Instructions on how to order the result, e.g. 'type,name=descending' + --source string Filter remotes by source, e.g. 'file' or 'environment' + --type string Filter remotes by type ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_ls.md b/docs/content/commands/rclone_ls.md index cf1ee9de6..54ad257f1 100644 --- a/docs/content/commands/rclone_ls.md +++ b/docs/content/commands/rclone_ls.md @@ -9,7 +9,6 @@ List the objects in the path with size and path. ## Synopsis - Lists the objects in the source path to standard output in a human readable format with size and path. Recurses by default. @@ -55,10 +54,12 @@ rclone ls remote:path [flags] -h, --help help for ls ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -85,18 +86,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_lsd.md b/docs/content/commands/rclone_lsd.md index 60a79b6dd..90fafb28d 100644 --- a/docs/content/commands/rclone_lsd.md +++ b/docs/content/commands/rclone_lsd.md @@ -9,7 +9,6 @@ List all directories/containers/buckets in the path. ## Synopsis - Lists the directories in the source path to standard output. Does not recurse by default. Use the `-R` flag to recurse. @@ -66,10 +65,12 @@ rclone lsd remote:path [flags] -R, --recursive Recurse into the listing ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -96,18 +97,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. 
+Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_lsf.md b/docs/content/commands/rclone_lsf.md index c7311af01..a02014a66 100644 --- a/docs/content/commands/rclone_lsf.md +++ b/docs/content/commands/rclone_lsf.md @@ -10,7 +10,6 @@ List directories and objects in remote:path formatted for parsing. ## Synopsis - List the contents of the source path (directories and objects) to standard output in a form which is easy to parse by scripts. By default this will just be the names of the objects and directories, @@ -163,10 +162,12 @@ rclone lsf remote:path [flags] -t, --time-format string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -193,18 +194,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_lsjson.md b/docs/content/commands/rclone_lsjson.md index f63e76b15..48ea8b155 100644 --- a/docs/content/commands/rclone_lsjson.md +++ b/docs/content/commands/rclone_lsjson.md @@ -12,7 +12,7 @@ List directories and objects in the path in JSON format. List directories and objects in the path in JSON format. -The output is an array of Items, where each Item looks like this +The output is an array of Items, where each Item looks like this: { "Hashes" : { @@ -34,44 +34,50 @@ The output is an array of Items, where each Item looks like this "Tier" : "hot", } -If `--hash` is not specified, the Hashes property will be omitted. The -types of hash can be specified with the `--hash-type` parameter (which -may be repeated). If `--hash-type` is set then it implies `--hash`. +The exact set of properties included depends on the backend: -If `--no-modtime` is specified then ModTime will be blank. This can -speed things up on remotes where reading the ModTime takes an extra -request (e.g. s3, swift). +- The property IsBucket will only be included for bucket-based remotes, and only + for directories that are buckets. It will always be omitted when value is not true. +- Properties Encrypted and EncryptedPath will only be included for encrypted + remotes, and (as mentioned below) only if the `--encrypted` option is set. -If `--no-mimetype` is specified then MimeType will be blank. 
This can
-speed things up on remotes where reading the MimeType takes an extra
-request (e.g. s3, swift).
+Different options may also affect which properties are included:
-If `--encrypted` is not specified the Encrypted will be omitted.
+- If `--hash` is not specified, the Hashes property will be omitted. The
+  types of hash can be specified with the `--hash-type` parameter (which
+  may be repeated). If `--hash-type` is set then it implies `--hash`.
+- If `--no-modtime` is specified then ModTime will be blank. This can
+  speed things up on remotes where reading the ModTime takes an extra
+  request (e.g. s3, swift).
+- If `--no-mimetype` is specified then MimeType will be blank. This can
+  speed things up on remotes where reading the MimeType takes an extra
+  request (e.g. s3, swift).
+- If `--encrypted` is not specified the Encrypted and EncryptedPath
+  properties will be omitted - even for encrypted remotes.
+- If `--metadata` is set then an additional Metadata property will be
+  returned. This will have [metadata](/docs/#metadata) in rclone standard format
+  as a JSON object.
-If `--dirs-only` is not specified files in addition to directories are
-returned
+The default is to list directories and files/objects, but this can be changed
+with the following options:
-If `--files-only` is not specified directories in addition to the files
-will be returned.
+- If `--dirs-only` is specified then directories will be returned
+  only, no files/objects.
+- If `--files-only` is specified then files will be returned only,
+  no directories.
-If `--metadata` is set then an additional Metadata key will be returned.
-This will have metadata in rclone standard format as a JSON object.
-
-if `--stat` is set then a single JSON blob will be returned about the
-item pointed to. This will return an error if the item isn't found.
-However on bucket based backends (like s3, gcs, b2, azureblob etc) if
-the item isn't found it will return an empty directory as it isn't
-possible to tell empty directories from missing directories there.
+If `--stat` is set then the output is not an array of items,
+but instead a single JSON blob will be returned about the item pointed to.
+This will return an error if the item isn't found, however on bucket based
+backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will
+return an empty directory, as it isn't possible to tell empty directories
+from missing directories there.
The Path field will only show folders below the remote path being listed.
If "remote:path" contains the file "subfolder/file.txt", the Path for
"file.txt" will be "subfolder/file.txt", not
"remote:path/subfolder/file.txt".
When used without `--recursive` the Path will always be the same as Name.
-If the directory is a bucket in a bucket-based backend, then
-"IsBucket" will be set to true. This key won't be present unless it is
-"true".
-
The time is in RFC3339 format with up to nanosecond precision. The
number of decimal digits in the seconds will depend on the precision
that the remote can hold the times, so if times are accurate to the
@@ -81,7 +87,8 @@ accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits
will be shown ("2017-05-31T16:15:57+01:00").
The whole output can be processed as a JSON blob, or alternatively it
-can be processed line by line as each item is written one to a line.
+can be processed line by line as each item is written on individual lines
+(except with `--stat`).
Any of the filtering options can be applied to this command.
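+
+For example, a couple of illustrative invocations (the remote and paths
+are placeholders):
+
+    rclone lsjson --stat remote:path/to/file.txt
+    rclone lsjson --recursive --files-only remote:path
+
+The first returns a single JSON blob for one file, the second a JSON
+array of file items.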
@@ -127,10 +134,12 @@ rclone lsjson remote:path [flags] --stat Just return the info for the pointed to file ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -157,18 +166,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_lsl.md b/docs/content/commands/rclone_lsl.md index 1fca5a44a..ba2d587ca 100644 --- a/docs/content/commands/rclone_lsl.md +++ b/docs/content/commands/rclone_lsl.md @@ -10,7 +10,6 @@ List the objects in path with modification time, size and path. ## Synopsis - Lists the objects in the source path to standard output in a human readable format with modification time, size and path. Recurses by default. @@ -56,10 +55,12 @@ rclone lsl remote:path [flags] -h, --help help for lsl ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -86,18 +87,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_md5sum.md b/docs/content/commands/rclone_md5sum.md index 76160249b..fe40b7b1e 100644 --- a/docs/content/commands/rclone_md5sum.md +++ b/docs/content/commands/rclone_md5sum.md @@ -10,7 +10,6 @@ Produces an md5sum file for all the objects in the path. ## Synopsis - Produces an md5sum file for all the objects in the path. This is in the same format as the standard md5sum tool produces. @@ -43,10 +42,12 @@ rclone md5sum remote:path [flags] --output-file string Output hashsums to a file rather than the terminal ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -73,18 +74,16 @@ Flags for filtering directory listings. 
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
-## Listing Options
+### Listing Options
-Flags for listing directories.
+Flags for listing directories
```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions
```
-See the [global flags page](/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
diff --git a/docs/content/commands/rclone_mkdir.md b/docs/content/commands/rclone_mkdir.md
index 292fa8a0e..94d6637c8 100644
--- a/docs/content/commands/rclone_mkdir.md
+++ b/docs/content/commands/rclone_mkdir.md
@@ -17,10 +17,12 @@ rclone mkdir remote:path [flags]
-h, --help help for mkdir
```
+Options shared with other commands are described next.
+See the [global flags page](/flags/) for global options not listed here.
-## Important Options
+### Important Options
-Important flags useful for most commands.
+Important flags useful for most commands
```
-n, --dry-run Do a trial run with no permanent changes
-i, --interactive Enable interactive mode
-v, --verbose count Print lots more stuff (repeat for more)
```
-See the [global flags page](/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
diff --git a/docs/content/commands/rclone_mount.md b/docs/content/commands/rclone_mount.md
index 9da0e7213..67bd27f55 100644
--- a/docs/content/commands/rclone_mount.md
+++ b/docs/content/commands/rclone_mount.md
@@ -704,6 +704,11 @@ These flags control the chunking:
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0
Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -719,6 +724,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
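+
+For example, an illustrative starting point for a high performance
+object store (the remote name and mountpoint are placeholders, and the
+values will likely need tuning as described above):
+
+    rclone mount remote:path /path/to/mountpoint --vfs-read-chunk-streams 16 --vfs-read-chunk-size 4M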
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -846,9 +875,9 @@ rclone mount remote:path /path/to/mountpoint [flags] --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for mount @@ -864,7 +893,7 @@ rclone mount remote:path /path/to/mountpoint [flags] --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -877,6 +906,7 @@ rclone mount remote:path /path/to/mountpoint [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -886,10 +916,12 @@ rclone mount remote:path /path/to/mountpoint [flags] --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -916,9 +948,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. 
diff --git a/docs/content/commands/rclone_move.md b/docs/content/commands/rclone_move.md index 79a1d3986..7c7796e06 100644 --- a/docs/content/commands/rclone_move.md +++ b/docs/content/commands/rclone_move.md @@ -10,7 +10,6 @@ Move files from source to dest. ## Synopsis - Moves the contents of the source directory to the destination directory. Rclone will error if the source and destination overlap and the remote does not support a server-side directory move operation. @@ -62,15 +61,17 @@ rclone move source:path dest:path [flags] -h, --help help for move ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -102,9 +103,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -112,9 +113,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -141,18 +142,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_moveto.md b/docs/content/commands/rclone_moveto.md index 4df6e05f3..132efe3d2 100644 --- a/docs/content/commands/rclone_moveto.md +++ b/docs/content/commands/rclone_moveto.md @@ -10,7 +10,6 @@ Move file or directory from source to dest. ## Synopsis - If source:path is a file or directory then it moves it to a file or directory named dest:path. @@ -53,15 +52,17 @@ rclone moveto source:path dest:path [flags] -h, --help help for moveto ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. 
+Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -93,9 +94,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -103,9 +104,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -132,18 +133,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_ncdu.md b/docs/content/commands/rclone_ncdu.md index f182aa82a..62623c59d 100644 --- a/docs/content/commands/rclone_ncdu.md +++ b/docs/content/commands/rclone_ncdu.md @@ -10,7 +10,6 @@ Explore a remote with a text based user interface. ## Synopsis - This displays a text based user interface allowing the navigation of a remote. It is most useful for answering the question - "What is using all my disk space?". @@ -63,7 +62,7 @@ These flags have the following meaning: This an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for rclone remotes. It is missing lots of features at the moment -but is useful as it stands. +but is useful as it stands. Unlike ncdu it does not show excluded files. Note that it might take some time to delete big files/directories. The UI won't respond in the meantime since the deletion is done synchronously. @@ -83,10 +82,12 @@ rclone ncdu remote:path [flags] -h, --help help for ncdu ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -113,18 +114,16 @@ Flags for filtering directory listings. 
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
-## Listing Options
+### Listing Options
-Flags for listing directories.
+Flags for listing directories
```
--default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
--fast-list Use recursive list if available; uses more memory but fewer transactions
```
-See the [global flags page](/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
diff --git a/docs/content/commands/rclone_nfsmount.md b/docs/content/commands/rclone_nfsmount.md
index 3c53e6fd2..5230b00d9 100644
--- a/docs/content/commands/rclone_nfsmount.md
+++ b/docs/content/commands/rclone_nfsmount.md
@@ -705,6 +705,11 @@ These flags control the chunking:
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0
Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -720,6 +725,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
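+
+The same tuning applies here; as an illustrative sketch (the values are
+assumptions, not benchmarked recommendations), a higher latency link
+might need more streams:
+
+    rclone nfsmount remote:path /path/to/mountpoint --vfs-read-chunk-streams 32 --vfs-read-chunk-size 4M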
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -848,16 +877,18 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for nfsmount --max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki) --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset) --network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only) + --nfs-cache-dir string The directory the NFS handle cache will use if set --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000) + --nfs-cache-type memory|disk|symlink Type of NFS handle cache to use (default memory) --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files @@ -866,9 +897,9 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] -o, --option stringArray Option for libfuse/WinFsp (repeat if required) --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access - --sudo Use sudo to run the mount command as root. + --sudo Use sudo to run the mount/umount commands as root. 
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -881,6 +912,7 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -890,10 +922,12 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -920,9 +954,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_obscure.md b/docs/content/commands/rclone_obscure.md index 0498ac79b..07f0ddeff 100644 --- a/docs/content/commands/rclone_obscure.md +++ b/docs/content/commands/rclone_obscure.md @@ -44,10 +44,9 @@ rclone obscure password [flags] -h, --help help for obscure ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_purge.md b/docs/content/commands/rclone_purge.md index 84413f168..ab191f57e 100644 --- a/docs/content/commands/rclone_purge.md +++ b/docs/content/commands/rclone_purge.md @@ -9,7 +9,6 @@ Remove the path and all of its contents. ## Synopsis - Remove the path and all of its contents. Note that this does not obey include/exclude filters - everything will be removed. Use the [delete](/commands/rclone_delete/) command if you want to selectively @@ -30,10 +29,12 @@ rclone purge remote:path [flags] -h, --help help for purge ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. 
-## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -41,9 +42,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_rc.md b/docs/content/commands/rclone_rc.md index a5259dd4a..cf54e8454 100644 --- a/docs/content/commands/rclone_rc.md +++ b/docs/content/commands/rclone_rc.md @@ -10,8 +10,6 @@ Run a command against a running rclone. ## Synopsis - - This runs a command against a running rclone. Use the `--url` flag to specify an non default URL to connect on. This can be either a ":port" which is taken to mean "http://localhost:port" or a @@ -22,6 +20,13 @@ A username and password can be passed in with `--user` and `--pass`. Note that `--rc-addr`, `--rc-user`, `--rc-pass` will be read also for `--url`, `--user`, `--pass`. +The `--unix-socket` flag can be used to connect over a unix socket like this + + # start server on /tmp/my.socket + rclone rcd --rc-addr unix:///tmp/my.socket + # Connect to it + rclone rc --unix-socket /tmp/my.socket core/stats + Arguments should be passed in as parameter=value. The result will be returned as a JSON object by default. @@ -68,21 +73,21 @@ rclone rc commands parameter [flags] ## Options ``` - -a, --arg stringArray Argument placed in the "arg" array - -h, --help help for rc - --json string Input JSON - use instead of key=value args - --loopback If set connect to this rclone instance not via HTTP - --no-output If set, don't output the JSON result - -o, --opt stringArray Option in the form name=value or name placed in the "opt" array - --pass string Password to use to connect to rclone remote control - --url string URL to connect to rclone remote control (default "http://localhost:5572/") - --user string Username to use to rclone remote control + -a, --arg stringArray Argument placed in the "arg" array + -h, --help help for rc + --json string Input JSON - use instead of key=value args + --loopback If set connect to this rclone instance not via HTTP + --no-output If set, don't output the JSON result + -o, --opt stringArray Option in the form name=value or name placed in the "opt" array + --pass string Password to use to connect to rclone remote control + --unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly + --url string URL to connect to rclone remote control (default "http://localhost:5572/") + --user string Username to use to rclone remote control ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_rcat.md b/docs/content/commands/rclone_rcat.md index 959ca3d14..bc96d3af6 100644 --- a/docs/content/commands/rclone_rcat.md +++ b/docs/content/commands/rclone_rcat.md @@ -10,9 +10,7 @@ Copies standard input to file on remote. ## Synopsis - -rclone rcat reads from standard input (stdin) and copies it to a -single remote file. +Reads from standard input (stdin) and copies it to a single remote file. 
echo "hello world" | rclone rcat remote:path/to/file ffmpeg - | rclone rcat remote:path/to/file @@ -53,10 +51,12 @@ rclone rcat remote:path [flags] --size int File size hint to preallocate (default -1) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -64,9 +64,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_rcd.md b/docs/content/commands/rclone_rcd.md index 6c64bbb0f..3750bf477 100644 --- a/docs/content/commands/rclone_rcd.md +++ b/docs/content/commands/rclone_rcd.md @@ -35,6 +35,7 @@ or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions. `--rc-addr` may be repeated to listen on multiple IPs/ports/sockets. +Socket activation, described further below, can also be used to accomplish the same. `--rc-server-read-timeout` and `--rc-server-write-timeout` can be used to control the timeouts on the server. Note that this is the total time @@ -67,6 +68,20 @@ certificate authority certificate. values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default "tls1.0"). +## Socket activation + +Instead of the listening addresses specified above, rclone will listen to all +FDs passed by the service manager, if any (and ignore any arguments passed by --rc-addr`). + +This allows rclone to be a socket-activated service. +It can be configured with .socket and .service unit files as described in +https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html + +Socket activation can be tested ad-hoc with the `systemd-socket-activate`command + + systemd-socket-activate -l 8000 -- rclone serve + +This will socket-activate rclone on the first connection to port 8000 over TCP. ### Template `--rc-template` allows a user to specify a custom markup template for HTTP @@ -142,19 +157,21 @@ rclone rcd * [flags] -h, --help help for rcd ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## RC Options +### RC Options -Flags to control the Remote Control API. 
+Flags to control the Remote Control API ``` --rc Enable the remote control server - --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572]) + --rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"]) --rc-allow-origin string Origin which cross-domain request (CORS) can be executed from --rc-baseurl string Prefix for URLs - leave blank for root --rc-cert string TLS PEM key (concatenation of certificate and CA certificate) --rc-client-ca string Client certificate authority to verify clients with - --rc-enable-metrics Enable prometheus metrics on /metrics + --rc-enable-metrics Enable the Prometheus metrics path at the remote control server --rc-files string Path to local files to serve on the HTTP server --rc-htpasswd string A htpasswd file - if not provided no authentication is done --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s) @@ -179,9 +196,7 @@ Flags to control the Remote Control API. --rc-web-gui-update Check and update to latest version of web gui ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_rmdir.md b/docs/content/commands/rclone_rmdir.md index ed5047176..9eb865ee1 100644 --- a/docs/content/commands/rclone_rmdir.md +++ b/docs/content/commands/rclone_rmdir.md @@ -9,7 +9,6 @@ Remove the empty directory at path. ## Synopsis - This removes empty directory given by path. Will not remove the path if it has any objects in it, not even empty subdirectories. Use command [rmdirs](/commands/rclone_rmdirs/) (or [delete](/commands/rclone_delete/) @@ -28,10 +27,12 @@ rclone rmdir remote:path [flags] -h, --help help for rmdir ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -39,9 +40,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_rmdirs.md b/docs/content/commands/rclone_rmdirs.md index d7132f756..b64d3a616 100644 --- a/docs/content/commands/rclone_rmdirs.md +++ b/docs/content/commands/rclone_rmdirs.md @@ -10,7 +10,6 @@ Remove empty directories under the path. ## Synopsis - This recursively removes any empty directories (including directories that only contain empty directories), that it finds under the path. The root path itself will also be removed if it is empty, unless @@ -42,10 +41,12 @@ rclone rmdirs remote:path [flags] --leave-root Do not remove root directory if empty ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -53,9 +54,7 @@ Important flags useful for most commands. 
-v, --verbose count Print lots more stuff (repeat for more)
```
-See the [global flags page](/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
diff --git a/docs/content/commands/rclone_selfupdate.md b/docs/content/commands/rclone_selfupdate.md
index e245ed63f..b32a1ed85 100644
--- a/docs/content/commands/rclone_selfupdate.md
+++ b/docs/content/commands/rclone_selfupdate.md
@@ -1,6 +1,8 @@
---
title: "rclone selfupdate"
description: "Update the rclone binary."
+aliases:
+  - /commands/rclone_self-update/
versionIntroduced: v1.55
# autogenerated - DO NOT EDIT, instead edit the source code in cmd/selfupdate/ and as part of making a release run "make commanddocs"
---
@@ -75,10 +77,9 @@ rclone selfupdate [flags]
--version string Install the given rclone version (default: latest)
```
-
See the [global flags page](/flags/) for global options not listed here.
-# SEE ALSO
+## See Also
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
diff --git a/docs/content/commands/rclone_serve.md b/docs/content/commands/rclone_serve.md
index 80d726cfa..df5c9a9de 100644
--- a/docs/content/commands/rclone_serve.md
+++ b/docs/content/commands/rclone_serve.md
@@ -28,10 +28,9 @@ rclone serve [opts] [flags]
-h, --help help for serve
```
-
See the [global flags page](/flags/) for global options not listed here.
-# SEE ALSO
+## See Also
* [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends.
* [rclone serve dlna](/commands/rclone_serve_dlna/) - Serve remote:path over DLNA
diff --git a/docs/content/commands/rclone_serve_dlna.md b/docs/content/commands/rclone_serve_dlna.md
index a121d76ac..e6ed3d6d2 100644
--- a/docs/content/commands/rclone_serve_dlna.md
+++ b/docs/content/commands/rclone_serve_dlna.md
@@ -21,6 +21,10 @@ based on media formats or file extensions. Additionally, there is no
media transcoding support. This means that some players might show
files that they are not able to play back correctly.
+Rclone will add external subtitle files (.srt) to videos if they have the same
+filename as the video file itself (except the extension), either in the same
+directory as the video, or in a "Subs" subdirectory.
+
## Server options
Use `--addr` to specify which IP address and port the server should
@@ -265,6 +269,11 @@ These flags control the chunking:
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0
Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -280,6 +289,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+ +Some experimentation will be needed to find the optimum values of +`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will +depend on the backend in use and the latency to the backend. + +For high performance object stores (eg AWS S3) a reasonable place to +start might be `--vfs-read-chunk-streams 16` and +`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance +scaled roughly as the `--vfs-read-chunk-streams` setting. + +Similar settings should work for high latency links, but depending on +the latency they may need more `--vfs-read-chunk-streams` in order to +get the throughput. + ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -398,8 +431,8 @@ rclone serve dlna remote:path [flags] --addr string The ip:port or :port to bind the DLNA http server to (default ":7879") --announce-interval Duration The interval between SSDP announcements (default 12m0s) --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for dlna --interface stringArray The interface to use for SSDP (repeat as necessary) @@ -411,7 +444,7 @@ rclone serve dlna remote:path [flags] --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -424,6 +457,7 @@ rclone serve dlna remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -431,10 +465,12 @@ rclone serve dlna remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. 
+Flags for filtering directory listings
```
--delete-excluded Delete files on dest excluded from sync
@@ -461,9 +497,7 @@ Flags for filtering directory listings.
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```
-See the [global flags page](/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also
* [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol.
diff --git a/docs/content/commands/rclone_serve_docker.md b/docs/content/commands/rclone_serve_docker.md
index a5147058d..1a2e86e71 100644
--- a/docs/content/commands/rclone_serve_docker.md
+++ b/docs/content/commands/rclone_serve_docker.md
@@ -281,6 +281,11 @@ These flags control the chunking:
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0
Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -296,6 +301,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
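+
+For example, an illustrative invocation (the chunk values are
+assumptions to be tuned, not recommendations):
+
+    rclone serve docker --vfs-read-chunk-streams 16 --vfs-read-chunk-size 4M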
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -424,9 +453,9 @@ rclone serve docker [flags] --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --forget-state Skip restoring previous state --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -446,7 +475,7 @@ rclone serve docker [flags] --socket-addr string Address or absolute path (default: /run/docker/plugins/rclone.sock) --socket-gid int GID for unix socket (default: current process GID) (default 1000) --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -459,6 +488,7 @@ rclone serve docker [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -468,10 +498,12 @@ rclone serve docker [flags] --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -498,9 +530,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol. 
diff --git a/docs/content/commands/rclone_serve_ftp.md b/docs/content/commands/rclone_serve_ftp.md
index 7c8bfa6f6..9f22e856c 100644
--- a/docs/content/commands/rclone_serve_ftp.md
+++ b/docs/content/commands/rclone_serve_ftp.md
@@ -262,6 +262,11 @@ These flags control the chunking:
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0
Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -277,6 +282,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
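+
+An illustrative invocation (the remote name is a placeholder):
+
+    rclone serve ftp remote:path --vfs-read-chunk-streams 16 --vfs-read-chunk-size 4M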
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -477,8 +506,8 @@ rclone serve ftp remote:path [flags] --auth-proxy string A program to use to create the backend from the auth --cert string TLS PEM key (concatenation of certificate and CA certificate) --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for ftp --key string TLS PEM Private key @@ -491,7 +520,7 @@ rclone serve ftp remote:path [flags] --public-ip string Public IP address to advertise for passive connections --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication (default "anonymous") --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -505,6 +534,7 @@ rclone serve ftp remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -512,10 +542,12 @@ rclone serve ftp remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -542,9 +574,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol. diff --git a/docs/content/commands/rclone_serve_http.md b/docs/content/commands/rclone_serve_http.md index dc45363c2..9206d6f8a 100644 --- a/docs/content/commands/rclone_serve_http.md +++ b/docs/content/commands/rclone_serve_http.md @@ -37,6 +37,7 @@ or just by using an absolute path name. Note that unix sockets bypass the authentication - this is expected to be done with file system permissions. 
`--addr` may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to accomplish the same.
`--server-read-timeout` and `--server-write-timeout` can be used
to control the timeouts on the server. Note that this is the total time
@@ -69,6 +70,20 @@ certificate authority certificate.
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").
+## Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed by `--addr`).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.
### Template
`--template` allows a user to specify a custom markup template for HTTP
@@ -364,6 +379,11 @@ These flags control the chunking:
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+ --vfs-read-chunk-streams int The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0
Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -379,6 +399,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be
Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.
+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
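+
+An illustrative invocation combining this with a listening address (the
+remote name and port are placeholders):
+
+    rclone serve http remote:path --addr :8080 --vfs-read-chunk-streams 16 --vfs-read-chunk-size 4M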
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -575,15 +619,15 @@ rclone serve http remote:path [flags] ## Options ``` - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for http --htpasswd string A htpasswd file - if not provided no authentication is done @@ -602,7 +646,7 @@ rclone serve http remote:path [flags] --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -616,6 +660,7 @@ rclone serve http remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -623,10 +668,12 @@ rclone serve http remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -653,9 +700,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](/flags/) for global options not listed here. 
-
-# SEE ALSO
+## See Also

* [rclone serve](/commands/rclone_serve/)	 - Serve a remote over a protocol.

diff --git a/docs/content/commands/rclone_serve_nfs.md b/docs/content/commands/rclone_serve_nfs.md
index 6d3a264dc..f783876b4 100644
--- a/docs/content/commands/rclone_serve_nfs.md
+++ b/docs/content/commands/rclone_serve_nfs.md
@@ -13,34 +13,69 @@ Serve the remote as an NFS mount

Create an NFS server that serves the given remote over the network.

-The primary purpose for this command is to enable [mount command](/commands/rclone_mount/) on recent macOS versions where
-installing FUSE is very cumbersome.
+This implements an NFSv3 server to serve any rclone remote via NFS.

-Since this is running on NFSv3, no authentication method is available. Any client
-will be able to access the data. To limit access, you can use serve NFS on loopback address
-and rely on secure tunnels (such as SSH). For this reason, by default, a random TCP port is chosen and loopback interface is used for the listening address;
-meaning that it is only available to the local machine. If you want other machines to access the
-NFS mount over local network, you need to specify the listening address and port using `--addr` flag.
+The primary purpose for this command is to enable the [mount
+command](/commands/rclone_mount/) on recent macOS versions where
+installing FUSE is very cumbersome.

-Modifying files through NFS protocol requires VFS caching. Usually you will need to specify `--vfs-cache-mode`
-in order to be able to write to the mountpoint (full is recommended). If you don't specify VFS cache mode,
-the mount will be read-only. Note also that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the caching handler.
-This should not be set too low or you may experience errors when trying to access files. The default is `1000000`, but consider lowering this limit if
-the server's system resource usage causes problems.
+This server does not implement any authentication so any client will be
+able to access the data. To limit access, you can use `serve nfs` on
+the loopback address, rely on secure tunnels (such as SSH), or use
+firewalling.
+
+For this reason, by default, a random TCP port is chosen and the
+loopback interface is used for the listening address;
+meaning that it is only available to the local machine. If you want
+other machines to access the NFS mount over the local network, you need to
+specify the listening address and port using the `--addr` flag.
+
+Modifying files through the NFS protocol requires VFS caching. Usually
+you will need to specify `--vfs-cache-mode` in order to be able to
+write to the mountpoint (`full` is recommended). If you don't specify
+VFS cache mode, the mount will be read-only.
+
+`--nfs-cache-type` controls the type of the NFS handle cache. By
+default this is `memory` where new handles will be randomly allocated
+when needed. These are stored in memory. If the server is restarted
+the handle cache will be lost and connected NFS clients will get stale
+handle errors.
+
+`--nfs-cache-type disk` uses an on disk NFS handle cache. Rclone
+hashes the path of the object and stores it in a file named after the
+hash. These hashes are stored on disk in the directory controlled by
+`--cache-dir` or the exact directory may be specified with
+`--nfs-cache-dir`. Using this means that the NFS server can be
+restarted at will without affecting the connected clients.
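+
+For example, a sketch of a restart-safe NFS server using the on disk
+handle cache (the cache directory here is an arbitrary example):
+
+    rclone serve nfs remote: --nfs-cache-type disk --nfs-cache-dir /var/cache/rclone-nfs --vfs-cache-mode full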
+
+`--nfs-cache-type symlink` is similar to `--nfs-cache-type disk` in
+that it uses an on disk cache, but the cache entries are held as
+symlinks. Rclone will use the handle of the underlying file as the NFS
+handle which improves performance. This sort of cache can't be backed
+up and restored as the underlying handles will change. This is Linux
+only.
+
+`--nfs-cache-handle-limit` controls the maximum number of cached NFS
+handles stored by the caching handler. This should not be set too low
+or you may experience errors when trying to access files. The default
+is `1000000`, but consider lowering this limit if the server's system
+resource usage causes problems. This is only used by the `memory` type
+cache.

To serve NFS over the network use the following command:

    rclone serve nfs remote: --addr 0.0.0.0:$PORT --vfs-cache-mode=full

-We specify a specific port that we can use in the mount command:
-
-To mount the server under Linux/macOS, use the following command:
+This specifies a port that can be used in the mount command. To mount
+the server under Linux/macOS, use the following command:

-    mount -oport=$PORT,mountport=$PORT $HOSTNAME: path/to/mountpoint
+    mount -t nfs -o port=$PORT,mountport=$PORT,tcp $HOSTNAME:/ path/to/mountpoint

-Where `$PORT` is the same port number we used in the serve nfs command.
+Where `$PORT` is the same port number used in the `serve nfs` command
+and `$HOSTNAME` is the network address of the machine that `serve nfs`
+was run on.

-This feature is only available on Unix platforms.
+This command is only available on Unix platforms.

## VFS - Virtual File System
@@ -274,6 +309,11 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -289,6 +329,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
+ ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -406,18 +470,20 @@ rclone serve nfs remote:path [flags] ``` --addr string IPaddress:Port or :Port to bind server to --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for nfs + --nfs-cache-dir string The directory the NFS handle cache will use if set --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000) + --nfs-cache-type memory|disk|symlink Type of NFS handle cache to use (default memory) --no-checksum Don't compare checksums on up/download --no-modtime Don't read/write the modification time (can speed things up) --no-seek Don't allow seeking in files --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -430,6 +496,7 @@ rclone serve nfs remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -437,10 +504,12 @@ rclone serve nfs remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -467,9 +536,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone serve](/commands/rclone_serve/) - Serve a remote over a protocol. 
diff --git a/docs/content/commands/rclone_serve_restic.md b/docs/content/commands/rclone_serve_restic.md
index 25c31c9d6..817b406dd 100644
--- a/docs/content/commands/rclone_serve_restic.md
+++ b/docs/content/commands/rclone_serve_restic.md
@@ -107,6 +107,7 @@ or just by using an absolute path name. Note that unix sockets bypass the
authentication - this is expected to be done with file system permissions.

`--addr` may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to accomplish the same.

`--server-read-timeout` and `--server-write-timeout` can be used to
control the timeouts on the server.  Note that this is the total time
@@ -139,6 +140,20 @@ certificate authority certificate.
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").

+## Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed via `--addr`).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.
+
### Authentication

By default this will serve files without needing a login.
@@ -175,7 +190,7 @@ rclone serve restic remote:path [flags]

## Options

```
-      --addr stringArray            IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
+      --addr stringArray            IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
      --allow-origin string         Origin which cross-domain request (CORS) can be executed from
      --append-only                 Disallow deletion of repository data
      --baseurl string              Prefix for URLs - leave blank for root
@@ -197,10 +212,9 @@ rclone serve restic remote:path [flags]
      --user string                 User name for authentication
```

-
See the [global flags page](/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone serve](/commands/rclone_serve/)	 - Serve a remote over a protocol.

diff --git a/docs/content/commands/rclone_serve_s3.md b/docs/content/commands/rclone_serve_s3.md
index 89f769d07..888affd37 100644
--- a/docs/content/commands/rclone_serve_s3.md
+++ b/docs/content/commands/rclone_serve_s3.md
@@ -147,6 +147,33 @@ metadata which will be set as the modification time of the file.

Other operations will return error `Unimplemented`.

+### Authentication
+
+By default this will serve files without needing a login.
+
+You can either use an htpasswd file which can take lots of users, or
+set a single username and password with the `--user` and `--pass` flags.
+
+If no static users are configured by either of the above methods, and client
+certificates are required by the `--client-ca` flag passed to the server, the
+client certificate common name will be considered as the username.
+
+Use `--htpasswd /path/to/htpasswd` to provide an htpasswd file. This is
+in standard apache format and supports MD5, SHA1 and BCrypt for basic
+authentication. Bcrypt is recommended.
+
+To create an htpasswd file:
+
+    touch htpasswd
+    htpasswd -B htpasswd user
+    htpasswd -B htpasswd anotherUser
+
+The password file can be updated while rclone is running.
+
+Use `--realm` to set the authentication realm.
+
+Use `--salt` to change the password hashing salt from the default.
+
## Server options

Use `--addr` to specify which IP address and port the server should
@@ -162,6 +189,7 @@ or just by using an absolute path name. Note that unix sockets bypass the
authentication - this is expected to be done with file system permissions.

`--addr` may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to accomplish the same.

`--server-read-timeout` and `--server-write-timeout` can be used to
control the timeouts on the server.  Note that this is the total time
@@ -194,6 +222,20 @@ certificate authority certificate.
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").

+## Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed via `--addr`).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.
+
## VFS - Virtual File System

This command uses the VFS layer. This adapts the cloud storage objects
@@ -426,6 +468,11 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -441,6 +488,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+
+For high performance object stores (eg AWS S3) a reasonable place to
+start might be `--vfs-read-chunk-streams 16` and
+`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance
+scaled roughly as the `--vfs-read-chunk-streams` setting.
+
+Similar settings should work for high latency links, but depending on
+the latency they may need more `--vfs-read-chunk-streams` in order to
+get the throughput.
+
## VFS Performance

These flags may be used to enable/disable features of the VFS for
@@ -556,19 +627,21 @@ rclone serve s3 remote:path [flags]

## Options

```
-      --addr stringArray                       IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080])
+      --addr stringArray                       IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080])
      --allow-origin string                    Origin which cross-domain request (CORS) can be executed from
      --auth-key stringArray                   Set key pair for v4 authorization: access_key_id,secret_access_key
+      --auth-proxy string                      A program to use to create the backend from the auth
      --baseurl string                         Prefix for URLs - leave blank for root
      --cert string                            TLS PEM key (concatenation of certificate and CA certificate)
      --client-ca string                       Client certificate authority to verify clients with
      --dir-cache-time Duration                Time to cache directory entries for (default 5m0s)
-      --dir-perms FileMode                     Directory permissions (default 0777)
+      --dir-perms FileMode                     Directory permissions (default 777)
      --etag-hash string                       Which hash to use for the ETag, or auto or blank for off (default "MD5")
-      --file-perms FileMode                    File permissions (default 0666)
+      --file-perms FileMode                    File permissions (default 666)
      --force-path-style                       If true use path style access if false use virtual hosted style (default true)
      --gid uint32                             Override the gid field set by the filesystem (not supported on Windows) (default 1000)
  -h, --help                                   help for s3
+      --htpasswd string                        A htpasswd file - if not provided no authentication is done
      --key string                             TLS PEM Private key
      --max-header-bytes int                   Maximum size of request header (default 4096)
      --min-tls-version string                 Minimum TLS version that is acceptable (default "tls1.0")
@@ -576,12 +649,16 @@ rclone serve s3 remote:path [flags]
      --no-cleanup                             Not to cleanup empty folder after object is deleted
      --no-modtime                             Don't read/write the modification time (can speed things up)
      --no-seek                                Don't allow seeking in files
+      --pass string                            Password for authentication
      --poll-interval Duration                 Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s)
      --read-only                              Only allow read-only access
+      --realm string                           Realm for authentication
+      --salt string                            Password hashing salt (default "dlPL2MqE")
      --server-read-timeout Duration           Timeout for server reading data (default 1h0m0s)
      --server-write-timeout Duration          Timeout for server writing data (default 1h0m0s)
      --uid uint32                             Override the uid field set by the filesystem (not supported on Windows) (default 1000)
-      --umask int                              Override the permission bits set by the filesystem (not supported on Windows) (default 2)
+      --umask FileMode                         Override the permission bits set by the filesystem (not supported on Windows) (default 002)
+      --user string                            User name for authentication
      --vfs-block-norm-dupes                   If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost)
      --vfs-cache-max-age Duration             Max time since last access of objects in the cache (default 1h0m0s)
      --vfs-cache-max-size SizeSuffix          Max total size of objects in the cache (default off)
@@ -594,6 +671,7 @@ rclone serve s3 remote:path [flags]
      --vfs-read-ahead SizeSuffix              Extra read ahead over --buffer-size when using cache-mode full
      --vfs-read-chunk-size SizeSuffix         Read the source objects in chunks (default 128Mi)
      --vfs-read-chunk-size-limit SizeSuffix   If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached
('off' is unlimited) (default off)
+      --vfs-read-chunk-streams int             The number of parallel streams to read at once
      --vfs-read-wait Duration                 Time to wait for in-sequence read before seeking (default 20ms)
      --vfs-refresh                            Refreshes the directory cache recursively in the background on start
      --vfs-used-is-size rclone size           Use the rclone size algorithm for Used size
@@ -601,10 +679,12 @@ rclone serve s3 remote:path [flags]
      --vfs-write-wait Duration                Time to wait for in-sequence write before giving error (default 1s)
```

+Options shared with other commands are described next.
+See the [global flags page](/flags/) for global options not listed here.

-## Filter Options
+### Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

```
      --delete-excluded                     Delete files on dest excluded from sync
@@ -631,9 +711,7 @@ Flags for filtering directory listings.
      --min-size SizeSuffix                 Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```

-See the [global flags page](/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone serve](/commands/rclone_serve/)	 - Serve a remote over a protocol.

diff --git a/docs/content/commands/rclone_serve_sftp.md b/docs/content/commands/rclone_serve_sftp.md
index 2dc502dd4..a662547c4 100644
--- a/docs/content/commands/rclone_serve_sftp.md
+++ b/docs/content/commands/rclone_serve_sftp.md
@@ -43,6 +43,17 @@ directory.

By default the server binds to localhost:2022 - if you want it to be
reachable externally then supply `--addr :2022` for example.

+This also supports being run with socket activation, in which case it will
+listen on the first passed FD.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command:
+
+    systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/
+
+This will socket-activate rclone on the first connection to port 2222 over TCP.
+
Note that the default of `--vfs-cache-mode off` is fine for the rclone
sftp backend, but it may not be with other SFTP clients.

@@ -294,6 +305,11 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -309,6 +325,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+
+Some experimentation will be needed to find the optimum values of
+`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will
+depend on the backend in use and the latency to the backend.
+ +For high performance object stores (eg AWS S3) a reasonable place to +start might be `--vfs-read-chunk-streams 16` and +`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance +scaled roughly as the `--vfs-read-chunk-streams` setting. + +Similar settings should work for high latency links, but depending on +the latency they may need more `--vfs-read-chunk-streams` in order to +get the throughput. + ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -509,8 +549,8 @@ rclone serve sftp remote:path [flags] --auth-proxy string A program to use to create the backend from the auth --authorized-keys string Authorized keys file (default "~/.ssh/authorized_keys") --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for sftp --key stringArray SSH private host key file (Can be multi-valued, leave blank to auto generate) @@ -523,7 +563,7 @@ rclone serve sftp remote:path [flags] --read-only Only allow read-only access --stdio Run an sftp server on stdin/stdout --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -537,6 +577,7 @@ rclone serve sftp remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -544,10 +585,12 @@ rclone serve sftp remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -574,9 +617,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -See the [global flags page](/flags/) for global options not listed here. 
-
-# SEE ALSO
+## See Also

* [rclone serve](/commands/rclone_serve/)	 - Serve a remote over a protocol.

diff --git a/docs/content/commands/rclone_serve_webdav.md b/docs/content/commands/rclone_serve_webdav.md
index 9ec439e7f..acd1d69ce 100644
--- a/docs/content/commands/rclone_serve_webdav.md
+++ b/docs/content/commands/rclone_serve_webdav.md
@@ -52,6 +52,19 @@ Create a new DWORD BasicAuthLevel with value 2.

https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint

+## Serving over a unix socket
+
+You can serve the webdav on a unix socket like this:
+
+    rclone serve webdav --addr unix:///tmp/my.socket remote:path
+
+and connect to it like this using rclone and the webdav backend:
+
+    rclone --webdav-unix-socket /tmp/my.socket --webdav-url http://localhost lsf :webdav:
+
+Note that there is no authentication on the http protocol - this is expected to be
+done by the permissions on the socket.
+
## Server options

Use `--addr` to specify which IP address and port the server should
@@ -67,6 +80,7 @@ or just by using an absolute path name. Note that unix sockets bypass the
authentication - this is expected to be done with file system permissions.

`--addr` may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to accomplish the same.

`--server-read-timeout` and `--server-write-timeout` can be used to
control the timeouts on the server.  Note that this is the total time
@@ -99,6 +113,20 @@ certificate authority certificate.
values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
"tls1.0").

+## Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed via `--addr`).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the `systemd-socket-activate` command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.
+
### Template

`--template` allows a user to specify a custom markup template for HTTP
@@ -394,6 +422,11 @@ These flags control the chunking:

    --vfs-read-chunk-size SizeSuffix        Read the source objects in chunks (default 128M)
    --vfs-read-chunk-size-limit SizeSuffix  Max chunk doubling size (default off)
+    --vfs-read-chunk-streams int            The number of parallel streams to read at once
+
+The chunking behaves differently depending on the `--vfs-read-chunk-streams` parameter.
+
+### `--vfs-read-chunk-streams` == 0

Rclone will start reading a chunk of size `--vfs-read-chunk-size`,
and then double the size for each read. When `--vfs-read-chunk-size-limit` is
@@ -409,6 +442,30 @@ When `--vfs-read-chunk-size-limit 500M` is specified, the result would be

Setting `--vfs-read-chunk-size` to `0` or "off" disables chunked reading.

+The chunks will not be buffered in memory.
+
+### `--vfs-read-chunk-streams` > 0
+
+Rclone reads `--vfs-read-chunk-streams` chunks of size
+`--vfs-read-chunk-size` concurrently. The size for each read will stay
+constant.
+
+This improves performance massively on high latency links
+or very high bandwidth links to high performance object stores.
+ +Some experimentation will be needed to find the optimum values of +`--vfs-read-chunk-size` and `--vfs-read-chunk-streams` as these will +depend on the backend in use and the latency to the backend. + +For high performance object stores (eg AWS S3) a reasonable place to +start might be `--vfs-read-chunk-streams 16` and +`--vfs-read-chunk-size 4M`. In testing with AWS S3 the performance +scaled roughly as the `--vfs-read-chunk-streams` setting. + +Similar settings should work for high latency links, but depending on +the latency they may need more `--vfs-read-chunk-streams` in order to +get the throughput. + ## VFS Performance These flags may be used to enable/disable features of the VFS for @@ -605,17 +662,17 @@ rclone serve webdav remote:path [flags] ## Options ``` - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --disable-dir-list Disable HTML directory list on GET request for a directory --etag-hash string Which hash to use for the ETag, or auto or blank for off - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for webdav --htpasswd string A htpasswd file - if not provided no authentication is done @@ -634,7 +691,7 @@ rclone serve webdav remote:path [flags] --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -648,6 +705,7 @@ rclone serve webdav remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached ('off' is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used 
size
@@ -655,10 +713,12 @@ rclone serve webdav remote:path [flags]
      --vfs-write-wait Duration                Time to wait for in-sequence write before giving error (default 1s)
```

+Options shared with other commands are described next.
+See the [global flags page](/flags/) for global options not listed here.

-## Filter Options
+### Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

```
      --delete-excluded                     Delete files on dest excluded from sync
@@ -685,9 +745,7 @@ Flags for filtering directory listings.
      --min-size SizeSuffix                 Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```

-See the [global flags page](/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone serve](/commands/rclone_serve/)	 - Serve a remote over a protocol.

diff --git a/docs/content/commands/rclone_settier.md b/docs/content/commands/rclone_settier.md
index 6e1b6d196..268fd61dc 100644
--- a/docs/content/commands/rclone_settier.md
+++ b/docs/content/commands/rclone_settier.md
@@ -10,10 +10,9 @@ Changes storage class/tier of objects in remote.

## Synopsis

-
-rclone settier changes storage tier or class at remote if supported.
-Few cloud storage services provides different storage classes on objects,
-for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive,
+Changes storage tier or class at remote if supported. A few cloud storage
+services provide different storage classes on objects, for example
+AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive,
Google Cloud Storage, Regional Storage, Nearline, Coldline etc.

Note that, certain tier changes make objects not available to access immediately.
@@ -44,10 +43,9 @@ rclone settier tier remote:path [flags]
  -h, --help   help for settier
```

-
See the [global flags page](/flags/) for global options not listed here.

-# SEE ALSO
+## See Also

* [rclone](/commands/rclone/)	 - Show help for rclone commands, flags and backends.

diff --git a/docs/content/commands/rclone_sha1sum.md b/docs/content/commands/rclone_sha1sum.md
index db05eb2fa..ad6450d9b 100644
--- a/docs/content/commands/rclone_sha1sum.md
+++ b/docs/content/commands/rclone_sha1sum.md
@@ -10,7 +10,6 @@ Produces an sha1sum file for all the objects in the path.

## Synopsis

-
Produces an sha1sum file for all the objects in the path. This
is in the same format as the standard sha1sum tool produces.

@@ -46,10 +45,12 @@ rclone sha1sum remote:path [flags]
      --output-file string   Output hashsums to a file rather than the terminal
```

+Options shared with other commands are described next.
+See the [global flags page](/flags/) for global options not listed here.

-## Filter Options
+### Filter Options

-Flags for filtering directory listings.
+Flags for filtering directory listings

```
      --delete-excluded                     Delete files on dest excluded from sync
@@ -76,18 +77,16 @@ Flags for filtering directory listings.
      --min-size SizeSuffix                 Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off)
```

-## Listing Options
+### Listing Options

-Flags for listing directories.
+Flags for listing directories

```
      --default-time Time   Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z)
      --fast-list           Use recursive list if available; uses more memory but fewer transactions
```

-See the [global flags page](/flags/) for global options not listed here.
-
-# SEE ALSO
+## See Also

* [rclone](/commands/rclone/)	 - Show help for rclone commands, flags and backends.
diff --git a/docs/content/commands/rclone_size.md b/docs/content/commands/rclone_size.md index c853316d9..7e241f557 100644 --- a/docs/content/commands/rclone_size.md +++ b/docs/content/commands/rclone_size.md @@ -10,7 +10,6 @@ Prints the total size and number of objects in remote:path. ## Synopsis - Counts objects in the path and calculates the total size. Prints the result to standard output. @@ -41,10 +40,12 @@ rclone size remote:path [flags] --json Format output as JSON ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -71,18 +72,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_sync.md b/docs/content/commands/rclone_sync.md index 10da45199..82aa00056 100644 --- a/docs/content/commands/rclone_sync.md +++ b/docs/content/commands/rclone_sync.md @@ -9,7 +9,6 @@ Make source and dest identical, modifying destination only. ## Synopsis - Sync the source to the destination, changing the destination only. Doesn't transfer files that are identical on source and destination, testing by size and modification time or MD5SUM. @@ -118,15 +117,17 @@ rclone sync source:path dest:path [flags] -t, --timeformat string Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05) ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Copy Options +### Copy Options -Flags for anything which can Copy a file. +Flags for anything which can copy a file ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -158,9 +159,9 @@ Flags for anything which can Copy a file. -u, --update Skip files that are newer on the destination ``` -## Sync Options +### Sync Options -Flags just used for `rclone sync`. +Flags used for sync commands ``` --backup-dir string Make backups into hierarchy based in DIR @@ -177,9 +178,9 @@ Flags just used for `rclone sync`. 
--track-renames-strategy string Strategies to use when synchronizing using track-renames hash|modtime|leaf (default "hash") ``` -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -187,9 +188,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -216,18 +217,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_test.md b/docs/content/commands/rclone_test.md index 8af7509bf..0aaddb775 100644 --- a/docs/content/commands/rclone_test.md +++ b/docs/content/commands/rclone_test.md @@ -28,10 +28,9 @@ so reading their documentation first is recommended. -h, --help help for test ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. * [rclone test changenotify](/commands/rclone_test_changenotify/) - Log any change notify requests for the remote passed in. diff --git a/docs/content/commands/rclone_test_changenotify.md b/docs/content/commands/rclone_test_changenotify.md index 92c5179ca..1efc25554 100644 --- a/docs/content/commands/rclone_test_changenotify.md +++ b/docs/content/commands/rclone_test_changenotify.md @@ -19,10 +19,9 @@ rclone test changenotify remote: [flags] --poll-interval Duration Time to wait between polling for changes (default 10s) ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone test](/commands/rclone_test/) - Run a test command diff --git a/docs/content/commands/rclone_test_histogram.md b/docs/content/commands/rclone_test_histogram.md index 6a70b7059..b3b3088ab 100644 --- a/docs/content/commands/rclone_test_histogram.md +++ b/docs/content/commands/rclone_test_histogram.md @@ -27,10 +27,9 @@ rclone test histogram [remote:path] [flags] -h, --help help for histogram ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone test](/commands/rclone_test/) - Run a test command diff --git a/docs/content/commands/rclone_test_info.md b/docs/content/commands/rclone_test_info.md index 2cfe34855..2a9ccf16f 100644 --- a/docs/content/commands/rclone_test_info.md +++ b/docs/content/commands/rclone_test_info.md @@ -10,10 +10,10 @@ Discovers file name or other limitations for paths. ## Synopsis -rclone info discovers what filenames and upload methods are possible -to write to the paths passed in and how long they can be. It can take some -time. It will write test files into the remote:path passed in. It outputs -a bit of go code for each one. 
+Discovers what filenames and upload methods are possible to write to the +paths passed in and how long they can be. It can take some time. It will +write test files into the remote:path passed in. It outputs a bit of go +code for each one. **NB** this can create undeletable files and other hazards - use with care @@ -37,10 +37,9 @@ rclone test info [remote:path]+ [flags] --write-json string Write results to file ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone test](/commands/rclone_test/) - Run a test command diff --git a/docs/content/commands/rclone_test_makefile.md b/docs/content/commands/rclone_test_makefile.md index a0dd91ec6..82e5da0bb 100644 --- a/docs/content/commands/rclone_test_makefile.md +++ b/docs/content/commands/rclone_test_makefile.md @@ -24,10 +24,9 @@ rclone test makefile []+ [flags] --zero Fill files with ASCII 0x00 ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone test](/commands/rclone_test/) - Run a test command diff --git a/docs/content/commands/rclone_test_makefiles.md b/docs/content/commands/rclone_test_makefiles.md index be1eb0f21..237ba1c9d 100644 --- a/docs/content/commands/rclone_test_makefiles.md +++ b/docs/content/commands/rclone_test_makefiles.md @@ -31,10 +31,9 @@ rclone test makefiles [flags] --zero Fill files with ASCII 0x00 ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone test](/commands/rclone_test/) - Run a test command diff --git a/docs/content/commands/rclone_test_memory.md b/docs/content/commands/rclone_test_memory.md index c79aaea3d..50b985824 100644 --- a/docs/content/commands/rclone_test_memory.md +++ b/docs/content/commands/rclone_test_memory.md @@ -18,10 +18,9 @@ rclone test memory remote:path [flags] -h, --help help for memory ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone test](/commands/rclone_test/) - Run a test command diff --git a/docs/content/commands/rclone_touch.md b/docs/content/commands/rclone_touch.md index 1368ad294..f1725579f 100644 --- a/docs/content/commands/rclone_touch.md +++ b/docs/content/commands/rclone_touch.md @@ -10,7 +10,6 @@ Create new file or change file modification time. ## Synopsis - Set the modification time on file(s) as specified by remote:path to have the current time. @@ -46,10 +45,12 @@ rclone touch remote:path [flags] -t, --timestamp string Use specified time instead of the current time of day ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Important Options +### Important Options -Important flags useful for most commands. +Important flags useful for most commands ``` -n, --dry-run Do a trial run with no permanent changes @@ -57,9 +58,9 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) ``` -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -86,18 +87,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. 
+Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_tree.md b/docs/content/commands/rclone_tree.md index 43bb3a087..0460b61b3 100644 --- a/docs/content/commands/rclone_tree.md +++ b/docs/content/commands/rclone_tree.md @@ -10,9 +10,7 @@ List the contents of the remote in a tree like fashion. ## Synopsis - -rclone tree lists the contents of a remote in a similar way to the -unix tree command. +Lists the contents of a remote in a similar way to the unix tree command. For example @@ -67,10 +65,12 @@ rclone tree remote:path [flags] --version Sort files alphanumerically by version ``` +Options shared with other commands are described next. +See the [global flags page](/flags/) for global options not listed here. -## Filter Options +### Filter Options -Flags for filtering directory listings. +Flags for filtering directory listings ``` --delete-excluded Delete files on dest excluded from sync @@ -97,18 +97,16 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) ``` -## Listing Options +### Listing Options -Flags for listing directories. +Flags for listing directories ``` --default-time Time Time to show if modtime is unknown for files and directories (default 2000-01-01T00:00:00Z) --fast-list Use recursive list if available; uses more memory but fewer transactions ``` -See the [global flags page](/flags/) for global options not listed here. - -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. diff --git a/docs/content/commands/rclone_version.md b/docs/content/commands/rclone_version.md index 0896cda13..127ded349 100644 --- a/docs/content/commands/rclone_version.md +++ b/docs/content/commands/rclone_version.md @@ -10,7 +10,6 @@ Show the version number. ## Synopsis - Show the rclone version number, the go version, the build target OS and architecture, the runtime OS and kernel version and bitness, build tags and the type of executable (static or dynamic). @@ -60,10 +59,9 @@ rclone version [flags] -h, --help help for version ``` - See the [global flags page](/flags/) for global options not listed here. -# SEE ALSO +## See Also * [rclone](/commands/rclone/) - Show help for rclone commands, flags and backends. 
diff --git a/docs/content/docs.md b/docs/content/docs.md
index 57aff00e9..cb05455b6 100644
--- a/docs/content/docs.md
+++ b/docs/content/docs.md
@@ -50,8 +50,10 @@ See the following for detailed instructions for
  * [Google Photos](/googlephotos/)
  * [Hasher](/hasher/) - to handle checksums for other remotes
  * [HDFS](/hdfs/)
+  * [Hetzner Storage Box](/sftp/#hetzner-storage-box)
  * [HiDrive](/hidrive/)
  * [HTTP](/http/)
+  * [iCloud Drive](/iclouddrive/)
  * [Internet Archive](/internetarchive/)
  * [Jottacloud](/jottacloud/)
  * [Koofr](/koofr/)
@@ -73,6 +75,7 @@ See the following for detailed instructions for
  * [Proton Drive](/protondrive/)
  * [QingStor](/qingstor/)
  * [Quatrix by Maytech](/quatrix/)
+  * [rsync.net](/sftp/#rsync-net)
  * [Seafile](/seafile/)
  * [SFTP](/sftp/)
  * [Sia](/sia/)
@@ -1345,11 +1348,12 @@ flag set) such as:

- local
- ftp
- sftp
+- pcloud

Without `--inplace` (the default) rclone will first upload to a
temporary file with an extension like this, where `XXXXXX` represents a
-random string and `.partial` is [--partial-suffix](#partial-suffix) value
-(`.partial` by default).
+hash of the source file's fingerprint and `.partial` is the
+[--partial-suffix](#partial-suffix) value (`.partial` by default).

    original-file-name.XXXXXX.partial

@@ -1921,11 +1925,12 @@ Suffix length limit is 16 characters.

The default is `.partial`.

-### --password-command SpaceSepList ###
+### --password-command SpaceSepList {#password-command}

This flag supplies a program which should supply the config password
when run. This is an alternative to rclone prompting for the password
-or setting the `RCLONE_CONFIG_PASS` variable.
+or setting the `RCLONE_CONFIG_PASS` variable. It is also used when
+setting the config password for the first time.

The argument to this should be a command with a space separated list
of arguments. If one of the arguments has a space in then enclose it
@@ -1939,6 +1944,11 @@ Eg

    --password-command 'echo "hello with space"'
    --password-command 'echo "hello with ""quotes"" and space"'

+Note that when changing the configuration password the environment
+variable `RCLONE_PASSWORD_CHANGE=1` will be set. This can be used to
+distinguish initial decryption of the config file from the new
+password.
+
See the [Configuration Encryption](#configuration-encryption) for more info.

See a [Windows PowerShell example on the Wiki](https://github.com/rclone/rclone/wiki/Windows-Powershell-use-rclone-password-command-for-Config-file-password).
@@ -2542,6 +2552,12 @@ encryption from your configuration.

There is no way to recover the configuration if you lose your password.

+You can also use
+
+- [rclone config encryption set](/commands/rclone_config_encryption_set/) to set the config encryption directly
+- [rclone config encryption remove](/commands/rclone_config_encryption_remove/) to remove it
+- [rclone config encryption check](/commands/rclone_config_encryption_check/) to check that it is encrypted properly.
+
rclone uses [nacl secretbox](https://godoc.org/golang.org/x/crypto/nacl/secretbox)
which in turn uses XSalsa20 and Poly1305 to encrypt and authenticate
your configuration with secret-key cryptography.
@@ -2574,7 +2590,7 @@ An alternate means of supplying the password is to provide a script
which will retrieve the password and print on standard output. This
script should have a fully specified path name and not rely on any
environment variables.
The script is supplied either via
-`--password-command="..."` command line argument or via the
+[`--password-command="..."`](#password-command) command line argument or via the
`RCLONE_PASSWORD_COMMAND` environment variable.

One useful example of this is using the `passwordstore` application
@@ -2617,6 +2633,57 @@ general, but are used without referencing a stored remote, e.g. listing
local filesystem paths, or [connection strings](#connection-strings):
`rclone --config="" ls .`

+Configuration Encryption Cheatsheet
+-----------------------------------
+You can quickly set up configuration encryption without the password
+appearing in plain text at rest or in transit. Detailed instructions
+for popular OSes:
+
+### Mac ###
+
+* Generate and store a password
+
+`security add-generic-password -a rclone -s config -w $(openssl rand -base64 40)`
+
+* Add the retrieval instruction to your .zprofile / .profile
+
+`export RCLONE_PASSWORD_COMMAND="/usr/bin/security find-generic-password -a rclone -s config -w"`
+
+### Linux ###
+
+* Prerequisite
+
+Linux doesn't come with a default password manager. Install the
+`pass` utility using a package manager, e.g. `apt install pass`,
+`yum install pass`, [etc.](https://www.passwordstore.org/#download);
+then initialize a password store:
+
+`pass init rclone`
+
+* Generate and store a password
+
+`echo $(openssl rand -base64 40) | pass insert -m rclone/config`
+
+* Add the retrieval instruction
+
+`export RCLONE_PASSWORD_COMMAND="/usr/bin/pass rclone/config"`
+
+### Windows ###
+
+* Generate and store a password
+
+`New-Object -TypeName PSCredential -ArgumentList "rclone", (ConvertTo-SecureString -String ([System.Web.Security.Membership]::GeneratePassword(40, 10)) -AsPlainText -Force) | Export-Clixml -Path "rclone-credential.xml"`
+
+* Add the password retrieval instruction
+
+`[Environment]::SetEnvironmentVariable("RCLONE_PASSWORD_COMMAND", "[System.Runtime.InteropServices.Marshal]::PtrToStringAuto([System.Runtime.InteropServices.Marshal]::SecureStringToBSTR((Import-Clixml -Path "rclone-credential.xml").Password))")`
+
+### Encrypt the config file (all systems) ###
+
+* Execute `rclone config` and select `s` (set configuration password)
+
+* Add/update the password from the previous steps
+
Developer options
-----------------
@@ -2770,6 +2837,17 @@ Rclone prefixes all log messages with their level in capitals, e.g. INFO
which makes it easy to grep the log file for different kinds of
information.

+Metrics
+-------
+
+Rclone can publish metrics in the OpenMetrics/Prometheus format.
+
+To enable the metrics endpoint, use the `--metrics-addr` flag. Metrics can also be published on the `--rc-addr` port if the `--rc` and `--rc-enable-metrics` flags are supplied, or when running `rclone rcd` with `--rc-enable-metrics`.
+
+Rclone provides extensive configuration options for the metrics HTTP endpoint. These settings are grouped under the Metrics section and have a prefix `--metrics-*`.
+
+When metrics are enabled with `--rc-enable-metrics`, they will be published on the same port as the rc API. In this case, the `--metrics-*` flags will be ignored, and the HTTP endpoint configuration will be managed by the `--rc-*` parameters.
+
Exit Code
---------
@@ -2791,9 +2869,9 @@ messages may not be valid after the retry.

If rclone has done a retry it will log a high priority message if the
retry was successful.
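For example, a wrapper script might branch on the exit status like this (a sketch; the paths are illustrative only):

```
#!/bin/sh
rclone sync /path/to/src remote:dst
status=$?
if [ "$status" -ne 0 ]; then
    echo "rclone sync failed with exit code $status" >&2
    exit "$status"
fi
```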
### List of exit codes ###
-  * `0` - success
-  * `1` - Syntax or usage error
-  * `2` - Error not otherwise categorised
+  * `0` - Success
+  * `1` - Error not otherwise categorised
+  * `2` - Syntax or usage error
   * `3` - Directory not found
   * `4` - File not found
   * `5` - Temporary error (one that more retries might fix) (Retry errors)
@@ -2834,6 +2912,22 @@ so they take exactly the same form.
The options set by environment variables can be seen with the `-vv`
flag, e.g. `rclone version -vv`.

+Options that can appear multiple times (type `stringArray`) are
+treated slightly differently as environment variables can only be
+defined once. In order to allow a simple mechanism for adding one or
+many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
+string. For example
+
+| Environment Variable | Equivalent options |
+|----------------------|--------------------|
+| `RCLONE_EXCLUDE="*.jpg"` | `--exclude "*.jpg"` |
+| `RCLONE_EXCLUDE="*.jpg,*.png"` | `--exclude "*.jpg"` `--exclude "*.png"` |
+| `RCLONE_EXCLUDE='"*.jpg","*.png"'` | `--exclude "*.jpg"` `--exclude "*.png"` |
+| `RCLONE_EXCLUDE='"/directory with comma , in it /**"'` | `--exclude "/directory with comma , in it /**"` |
+
+If `stringArray` options are defined as environment variables **and**
+options on the command line then all the values will be used.
+
### Config file ###

You can set defaults for values in the config file on an individual
diff --git a/docs/content/drive.md b/docs/content/drive.md
index a83bd71cb..444886677 100644
--- a/docs/content/drive.md
+++ b/docs/content/drive.md
@@ -77,15 +77,16 @@ Configure this as a Shared Drive (Team Drive)?
y) Yes
n) No
y/n> n
---------------------
-[remote]
-client_id =
-client_secret =
-scope = drive
-root_folder_id =
-service_account_file =
-token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2014-03-16T13:57:58.955387075Z"}
---------------------
+Configuration complete.
+Options:
+- type: drive
+- client_id:
+- client_secret:
+- scope: drive
+- root_folder_id:
+- service_account_file:
+- token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2014-03-16T13:57:58.955387075Z"}
+Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
@@ -317,13 +318,14 @@ Choose a number from below, or type in your own value
 3 / Rclone Test 3
   \ "zzzzzzzzzzzzzzzzzzzz"
Enter a Shared Drive ID> 1
---------------------
-[remote]
-client_id =
-client_secret =
-token = {"AccessToken":"xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null}
-team_drive = xxxxxxxxxxxxxxxxxxxx
---------------------
+Configuration complete.
+Options:
+- type: drive
+- client_id:
+- client_secret:
+- token: {"AccessToken":"xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx","Expiry":"2014-03-16T13:57:58.955387075Z","Extra":null}
+- team_drive: xxxxxxxxxxxxxxxxxxxx
+Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
@@ -534,6 +536,7 @@ represent the currently available conversions.
| html | text/html | An HTML Document | | jpg | image/jpeg | A JPEG Image File | | json | application/vnd.google-apps.script+json | JSON Text Format for Google Apps scripts | +| md | text/markdown | Markdown Text Format | | odp | application/vnd.oasis.opendocument.presentation | Openoffice Presentation | | ods | application/vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet | | ods | application/x-vnd.oasis.opendocument.spreadsheet | Openoffice Spreadsheet | diff --git a/docs/content/dropbox.md b/docs/content/dropbox.md index 5a06e8f21..17cda68b4 100644 --- a/docs/content/dropbox.md +++ b/docs/content/dropbox.md @@ -44,12 +44,13 @@ Remote config Please visit: https://www.dropbox.com/1/oauth2/authorize?client_id=XXXXXXXXXXXXXXX&response_type=code Enter the code: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXXXXXXXX --------------------- -[remote] -app_key = -app_secret = -token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX --------------------- +Configuration complete. +Options: +- type: dropbox +- app_key: +- app_secret: +- token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/fichier.md b/docs/content/fichier.md index f89844c93..b8f12e5e0 100644 --- a/docs/content/fichier.md +++ b/docs/content/fichier.md @@ -51,11 +51,11 @@ y) Yes n) No y/n> Remote config --------------------- -[remote] -type = fichier -api_key = example_key --------------------- +Configuration complete. +Options: +- type: fichier +- api_key: example_key +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/filefabric.md b/docs/content/filefabric.md index 50814052a..ce98cd731 100644 --- a/docs/content/filefabric.md +++ b/docs/content/filefabric.md @@ -75,12 +75,12 @@ y) Yes n) No (default) y/n> n Remote config --------------------- -[remote] -type = filefabric -url = https://yourfabric.smestorage.com/ -permanent_token = xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx --------------------- +Configuration complete. +Options: +- type: filefabric +- url: https://yourfabric.smestorage.com/ +- permanent_token: xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote diff --git a/docs/content/filtering.md b/docs/content/filtering.md index db16cf7cc..ffc898c25 100644 --- a/docs/content/filtering.md +++ b/docs/content/filtering.md @@ -673,7 +673,7 @@ remote or flag value. The fix then is to quote values containing spaces. ### `--min-size` - Don't transfer any file smaller than this Controls the minimum size file within the scope of an rclone command. -Default units are `KiB` but abbreviations `K`, `M`, `G`, `T` or `P` are valid. +Default units are `KiB` but abbreviations `B`, `K`, `M`, `G`, `T` or `P` are valid. E.g. `rclone ls remote: --min-size 50k` lists files on `remote:` of 50 KiB size or larger. @@ -683,7 +683,7 @@ See [the size option docs](/docs/#size-option) for more info. ### `--max-size` - Don't transfer any file larger than this Controls the maximum size file within the scope of an rclone command. -Default units are `KiB` but abbreviations `K`, `M`, `G`, `T` or `P` are valid. +Default units are `KiB` but abbreviations `B`, `K`, `M`, `G`, `T` or `P` are valid. E.g. `rclone ls remote: --max-size 1G` lists files on `remote:` of 1 GiB size or smaller. 
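The two size filters can also be combined to select a band of file sizes (values here are arbitrary):

```
rclone ls remote: --min-size 50k --max-size 1G
```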
diff --git a/docs/content/flags.md b/docs/content/flags.md index fca12d85c..c2908561a 100644 --- a/docs/content/flags.md +++ b/docs/content/flags.md @@ -1,6 +1,7 @@ --- title: "Global Flags" description: "Rclone Global Flags" +# autogenerated - DO NOT EDIT --- # Global Flags @@ -11,12 +12,12 @@ split into groups. ## Copy -Flags for anything which can Copy a file. +Flags for anything which can copy a file. ``` --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -51,7 +52,7 @@ Flags for anything which can Copy a file. ## Sync -Flags just used for `rclone sync`. +Flags used for sync commands. ``` --backup-dir string Make backups into hierarchy based in DIR @@ -82,7 +83,7 @@ Important flags useful for most commands. ## Check -Flags used for `rclone check`. +Flags used for check commands. ``` --max-backlog int Maximum number of objects in sync or check backlog (default 10000) @@ -91,7 +92,7 @@ Flags used for `rclone check`. ## Networking -General networking and HTTP stuff. +Flags for general networking and HTTP stuff. ``` --bind string Local address to bind to for outgoing connections, IPv4, IPv6 or name @@ -101,7 +102,7 @@ General networking and HTTP stuff. --client-cert string Client SSL certificate (PEM) for mutual TLS auth --client-key string Client SSL private key (PEM) for mutual TLS auth --contimeout Duration Connect timeout (default 1m0s) - --disable-http-keep-alives Disable HTTP keep-alives and use each connection once. + --disable-http-keep-alives Disable HTTP keep-alives and use each connection once --disable-http2 Disable HTTP/2 in the global transport --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21 --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s) @@ -114,7 +115,7 @@ General networking and HTTP stuff. --tpslimit float Limit HTTP transactions per second to this --tpslimit-burst int Max burst of transactions for --tpslimit (default 1) --use-cookies Enable session cookiejar - --user-agent string Set the user-agent to a specified string (default "rclone/v1.67.0") + --user-agent string Set the user-agent to a specified string (default "rclone/v1.68.0") ``` @@ -131,7 +132,7 @@ Flags helpful for increasing performance. ## Config -General configuration of rclone. +Flags for general configuration of rclone. ``` --ask-password Allow prompt for password for encrypted configuration (default true) @@ -215,7 +216,7 @@ Flags for listing directories. ## Logging -Logging and statistics. +Flags for logging and statistics. ``` --log-file string Log everything to this file @@ -234,7 +235,7 @@ Logging and statistics. 
--stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes ("), see https://golang.org/pkg/time/#Time.Format
--stats-unit string Show data rate in stats as either 'bits' or 'bytes' per second (default "bytes")
--syslog Use Syslog for logging
- --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default "DAEMON")
+ --syslog-facility string Facility for syslog, e.g. KERN,USER (default "DAEMON")
--use-json-log Use json log format
-v, --verbose count Print lots more stuff (repeat for more)
```

@@ -263,12 +264,12 @@ Flags to control the Remote Control API.

```
--rc Enable the remote control server
- --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572])
+ --rc-addr stringArray IPaddress:Port or :Port to bind server to (default ["localhost:5572"])
--rc-allow-origin string Origin which cross-domain request (CORS) can be executed from
--rc-baseurl string Prefix for URLs - leave blank for root
--rc-cert string TLS PEM key (concatenation of certificate and CA certificate)
--rc-client-ca string Client certificate authority to verify clients with
- --rc-enable-metrics Enable prometheus metrics on /metrics
+ --rc-enable-metrics Enable the Prometheus metrics path at the remote control server
--rc-files string Path to local files to serve on the HTTP server
--rc-htpasswd string A htpasswd file - if not provided no authentication is done
--rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s)
@@ -294,9 +295,34 @@ Flags to control the Remote Control API.
```

+## Metrics
+
+Flags to control the Metrics HTTP endpoint.
+
+```
+ --metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [""])
+ --metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from
+ --metrics-baseurl string Prefix for URLs - leave blank for root
+ --metrics-cert string TLS PEM key (concatenation of certificate and CA certificate)
+ --metrics-client-ca string Client certificate authority to verify clients with
+ --metrics-htpasswd string A htpasswd file - if not provided no authentication is done
+ --metrics-key string TLS PEM Private key
+ --metrics-max-header-bytes int Maximum size of request header (default 4096)
+ --metrics-min-tls-version string Minimum TLS version that is acceptable (default "tls1.0")
+ --metrics-pass string Password for authentication
+ --metrics-realm string Realm for authentication
+ --metrics-salt string Password hashing salt (default "dlPL2MqE")
+ --metrics-server-read-timeout Duration Timeout for server reading data (default 1h0m0s)
+ --metrics-server-write-timeout Duration Timeout for server writing data (default 1h0m0s)
+ --metrics-template string User-specified template
+ --metrics-user string User name for authentication
+ --rc-enable-metrics Enable the Prometheus metrics path at the remote control server
+```
+
+
## Backend

-Backend only flags. These can be set in the config file also.
+Backend-only flags (these can be set in the config file also).

```
--alias-description string Description of the remote
--filefabric-token-expiry string Token expiry time --filefabric-url string URL of the Enterprise File Fabric to connect to --filefabric-version string Version read from the file fabric + --filescom-api-key string The API key used to authenticate with Files.com + --filescom-description string Description of the remote + --filescom-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) + --filescom-password string The password used to authenticate with Files.com (obscured) + --filescom-site string Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com) + --filescom-username string The username used to authenticate with Files.com --ftp-ask-password Allow asking for FTP password when needed --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited @@ -563,6 +595,12 @@ Backend only flags. These can be set in the config file also. --gcs-token string OAuth Access Token as a JSON blob --gcs-token-url string Token server url --gcs-user-project string User project + --gofile-access-token string API Access token + --gofile-account-id string Account ID + --gofile-description string Description of the remote + --gofile-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftPeriod,RightPeriod,InvalidUtf8,Dot,Exclamation) + --gofile-list-chunk int Number of items to list in each call (default 1000) + --gofile-root-folder-id string ID of the root folder --gphotos-auth-url string Auth server URL --gphotos-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) --gphotos-batch-mode string Upload file batching sync|async|off (default "sync") @@ -654,6 +692,7 @@ Backend only flags. These can be set in the config file also. --local-description string Description of the remote --local-encoding Encoding The encoding for the backend (default Slash,Dot) --local-no-check-updated Don't check to see if the files change during upload + --local-no-clone Disable reflink cloning for server-side copies --local-no-preallocate Disable preallocation of disk space for transferred files --local-no-set-modtime Disable setting modtime --local-no-sparse Disable sparse files for multi-thread downloads @@ -770,6 +809,10 @@ Backend only flags. These can be set in the config file also. --pikpak-upload-concurrency int Concurrency for multipart uploads (default 5) --pikpak-use-trash Send files to the trash instead of deleting permanently (default true) --pikpak-user string Pikpak username + --pixeldrain-api-key string API key for your pixeldrain account + --pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it's fine to leave (default "https://pixeldrain.com/api") + --pixeldrain-description string Description of the remote + --pixeldrain-root-folder-id string Root of the filesystem to use (default "me") --premiumizeme-auth-url string Auth server URL --premiumizeme-client-id string OAuth Client Id --premiumizeme-client-secret string OAuth Client Secret @@ -844,6 +887,7 @@ Backend only flags. These can be set in the config file also. 
--s3-provider string Choose your S3 provider --s3-region string Region to connect to --s3-requester-pays Enables requester pays option when interacting with S3 bucket + --s3-sdk-log-mode Bits Set to debug the SDK (default Off) --s3-secret-access-key string AWS Secret Access Key (password) --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 --s3-session-token string An AWS session token @@ -854,7 +898,6 @@ Backend only flags. These can be set in the config file also. --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key --s3-storage-class string The storage class to use when storing new objects in S3 - --s3-sts-endpoint string Endpoint for STS --s3-upload-concurrency int Concurrency for multipart uploads and copies (default 4) --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint @@ -864,6 +907,7 @@ Backend only flags. These can be set in the config file also. --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) --s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset) --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads + --s3-use-unsigned-payload Tristate Whether to use an unsigned payload in PutObject (default unset) --s3-v2-auth If true use v2 authentication --s3-version-at Time Show file versions as they were at the specified time (default off) --s3-version-deleted Show deleted file markers when using versions @@ -972,10 +1016,12 @@ Backend only flags. These can be set in the config file also. --swift-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8) --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default "public") --swift-env-auth Get swift credentials from environment variables in standard OpenStack form + --swift-fetch-until-empty-page When paginating, always fetch unless we received an empty page --swift-key string API key or password (OS_PASSWORD) --swift-leave-parts-on-error If true avoid calling abort upload on a failure --swift-no-chunk Don't chunk files during streaming upload --swift-no-large-objects Disable support for static and dynamic large objects + --swift-partial-page-fetch-threshold int When paginating, fetch if the current page is within this percentage of the limit --swift-region string Region name - optional (OS_REGION_NAME) --swift-storage-policy string The storage policy to use when creating a new container --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) @@ -1013,6 +1059,7 @@ Backend only flags. These can be set in the config file also. --webdav-owncloud-exclude-shares Exclude ownCloud shares --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms) --webdav-pass string Password (obscured) + --webdav-unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly --webdav-url string URL of http host to connect to --webdav-user string User name --webdav-vendor string Name of the WebDAV site/service/software you are using @@ -1022,6 +1069,7 @@ Backend only flags. These can be set in the config file also. 
--yandex-description string Description of the remote
--yandex-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot)
--yandex-hard-delete Delete files permanently rather than putting them into the trash
+ --yandex-spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance (default true)
--yandex-token string OAuth Access Token as a JSON blob
--yandex-token-url string Token server url
--zoho-auth-url string Auth server URL
diff --git a/docs/content/ftp.md b/docs/content/ftp.md
index 9c076011f..68b3d54c2 100644
--- a/docs/content/ftp.md
+++ b/docs/content/ftp.md
@@ -72,12 +72,12 @@ Use FTP over TLS (Explicit)
Enter a boolean value (true or false). Press Enter for the default ("false").
explicit_tls>
Remote config
---------------------
-[remote]
-type = ftp
-host = ftp.example.com
-pass = *** ENCRYPTED ***
---------------------
+Configuration complete.
+Options:
+- type: ftp
+- host: ftp.example.com
+- pass: *** ENCRYPTED ***
+Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
diff --git a/docs/content/gofile.md b/docs/content/gofile.md
index 8ce0d7036..45fe18f7c 100644
--- a/docs/content/gofile.md
+++ b/docs/content/gofile.md
@@ -202,6 +202,17 @@ Properties:
- Type: string
- Required: false

+#### --gofile-list-chunk
+
+Number of items to list in each call
+
+Properties:
+
+- Config: list_chunk
+- Env Var: RCLONE_GOFILE_LIST_CHUNK
+- Type: int
+- Default: 1000
+
#### --gofile-encoding

The encoding for the backend.
diff --git a/docs/content/googlecloudstorage.md b/docs/content/googlecloudstorage.md
index db1928be3..be52111a6 100644
--- a/docs/content/googlecloudstorage.md
+++ b/docs/content/googlecloudstorage.md
@@ -128,16 +128,16 @@ If your browser doesn't open automatically go to the following link: http://127.
Log in and authorize rclone for access
Waiting for code...
Got code
---------------------
-[remote]
-type = google cloud storage
-client_id =
-client_secret =
-token = {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null}
-project_number = 12345678
-object_acl = private
-bucket_acl = private
---------------------
+Configuration complete.
+Options:
+- type: google cloud storage
+- client_id:
+- client_secret:
+- token: {"AccessToken":"xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","RefreshToken":"x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx","Expiry":"2014-07-17T20:49:14.929208288+01:00","Extra":null}
+- project_number: 12345678
+- object_acl: private
+- bucket_acl: private
+Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
@@ -363,6 +363,20 @@ Properties:
- Type: string
- Required: false

+#### --gcs-access-token
+
+Short-lived access token.
+
+Leave blank normally.
+Needed only if you want to use short-lived access tokens instead of interactive login.
+
+Properties:
+
+- Config: access_token
+- Env Var: RCLONE_GCS_ACCESS_TOKEN
+- Type: string
+- Required: false
+
#### --gcs-anonymous

Access public buckets and objects without credentials.
diff --git a/docs/content/googlephotos.md b/docs/content/googlephotos.md
index 7e536aa92..31154a92b 100644
--- a/docs/content/googlephotos.md
+++ b/docs/content/googlephotos.md
@@ -78,11 +78,11 @@ Got code
*** are stored in full resolution at original quality.
These uploads
*** will count towards storage in your Google Account.

---------------------
-[remote]
-type = google photos
-token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2019-06-28T17:38:04.644930156+01:00"}
---------------------
+Configuration complete.
+Options:
+- type: google photos
+- token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2019-06-28T17:38:04.644930156+01:00"}
+Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
@@ -502,12 +502,18 @@ is covered by [bug #112096115](https://issuetracker.google.com/issues/112096115)
**The current google API does not allow photos to be downloaded at original resolution. This is very important if you are, for example, relying on "Google Photos" as a backup of your photos. You will not be able to use rclone to redownload original images. You could use 'google takeout' to recover the original photos as a last resort**

+**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
+headless browser to download images in full resolution.
+
### Downloading Videos

When videos are downloaded they are downloaded in a really compressed version of the video compared to downloading it via the Google Photos web interface. This
is covered by [bug #113672044](https://issuetracker.google.com/issues/113672044).

+**NB** you **can** use the [--gphotos-proxy](#gphotos-proxy) flag to use a
+headless browser to download videos in full resolution.
+
### Duplicates

If a file name is duplicated in a directory then rclone will add the
diff --git a/docs/content/hdfs.md b/docs/content/hdfs.md
index c7022586e..b0f5453b4 100644
--- a/docs/content/hdfs.md
+++ b/docs/content/hdfs.md
@@ -53,12 +53,12 @@ y) Yes
n) No (default)
y/n> n
Remote config
---------------------
-[remote]
-type = hdfs
-namenode = namenode.hadoop:8020
-username = root
---------------------
+Configuration complete.
+Options:
+- type: hdfs
+- namenode: namenode.hadoop:8020
+- username: root
+Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
diff --git a/docs/content/hidrive.md b/docs/content/hidrive.md
index e19649310..38c161409 100644
--- a/docs/content/hidrive.md
+++ b/docs/content/hidrive.md
@@ -54,11 +54,11 @@ If your browser doesn't open automatically go to the following link: http://127.
Log in and authorize rclone for access
Waiting for code...
Got code
---------------------
-[remote]
-type = hidrive
-token = {"access_token":"xxxxxxxxxxxxxxxxxxxx","token_type":"Bearer","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxx","expiry":"xxxxxxxxxxxxxxxxxxxxxxx"}
---------------------
+Configuration complete.
+Options:
+- type: hidrive
+- token: {"access_token":"xxxxxxxxxxxxxxxxxxxx","token_type":"Bearer","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxx","expiry":"xxxxxxxxxxxxxxxxxxxxxxx"}
+Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
diff --git a/docs/content/http.md b/docs/content/http.md
index 27e048285..4266ddf7c 100644
--- a/docs/content/http.md
+++ b/docs/content/http.md
@@ -63,10 +63,11 @@ Choose a number from below, or type in your own value
   \ "https://example.com"
url> https://beta.rclone.org
Remote config
---------------------
-[remote]
-url = https://beta.rclone.org
---------------------
+Configuration complete.
+Options:
+- type: http
+- url: https://beta.rclone.org
+Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
diff --git a/docs/content/iclouddrive.md b/docs/content/iclouddrive.md
new file mode 100644
index 000000000..e4982cc4b
--- /dev/null
+++ b/docs/content/iclouddrive.md
@@ -0,0 +1,156 @@
+---
+title: "iCloud Drive"
+description: "Rclone docs for iCloud Drive"
+versionIntroduced: "v1.69"
+---
+
+# {{< icon "fa fa-cloud" >}} iCloud Drive
+
+
+## Configuration
+
+The initial setup for an iCloud Drive backend involves getting a trust token/session.
+`rclone config` walks you through it. The trust token is valid for 30 days, after
+which you will have to reauthenticate with `rclone reconnect` or `rclone config`.
+
+Here is an example of how to make a remote called `iclouddrive`. First run:
+
+     rclone config
+
+This will guide you through an interactive setup process:
+
+```
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+name> iclouddrive
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+[snip]
+XX / iCloud Drive
+   \ (iclouddrive)
+[snip]
+Storage> iclouddrive
+Option apple_id.
+Apple ID.
+Enter a value.
+apple_id> APPLEID
+Option password.
+Password.
+Choose an alternative below.
+y) Yes, type in my own password
+g) Generate random password
+y/g> y
+Enter the password:
+password:
+Confirm the password:
+password:
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n> n
+Option config_2fa.
+Two-factor authentication: please enter your 2FA code
+Enter a value.
+config_2fa> 2FACODE
+Remote config
+--------------------
+[iclouddrive]
+- type: iclouddrive
+- apple_id: APPLEID
+- password: *** ENCRYPTED ***
+- cookies: ****************************
+- trust_token: ****************************
+--------------------
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+```
+
+## Advanced Data Protection
+
+ADP is currently unsupported and needs to be disabled.
+
+{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/iclouddrive/iclouddrive.go then run make backenddocs" >}}
+### Standard options
+
+Here are the Standard options specific to iclouddrive (iCloud Drive).
+
+#### --iclouddrive-apple-id
+
+Apple ID.
+
+Properties:
+
+- Config: apple_id
+- Env Var: RCLONE_ICLOUDDRIVE_APPLE_ID
+- Type: string
+- Required: true
+
+#### --iclouddrive-password
+
+Password.
+
+**NB** Input to this must be obscured - see [rclone obscure](/commands/rclone_obscure/).
+
+Properties:
+
+- Config: password
+- Env Var: RCLONE_ICLOUDDRIVE_PASSWORD
+- Type: string
+- Required: true
+
+#### --iclouddrive-trust-token
+
+trust token (internal use)
+
+Properties:
+
+- Config: trust_token
+- Env Var: RCLONE_ICLOUDDRIVE_TRUST_TOKEN
+- Type: string
+- Required: false
+
+#### --iclouddrive-cookies
+
+cookies (internal use only)
+
+Properties:
+
+- Config: cookies
+- Env Var: RCLONE_ICLOUDDRIVE_COOKIES
+- Type: string
+- Required: false
+
+### Advanced options
+
+Here are the Advanced options specific to iclouddrive (iCloud Drive).
+
+#### --iclouddrive-encoding
+
+The encoding for the backend.
+
+See the [encoding section in the overview](/overview/#encoding) for more info.
+
+Properties:
+
+- Config: encoding
+- Env Var: RCLONE_ICLOUDDRIVE_ENCODING
+- Type: Encoding
+- Default: Slash,BackSlash,Del,Ctl,InvalidUtf8,Dot
+
+#### --iclouddrive-description
+
+Description of the remote.
+
+Properties:
+
+- Config: description
+- Env Var: RCLONE_ICLOUDDRIVE_DESCRIPTION
+- Type: string
+- Required: false
+
+{{< rem autogenerated options stop >}}
diff --git a/docs/content/internetarchive.md b/docs/content/internetarchive.md
index 1117b696b..aacafb65b 100644
--- a/docs/content/internetarchive.md
+++ b/docs/content/internetarchive.md
@@ -148,12 +148,12 @@ Edit advanced config?
y) Yes
n) No (default)
y/n> n
---------------------
-[remote]
-type = internetarchive
-access_key_id = XXXX
-secret_access_key = XXXX
---------------------
+Configuration complete.
+Options:
+- type: internetarchive
+- access_key_id: XXXX
+- secret_access_key: XXXX
+Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
diff --git a/docs/content/jottacloud.md b/docs/content/jottacloud.md
index 3a2ac05dc..f2e368e1e 100644
--- a/docs/content/jottacloud.md
+++ b/docs/content/jottacloud.md
@@ -175,18 +175,18 @@ Press Enter for the default (Archive).
 2 > Shared
 3 > Sync
config_mountpoint> 1
---------------------
-[remote]
-type = jottacloud
-configVersion = 1
-client_id = jottacli
-client_secret =
-tokenURL = https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token
-token = {........}
-username = 2940e57271a93d987d6f8a21
-device = Jotta
-mountpoint = Archive
---------------------
+Configuration complete.
+Options:
+- type: jottacloud
+- configVersion: 1
+- client_id: jottacli
+- client_secret:
+- tokenURL: https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token
+- token: {........}
+- username: 2940e57271a93d987d6f8a21
+- device: Jotta
+- mountpoint: Archive
+Keep this "remote" remote?
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
diff --git a/docs/content/local.md b/docs/content/local.md
index 849e2cc54..9d5c8363c 100644
--- a/docs/content/local.md
+++ b/docs/content/local.md
@@ -130,12 +130,8 @@ This format requires absolute paths and the use of prefix `\\?\`,
e.g. `\\?\D:\some\very\long\path`. For convenience rclone will automatically
convert regular paths into the corresponding extended-length paths,
so in most cases you do not have to worry about this (read more [below](#long-paths)).
-
-Note that Windows supports using the same prefix `\\?\` to
-specify path to volumes identified by their GUID, e.g.
-`\\?\Volume{b75e2c83-0000-0000-0000-602f00000000}\some\path`.
-This is *not* supported in rclone, due to an [issue](https://github.com/golang/go/issues/39785)
-in go.
+Using the same prefix `\\?\` it is also possible to specify paths to volumes
+identified by their GUID, e.g. `\\?\Volume{b75e2c83-0000-0000-0000-602f00000000}\some\path`.

#### Long paths ####
@@ -516,6 +512,32 @@ Properties:
- Type: bool
- Default: false

+#### --local-no-clone
+
+Disable reflink cloning for server-side copies.
+
+Normally, for local-to-local transfers, rclone will "clone" the file when
+possible, and fall back to "copying" only when cloning is not supported.
+
+Cloning creates a shallow copy (or "reflink") which initially shares blocks with
+the original file. Unlike a "hardlink", the two files are independent and
+neither will affect the other if subsequently modified.
+
+Cloning is usually preferable to copying, as it is much faster and is
+deduplicated by default (i.e. having two identical files does not consume more
+storage than having just one.) However, for use cases where data redundancy is
+preferable, --local-no-clone can be used to disable cloning and force "deep" copies.
+ +Currently, cloning is only supported when using APFS on macOS (support for other +platforms may be added in the future.) + +Properties: + +- Config: no_clone +- Env Var: RCLONE_LOCAL_NO_CLONE +- Type: bool +- Default: false + #### --local-no-preallocate Disable preallocation of disk space for transferred files. diff --git a/docs/content/mailru.md b/docs/content/mailru.md index fe6e4bb93..2ecbf152f 100644 --- a/docs/content/mailru.md +++ b/docs/content/mailru.md @@ -90,13 +90,13 @@ y) Yes n) No y/n> n Remote config --------------------- -[remote] -type = mailru -user = username@mail.ru -pass = *** ENCRYPTED *** -speedup_enable = true --------------------- +Configuration complete. +Options: +- type: mailru +- user: username@mail.ru +- pass: *** ENCRYPTED *** +- speedup_enable: true +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/mega.md b/docs/content/mega.md index d62d527fc..2886c05f1 100644 --- a/docs/content/mega.md +++ b/docs/content/mega.md @@ -53,12 +53,12 @@ password: Confirm the password: password: Remote config --------------------- -[remote] -type = mega -user = you@example.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: mega +- user: you@example.com +- pass: *** ENCRYPTED *** +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/memory.md b/docs/content/memory.md index d8637771b..c6f872d27 100644 --- a/docs/content/memory.md +++ b/docs/content/memory.md @@ -37,10 +37,10 @@ Storage> memory Remote config --------------------- -[remote] -type = memory --------------------- +Configuration complete. +Options: +- type: memory +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote diff --git a/docs/content/onedrive.md b/docs/content/onedrive.md index 1caefc4a8..26242ba10 100644 --- a/docs/content/onedrive.md +++ b/docs/content/onedrive.md @@ -84,13 +84,13 @@ Is that okay? y) Yes n) No y/n> y --------------------- -[remote] -type = onedrive -token = {"access_token":"youraccesstoken","token_type":"Bearer","refresh_token":"yourrefreshtoken","expiry":"2018-08-26T22:39:52.486512262+08:00"} -drive_id = b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk -drive_type = business --------------------- +Configuration complete. +Options: +- type: onedrive +- token: {"access_token":"youraccesstoken","token_type":"Bearer","refresh_token":"yourrefreshtoken","expiry":"2018-08-26T22:39:52.486512262+08:00"} +- drive_id: b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk +- drive_type: business +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/opendrive.md b/docs/content/opendrive.md index 4205dc498..6908cfdf1 100644 --- a/docs/content/opendrive.md +++ b/docs/content/opendrive.md @@ -41,11 +41,12 @@ Enter the password: password: Confirm the password: password: --------------------- -[remote] -username = -password = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: opendrive +- username: +- password: *** ENCRYPTED *** +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/overview.md b/docs/content/overview.md index a07f993ac..6376dc253 100644 --- a/docs/content/overview.md +++ b/docs/content/overview.md @@ -33,6 +33,7 @@ Here is an overview of the major features of each cloud storage system. 
| HDFS | - | R/W | No | No | - | - | | HiDrive | HiDrive ¹² | R/W | No | No | - | - | | HTTP | - | R | No | No | R | - | +| iCloud Drive | - | R | No | No | - | - | | Internet Archive | MD5, SHA1, CRC32 | R/W ¹¹ | No | No | - | RWU | | Jottacloud | MD5 | R/W | Yes | No | R | RW | | Koofr | MD5 | - | Yes | No | - | - | @@ -46,7 +47,7 @@ Here is an overview of the major features of each cloud storage system. | OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - | | OpenStack Swift | MD5 | R/W | No | No | R/W | - | | Oracle Object Storage | MD5 | R/W | No | No | R/W | - | -| pCloud | MD5, SHA1 ⁷ | R | No | No | W | - | +| pCloud | MD5, SHA1 ⁷ | R/W | No | No | W | - | | PikPak | MD5 | R | No | No | R | - | | Pixeldrain | SHA256 | R/W | No | No | R | RW | | premiumize.me | - | - | Yes | No | R | - | @@ -505,12 +506,13 @@ upon backend-specific capabilities. | Files.com | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | No | Yes | | FTP | No | No | Yes | Yes | No | No | Yes | No | No | No | Yes | | Gofile | Yes | Yes | Yes | Yes | No | No | Yes | No | Yes | Yes | Yes | -| Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No | No | No | No | +| Google Cloud Storage | Yes | Yes | No | No | No | No | Yes | No | No | No | No | | Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes | | Google Photos | No | No | No | No | No | No | No | No | No | No | No | | HDFS | Yes | No | Yes | Yes | No | No | Yes | No | No | Yes | Yes | | HiDrive | Yes | Yes | Yes | Yes | No | No | Yes | No | No | No | Yes | | HTTP | No | No | No | No | No | No | No | No | No | No | Yes | +| iCloud Drive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes | | ImageKit | Yes | Yes | Yes | No | No | No | No | No | No | No | Yes | | Internet Archive | No | Yes | No | No | Yes | Yes | No | No | Yes | Yes | No | | Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | No | Yes | Yes | Yes | @@ -521,7 +523,7 @@ upon backend-specific capabilities. | Microsoft Azure Blob Storage | Yes | Yes | No | No | No | Yes | Yes | Yes | No | No | No | | Microsoft Azure Files Storage | No | Yes | Yes | Yes | No | No | Yes | Yes | No | Yes | Yes | | Microsoft OneDrive | Yes | Yes | Yes | Yes | Yes | Yes ⁵ | No | No | Yes | Yes | Yes | -| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | No | Yes | +| OpenDrive | Yes | Yes | Yes | Yes | No | No | No | No | No | Yes | Yes | | OpenStack Swift | Yes ¹ | Yes | No | No | No | Yes | Yes | No | No | Yes | No | | Oracle Object Storage | No | Yes | No | No | Yes | Yes | Yes | Yes | No | No | No | | pCloud | Yes | Yes | Yes | Yes | Yes | No | No | No | Yes | Yes | Yes | diff --git a/docs/content/pcloud.md b/docs/content/pcloud.md index be032c491..10b17dbdf 100644 --- a/docs/content/pcloud.md +++ b/docs/content/pcloud.md @@ -51,12 +51,13 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -client_id = -client_secret = -token = {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"} --------------------- +Configuration complete. +Options: +- type: pcloud +- client_id: +- client_secret: +- token: {"access_token":"XXX","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"} +Keep this "remote" remote? 
y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/premiumizeme.md b/docs/content/premiumizeme.md index ebda9a13a..6ec591d2f 100644 --- a/docs/content/premiumizeme.md +++ b/docs/content/premiumizeme.md @@ -50,11 +50,11 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = premiumizeme -token = {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"} --------------------- +Configuration complete. +Options: +- type: premiumizeme +- token: {"access_token":"XXX","token_type":"Bearer","refresh_token":"XXX","expiry":"2029-08-07T18:44:15.548915378+01:00"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/protondrive.md b/docs/content/protondrive.md index d9f59738d..2b24a0b54 100644 --- a/docs/content/protondrive.md +++ b/docs/content/protondrive.md @@ -65,12 +65,12 @@ Option 2fa. Enter a value. Press Enter to leave empty. 2fa> 123456 Remote config --------------------- -[remote] -type = protondrive -user = you@protonmail.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: protondrive +- user: you@protonmail.com +- pass: *** ENCRYPTED *** +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/qingstor.md b/docs/content/qingstor.md index 47d9aa668..6fdebf006 100644 --- a/docs/content/qingstor.md +++ b/docs/content/qingstor.md @@ -60,15 +60,16 @@ Number of connection retry. Leave blank will use the default value "3". connection_retries> Remote config --------------------- -[remote] -env_auth = false -access_key_id = access_key -secret_access_key = secret_key -endpoint = -zone = pek3a -connection_retries = --------------------- +Configuration complete. +Options: +- type: qingstor +- env_auth: false +- access_key_id: access_key +- secret_access_key: secret_key +- endpoint: +- zone: pek3a +- connection_retries: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/quatrix.md b/docs/content/quatrix.md index d580e0dfb..4bb4dea6c 100644 --- a/docs/content/quatrix.md +++ b/docs/content/quatrix.md @@ -44,11 +44,12 @@ api_key> your_api_key Host name of Quatrix account. host> example.quatrix.it --------------------- -[remote] -api_key = your_api_key -host = example.quatrix.it --------------------- +Configuration complete. +Options: +- type: quatrix +- api_key: your_api_key +- host: example.quatrix.it +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -94,12 +95,12 @@ e/n/d/r/c/s/q> e Choose a number from below, or type in an existing value 1 > remote remote> remote --------------------- -[remote] -type = quatrix -host = some_host.quatrix.it -api_key = your_api_key --------------------- +Configuration complete. +Options: +- type: quatrix +- host: some_host.quatrix.it +- api_key: your_api_key +Keep this "remote" remote? Edit remote Option api_key. API key for accessing Quatrix account @@ -109,12 +110,12 @@ Option host. Host name of Quatrix account Enter a string value. Press Enter for the default (some_host.quatrix.it). --------------------- -[remote] -type = quatrix -host = some_host.quatrix.it -api_key = your_api_key --------------------- +Configuration complete. 
+Options:
+- type: quatrix
+- host: some_host.quatrix.it
+- api_key: your_api_key
+Keep this "remote" remote?
y) Yes this is OK
e) Edit this remote
d) Delete this remote
diff --git a/docs/content/rc.md b/docs/content/rc.md
index 32e93867b..7a8ef34e4 100644
--- a/docs/content/rc.md
+++ b/docs/content/rc.md
@@ -100,6 +100,7 @@ Default Off.
### --rc-enable-metrics

Enable OpenMetrics/Prometheus compatible endpoint at `/metrics`.
+If more control over the metrics is desired (for example running it on a different port or with different auth) then the endpoint can be enabled with the `--metrics-*` flags instead.

Default Off.
@@ -1715,6 +1716,11 @@ Returns:
Returns an object where keys are option block names and values are an
object with the current option values in.

+Parameters:
+
+- blocks: optional string of comma separated blocks to include
+    - all are included if this is missing or ""
+
Note that these are the global options which are unaffected by use of
the _config and _filter parameters. If you wish to read the parameters
set in _config then use options/config and for _filter use
options/filter.
@@ -1727,6 +1733,11 @@ map to the external options very easily with a few exceptions.
Returns an object where keys are option block names and values are an
array of objects with info about each options.

+Parameters:
+
+- blocks: optional string of comma separated blocks to include
+    - all are included if this is missing or ""
+
These objects are in the same format as returned by
"config/providers". They are described in the [option blocks](#option-blocks) section.
@@ -2013,6 +2024,73 @@ If poll-interval is updated or disabled temporarily, some changes
might not get picked up by the polling function, depending on the
used remote.

+This command takes an "fs" parameter. If this parameter is not
+supplied and if there is only one VFS in use then that VFS will be
+used. If there is more than one VFS in use then the "fs" parameter
+must be supplied.
+
+### vfs/queue: Queue info for a VFS. {#vfs-queue}
+
+This returns info about the upload queue for the selected VFS.
+
+This is only useful if `--vfs-cache-mode` > off. If you call it when
+the `--vfs-cache-mode` is off, it will return an empty result.
+
+    {
+        "queued": // an array of files queued for upload
+        [
+            {
+                "name": "file",   // string: name (full path) of the file,
+                "id": 123,        // integer: id of this item in the queue,
+                "size": 79,       // integer: size of the file in bytes
+                "expiry": 1.5,    // float: time until file is eligible for transfer, lowest goes first
+                "tries": 1,       // integer: number of times we have tried to upload
+                "delay": 5.0,     // float: seconds between upload attempts
+                "uploading": false, // boolean: true if item is being uploaded
+            },
+        ],
+    }
+
+The `expiry` time is the time until the file is eligible for being
+uploaded in floating point seconds. This may go negative. As rclone
+only transfers `--transfers` files at once, only the lowest
+`--transfers` expiry times will have `uploading` as `true`. So there
+may be files with negative expiry times for which `uploading` is
+`false`.
+
+
+This command takes an "fs" parameter. If this parameter is not
+supplied and if there is only one VFS in use then that VFS will be
+used. If there is more than one VFS in use then the "fs" parameter
+must be supplied.
+
+### vfs/queue-set-expiry: Set the expiry time for an item queued for upload. {#vfs-queue-set-expiry}
+
+Use this to adjust the `expiry` time for an item in the upload queue.
+You will need to read the `id` of the item using `vfs/queue` before
+using this call.
+
+You can then set `expiry` to a floating point number of seconds from
+now when the item is eligible for upload. If you want the item to be
+uploaded as soon as possible then set it to a large negative number (eg
+-1000000000). If you want the upload of the item to be delayed
+for a long time then set it to a large positive number.
+
+Setting the `expiry` of an item which has already started uploading
+will have no effect - the item will carry on being uploaded.
+
+This will return an error if called with `--vfs-cache-mode` off or if
+the `id` passed is not found.
+
+This takes the following parameters:
+
+- `fs` - select the VFS in use (optional)
+- `id` - a numeric ID as returned from `vfs/queue`
+- `expiry` - a new expiry time as floating point seconds
+
+This returns an empty result on success, or an error.
+
+
This command takes an "fs" parameter. If this parameter is not
supplied and if there is only one VFS in use then that VFS will be
used. If there is more than one VFS in use then the "fs" parameter
must be supplied.
diff --git a/docs/content/release_signing.md b/docs/content/release_signing.md
index 8e19303dc..824d42d6b 100644
--- a/docs/content/release_signing.md
+++ b/docs/content/release_signing.md
@@ -149,7 +149,7 @@ $ rclone hashsum sha256 -C SHA256SUMS rclone-v1.63.1-windows-amd64.zip
You can verify the signatures and hashes in one command line like this:

```
-$ gpg --decrypt SHA256SUMS | sha256sum -c --ignore-missing
+$ h=$(gpg --decrypt SHA256SUMS) && echo "$h" | sha256sum - -c --ignore-missing
gpg: Signature made Mon 17 Jul 2023 15:03:17 BST
gpg: using DSA key FBF737ECE9F8AB18604BD2AC93935E02FF3B54FA
gpg: Good signature from "Nick Craig-Wood <nick@craig-wood.com>" [ultimate]
diff --git a/docs/content/remote_setup.md b/docs/content/remote_setup.md
index 17cdeef42..63318cd9e 100644
--- a/docs/content/remote_setup.md
+++ b/docs/content/remote_setup.md
@@ -15,37 +15,33 @@ two ways of doing it, described below.

## Configuring using rclone authorize ##

-On the headless box run `rclone` config but answer `N` to the `Use web browser
-to automatically authenticate?` question.
+On the headless box run `rclone config` but answer `N` to the `Use auto config?` question.

```
-...
-Remote config
-Use web browser to automatically authenticate rclone with remote?
- * Say Y if the machine running rclone has a web browser you can use
- * Say N if running rclone on a (remote) machine without web browser access
-If not sure try Y. If Y failed, try N.
+Use auto config?
+ * Say Y if not sure
+ * Say N if you are working on a remote or headless machine
+
y) Yes (default)
n) No
y/n> n
+
+Option config_token.
For this to work, you will need rclone available on a machine that has
a web browser available.
-
For more help and alternate methods see: https://rclone.org/remote_setup/
-
Execute the following on the machine with the web browser (same rclone
version recommended):
-
-    rclone authorize "dropbox"
-
-Then paste the result below:
-result>
+    rclone authorize "onedrive"
+Then paste the result.
+Enter a value.
+config_token>
```

Then on your main desktop machine

```
-rclone authorize "dropbox"
+rclone authorize "onedrive"
If your browser doesn't open automatically go to the following link: http://127.0.0.1:53682/auth
Log in and authorize rclone for access
Waiting for code...
@@ -58,7 +54,7 @@ SECRET_TOKEN Then back to the headless box, paste in the code ``` -result> SECRET_TOKEN +config_token> SECRET_TOKEN -------------------- [acd12] client_id = @@ -100,16 +96,13 @@ Linux and MacOS users can utilize SSH Tunnel to redirect the headless box port 5 ``` ssh -L localhost:53682:localhost:53682 username@remote_server ``` -Then on the headless box run `rclone` config and answer `Y` to the `Use web -browser to automatically authenticate?` question. +Then on the headless box run `rclone config` and answer `Y` to the `Use auto config?` question. ``` -... -Remote config -Use web browser to automatically authenticate rclone with remote? - * Say Y if the machine running rclone has a web browser you can use - * Say N if running rclone on a (remote) machine without web browser access -If not sure try Y. If Y failed, try N. +Use auto config? + * Say Y if not sure + * Say N if you are working on a remote or headless machine + y) Yes (default) n) No y/n> y diff --git a/docs/content/s3.md b/docs/content/s3.md index 6f9aef1b9..d5192542f 100644 --- a/docs/content/s3.md +++ b/docs/content/s3.md @@ -27,6 +27,7 @@ The S3 backend can be used with a number of different providers: {{< provider name="Linode Object Storage" home="https://www.linode.com/products/object-storage/" config="/s3/#linode" >}} {{< provider name="Magalu Object Storage" home="https://magalu.cloud/object-storage/" config="/s3/#magalu" >}} {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}} +{{< provider name="Outscale" home="https://en.outscale.com/storage/outscale-object-storage/" config="/s3/#outscale" >}} {{< provider name="Petabox" home="https://petabox.io/" config="/s3/#petabox" >}} {{< provider name="Qiniu Cloud Object Storage (Kodo)" home="https://www.qiniu.com/en/products/kodo" config="/s3/#qiniu" >}} {{< provider name="RackCorp Object Storage" home="https://www.rackcorp.com/" config="/s3/#RackCorp" >}} @@ -252,20 +253,20 @@ Choose a number from below, or type in your own value \ "GLACIER_IR" storage_class> 1 Remote config --------------------- -[remote] -type = s3 -provider = AWS -env_auth = false -access_key_id = XXX -secret_access_key = YYY -region = us-east-1 -endpoint = -location_constraint = -acl = private -server_side_encryption = -storage_class = --------------------- +Configuration complete. +Options: +- type: s3 +- provider: AWS +- env_auth: false +- access_key_id: XXX +- secret_access_key: YYY +- region: us-east-1 +- endpoint: +- location_constraint: +- acl: private +- server_side_encryption: +- storage_class: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -401,6 +402,38 @@ there for more details. Setting this flag increases the chance for undetected upload failures. +### Increasing performance + +#### Using server-side copy + +If you are copying objects between S3 buckets in the same region, you should +use server-side copy. +This is much faster than downloading and re-uploading the objects, as no data is transferred. + +For rclone to use server-side copy, you must use the same remote for the source and destination. + + rclone copy s3:source-bucket s3:destination-bucket + +When using server-side copy, the performance is limited by the rate at which rclone issues +API requests to S3. +See below for how to increase the number of API requests rclone makes. + +#### Increasing the rate of API requests + +You can increase the rate of API requests to S3 by increasing the parallelism using `--transfers` and `--checkers` +options. 
+
+Rclone uses very conservative defaults for these settings, as not all providers support high rates of requests.
+Depending on your provider, you can significantly increase the number of transfers and checkers.
+
+For example, with AWS S3, you can increase the number of checkers to values like 200.
+If you are doing a server-side copy, you can also increase the number of transfers to 200.
+
+    rclone sync --transfers 200 --checkers 200 --checksum s3:source-bucket s3:destination-bucket
+
+You will need to experiment with these values to find the optimal settings for your setup.
+
 
 ### Versions
 
 When bucket versioning is enabled (this can be done with rclone with
@@ -1409,6 +1442,10 @@ Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this
 false - rclone will do this automatically based on the provider setting.
 
+Note that if your bucket isn't a valid DNS name, i.e. has '.' or '_' in it,
+you'll need to set this to true.
+
+
 Properties:
 
 - Config: force_path_style
@@ -1544,8 +1581,6 @@ It can also be needed if the user you are using does not have bucket
 creation permissions. Before v1.52.0 this would have passed silently
 due to a bug.
 
-We recommend setting `no_check_bucket = true` when the provider is Alibaba, otherwise the bucket ACL may changed after every `rclone mkdir`. See [#7889](https://github.com/rclone/rclone/issues/7889).
-
 
 Properties:
 
@@ -1700,6 +1735,24 @@ Properties:
 - Type: Tristate
 - Default: unset
 
+#### --s3-use-unsigned-payload
+
+Whether to use an unsigned payload in PutObject
+
+Rclone has to avoid the AWS SDK seeking the body when calling
+PutObject. The AWS provider can add checksums in the trailer to avoid
+seeking but other providers can't.
+
+This should be true, false or left unset to use the default for the provider.
+
+
+Properties:
+
+- Config: use_unsigned_payload
+- Env Var: RCLONE_S3_USE_UNSIGNED_PAYLOAD
+- Type: Tristate
+- Default: unset
+
 #### --s3-use-presigned-request
 
 Whether to use a presigned request or PutObject for single part uploads
@@ -1857,7 +1910,7 @@ Properties:
 
 #### --s3-sts-endpoint
 
-Endpoint for STS.
+Endpoint for STS (deprecated).
 
 Leave blank if using AWS to use the default endpoint for the region.
@@ -1920,6 +1973,33 @@ Properties:
 - Type: Tristate
 - Default: unset
 
+#### --s3-sdk-log-mode
+
+Set to debug the SDK
+
+This can be set to a comma separated list of the following functions:
+
+- `Signing`
+- `Retries`
+- `Request`
+- `RequestWithBody`
+- `Response`
+- `ResponseWithBody`
+- `DeprecatedUsage`
+- `RequestEventMessage`
+- `ResponseEventMessage`
+
+Use `Off` to disable and `All` to set all log levels. You will need to
+use `-vv` to see the debug level logs.
+
+
+Properties:
+
+- Config: sdk_log_mode
+- Env Var: RCLONE_S3_SDK_LOG_MODE
+- Type: Bits
+- Default: Off
+
 #### --s3-description
 
 Description of the remote.
@@ -1968,18 +2048,19 @@ These can be run on a running backend using the rc command
 
 ### restore
 
-Restore objects from GLACIER to normal storage
+Restore objects from GLACIER or INTELLIGENT-TIERING archive tier
 
     rclone backend restore remote: [options] [<arguments>+]
 
-This command can be used to restore one or more objects from GLACIER
-to normal storage.
+This command can be used to restore one or more objects from GLACIER to normal storage
+or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier.
Usage Examples: rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS + rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags @@ -2009,17 +2090,17 @@ if not. Options: - "description": The optional description for the job. -- "lifetime": Lifetime of the active copy in days +- "lifetime": Lifetime of the active copy in days, ignored for INTELLIGENT-TIERING storage - "priority": Priority of restore: Standard|Expedited|Bulk ### restore-status -Show the restore status for objects being restored from GLACIER to normal storage +Show the restore status for objects being restored from GLACIER or INTELLIGENT-TIERING storage rclone backend restore-status remote: [options] [+] -This command can be used to show the status for objects being restored from GLACIER -to normal storage. +This command can be used to show the status for objects being restored from GLACIER to normal storage +or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier. Usage Examples: @@ -2049,6 +2130,15 @@ It returns a list of status dictionaries. "RestoreExpiryDate": "2023-09-06T12:29:19+01:00" }, "StorageClass": "DEEP_ARCHIVE" + }, + { + "Remote": "test.gz", + "VersionID": null, + "RestoreStatus": { + "IsRestoreInProgress": true, + "RestoreExpiryDate": "null" + }, + "StorageClass": "INTELLIGENT_TIERING" } ] @@ -2205,6 +2295,21 @@ You can also do this entirely on the command line This is the provider used as main example and described in the [configuration](#configuration) section above. +### AWS Directory Buckets + +From rclone v1.69 [Directory Buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +are supported. + +You will need to set the `directory_buckets = true` config parameter +or use `--s3-directory-buckets`. + +Note that rclone cannot yet: + +- Create directory buckets +- List directory buckets + +See [the --s3-directory-buckets flag](#s3-directory-buckets) for more info + ### AWS Snowball Edge [AWS Snowball](https://aws.amazon.com/snowball/) is a hardware @@ -3121,6 +3226,168 @@ So once set up, for example, to copy files into a bucket rclone copy /path/to/files minio:bucket ``` +### Outscale + +[OUTSCALE Object Storage (OOS)](https://en.outscale.com/storage/outscale-object-storage/) is an enterprise-grade, S3-compatible storage service provided by OUTSCALE, a brand of Dassault Systèmes. For more information about OOS, see the [official documentation](https://docs.outscale.com/en/userguide/OUTSCALE-Object-Storage-OOS.html). + +Here is an example of an OOS configuration that you can paste into your rclone configuration file: + +``` +[outscale] +type = s3 +provider = Outscale +env_auth = false +access_key_id = ABCDEFGHIJ0123456789 +secret_access_key = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +region = eu-west-2 +endpoint = oos.eu-west-2.outscale.com +acl = private +``` + +You can also run `rclone config` to go through the interactive setup process: + +``` +No remotes found, make a new one? +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n +``` + +``` +Enter name for new remote. +name> outscale +``` + +``` +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. 
+[snip] + X / Amazon S3 Compliant Storage Providers including AWS, ...Outscale, ...and others + \ (s3) +[snip] +Storage> outscale +``` + +``` +Option provider. +Choose your S3 provider. +Choose a number from below, or type in your own value. +Press Enter to leave empty. +[snip] +XX / OUTSCALE Object Storage (OOS) + \ (Outscale) +[snip] +provider> Outscale +``` + +``` +Option env_auth. +Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). +Only applies if access_key_id and secret_access_key is blank. +Choose a number from below, or type in your own boolean value (true or false). +Press Enter for the default (false). + 1 / Enter AWS credentials in the next step. + \ (false) + 2 / Get AWS credentials from the environment (env vars or IAM). + \ (true) +env_auth> +``` + +``` +Option access_key_id. +AWS Access Key ID. +Leave blank for anonymous access or runtime credentials. +Enter a value. Press Enter to leave empty. +access_key_id> ABCDEFGHIJ0123456789 +``` + +``` +Option secret_access_key. +AWS Secret Access Key (password). +Leave blank for anonymous access or runtime credentials. +Enter a value. Press Enter to leave empty. +secret_access_key> XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +``` + +``` +Option region. +Region where your bucket will be created and your data stored. +Choose a number from below, or type in your own value. +Press Enter to leave empty. + 1 / Paris, France + \ (eu-west-2) + 2 / New Jersey, USA + \ (us-east-2) + 3 / California, USA + \ (us-west-1) + 4 / SecNumCloud, Paris, France + \ (cloudgouv-eu-west-1) + 5 / Tokyo, Japan + \ (ap-northeast-1) +region> 1 +``` + +``` +Option endpoint. +Endpoint for S3 API. +Required when using an S3 clone. +Choose a number from below, or type in your own value. +Press Enter to leave empty. + 1 / Outscale EU West 2 (Paris) + \ (oos.eu-west-2.outscale.com) + 2 / Outscale US east 2 (New Jersey) + \ (oos.us-east-2.outscale.com) + 3 / Outscale EU West 1 (California) + \ (oos.us-west-1.outscale.com) + 4 / Outscale SecNumCloud (Paris) + \ (oos.cloudgouv-eu-west-1.outscale.com) + 5 / Outscale AP Northeast 1 (Japan) + \ (oos.ap-northeast-1.outscale.com) +endpoint> 1 +``` + +``` +Option acl. +Canned ACL used when creating buckets and storing or copying objects. +This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. +For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl +Note that this ACL is applied when server-side copying objects as S3 +doesn't copy the ACL from the source but rather writes a fresh one. +If the acl is an empty string then no X-Amz-Acl: header is added and +the default (private) will be used. +Choose a number from below, or type in your own value. +Press Enter to leave empty. + / Owner gets FULL_CONTROL. + 1 | No one else has access rights (default). + \ (private) +[snip] +acl> 1 +``` + +``` +Edit advanced config? +y) Yes +n) No (default) +y/n> n +``` + +``` +Configuration complete. +Options: +- type: s3 +- provider: Outscale +- access_key_id: ABCDEFGHIJ0123456789 +- secret_access_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +- endpoint: oos.eu-west-2.outscale.com +Keep this "outscale" remote? 
+y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +``` + ### Qiniu Cloud Object Storage (Kodo) {#qiniu} [Qiniu Cloud Object Storage (Kodo)](https://www.qiniu.com/en/products/kodo), a completely independent-researched core technology which is proven by repeated customer experience has occupied absolute leading market leader position. Kodo can be widely applied to mass data management. diff --git a/docs/content/sftp.md b/docs/content/sftp.md index 4e905ee9b..2ba4a48a7 100644 --- a/docs/content/sftp.md +++ b/docs/content/sftp.md @@ -74,14 +74,15 @@ y/g/n> n Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent. key_file> Remote config --------------------- -[remote] -host = example.com -user = sftpuser -port = -pass = -key_file = --------------------- +Configuration complete. +Options: +- type: sftp +- host: example.com +- user: sftpuser +- port: +- pass: +- key_file: +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -445,7 +446,15 @@ Properties: Raw PEM-encoded private key. -If specified, will override key_file parameter. +Note that this should be on a single line with line endings replaced with '\n', eg + + key_pem = -----BEGIN RSA PRIVATE KEY-----\nMaMbaIXtE\n0gAMbMbaSsd\nMbaass\n-----END RSA PRIVATE KEY----- + +This will generate the single line correctly: + + awk '{printf "%s\\n", $0}' < ~/.ssh/id_rsa + +If specified, it will override the key_file parameter. Properties: @@ -902,13 +911,13 @@ Maximum number of SFTP simultaneous connections, 0 for unlimited. Note that setting this is very likely to cause deadlocks so it should be used with care. -If you are doing a sync or copy then make sure concurrency is one more +If you are doing a sync or copy then make sure connections is one more than the sum of `--transfers` and `--checkers`. If you use `--check-first` then it just needs to be one more than the maximum of `--checkers` and `--transfers`. -So for `concurrency 3` you'd use `--checkers 2 --transfers 2 +So for `connections 3` you'd use `--checkers 2 --transfers 2 --check-first` or `--checkers 1 --transfers 1`. diff --git a/docs/content/sharefile.md b/docs/content/sharefile.md index 6a66af439..ee201f55e 100644 --- a/docs/content/sharefile.md +++ b/docs/content/sharefile.md @@ -68,12 +68,12 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = sharefile -endpoint = https://XXX.sharefile.com -token = {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"} --------------------- +Configuration complete. +Options: +- type: sharefile +- endpoint: https://XXX.sharefile.com +- token: {"access_token":"XXX","token_type":"bearer","refresh_token":"XXX","expiry":"2019-09-30T19:41:45.878561877+01:00"} +Keep this "remote" remote? 
y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/sponsor.md b/docs/content/sponsor.md index f6638a4fc..429127980 100644 --- a/docs/content/sponsor.md +++ b/docs/content/sponsor.md @@ -61,3 +61,4 @@ Thank you very much to our sponsors: {{< sponsor src="/img/logos/warp.svg" width="300" height="200" title="Visit our sponsor warp.dev" link="https://www.warp.dev/?utm_source=rclone&utm_medium=referral&utm_campaign=rclone_20231103">}} {{< sponsor src="/img/logos/sia.svg" width="200" height="200" title="Visit our sponsor sia" link="https://sia.tech">}} {{< sponsor src="/img/logos/route4me.svg" width="400" height="200" title="Visit our sponsor Route4Me" link="https://route4me.com/">}} +{{< sponsor src="/img/logos/rcloneview.svg" width="300" height="200" title="Visit our sponsor RcloneView" link="https://rcloneview.com/">}} diff --git a/docs/content/storj.md b/docs/content/storj.md index 667acba49..8c7046de4 100644 --- a/docs/content/storj.md +++ b/docs/content/storj.md @@ -1,6 +1,8 @@ --- title: "Storj" description: "Rclone docs for Storj" +aliases: + - tardigrade versionIntroduced: "v1.52" --- @@ -150,11 +152,11 @@ Access Grant. Enter a string value. Press Enter for the default (""). access_grant> your-access-grant-received-by-someone-else Remote config --------------------- -[remote] -type = storj -access_grant = your-access-grant-received-by-someone-else --------------------- +Configuration complete. +Options: +- type: storj +- access_grant: your-access-grant-received-by-someone-else +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -205,14 +207,14 @@ Encryption Passphrase. To access existing objects enter passphrase used for uplo Enter a string value. Press Enter for the default (""). passphrase> your-human-readable-encryption-passphrase Remote config --------------------- -[remote] -type = storj -satellite_address = 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777 -api_key = your-api-key-for-your-storj-project -passphrase = your-human-readable-encryption-passphrase -access_grant = the-access-grant-generated-from-the-api-key-and-passphrase --------------------- +Configuration complete. +Options: +- type: storj +- satellite_address: 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us1.storj.io:7777 +- api_key: your-api-key-for-your-storj-project +- passphrase: your-human-readable-encryption-passphrase +- access_grant: the-access-grant-generated-from-the-api-key-and-passphrase +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote diff --git a/docs/content/sugarsync.md b/docs/content/sugarsync.md index c3eaf2047..dc789db28 100644 --- a/docs/content/sugarsync.md +++ b/docs/content/sugarsync.md @@ -62,11 +62,11 @@ Remote config Username (email address)> nick@craig-wood.com Your Sugarsync password is only required during setup and will not be stored. password: --------------------- -[remote] -type = sugarsync -refresh_token = https://api.sugarsync.com/app-authorization/XXXXXXXXXXXXXXXXXX --------------------- +Configuration complete. +Options: +- type: sugarsync +- refresh_token: https://api.sugarsync.com/app-authorization/XXXXXXXXXXXXXXXXXX +Keep this "remote" remote? 
y) Yes this is OK (default) e) Edit this remote d) Delete this remote diff --git a/docs/content/swift.md b/docs/content/swift.md index f15ba7e81..700263a3c 100644 --- a/docs/content/swift.md +++ b/docs/content/swift.md @@ -508,6 +508,48 @@ Properties: - Type: bool - Default: false +#### --swift-fetch-until-empty-page + +When paginating, always fetch unless we received an empty page. + +Consider using this option if rclone listings show fewer objects +than expected, or if repeated syncs copy unchanged objects. + +It is safe to enable this, but rclone may make more API calls than +necessary. + +This is one of a pair of workarounds to handle implementations +of the Swift API that do not implement pagination as expected. See +also "partial_page_fetch_threshold". + +Properties: + +- Config: fetch_until_empty_page +- Env Var: RCLONE_SWIFT_FETCH_UNTIL_EMPTY_PAGE +- Type: bool +- Default: false + +#### --swift-partial-page-fetch-threshold + +When paginating, fetch if the current page is within this percentage of the limit. + +Consider using this option if rclone listings show fewer objects +than expected, or if repeated syncs copy unchanged objects. + +It is safe to enable this, but rclone may make more API calls than +necessary. + +This is one of a pair of workarounds to handle implementations +of the Swift API that do not implement pagination as expected. See +also "fetch_until_empty_page". + +Properties: + +- Config: partial_page_fetch_threshold +- Env Var: RCLONE_SWIFT_PARTIAL_PAGE_FETCH_THRESHOLD +- Type: int +- Default: 0 + #### --swift-chunk-size Above this size files will be chunked. diff --git a/docs/content/tardigrade.md b/docs/content/tardigrade.md deleted file mode 100644 index bead13e37..000000000 --- a/docs/content/tardigrade.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: "Tardigrade" -description: "Rclone docs for Tardigrade" -versionIntroduced: "v1.52" ---- - -# {{< icon "fas fa-dove" >}} Tardigrade - -The Tardigrade backend has been renamed to be the [Storj backend](/storj/). -Old configuration files will continue to work. diff --git a/docs/content/union.md b/docs/content/union.md index 4d56022f3..f1fe536ab 100644 --- a/docs/content/union.md +++ b/docs/content/union.md @@ -68,11 +68,11 @@ Cache time of usage and free space (in seconds). This option is only useful when Enter a signed integer. Press Enter for the default ("120"). cache_time> Remote config --------------------- -[remote] -type = union -upstreams = remote1:dir1 remote2:dir2 remote3:dir3 --------------------- +Configuration complete. +Options: +- type: union +- upstreams: remote1:dir1 remote2:dir2 remote3:dir3 +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/webdav.md b/docs/content/webdav.md index c42fcd57b..548b3b9d4 100644 --- a/docs/content/webdav.md +++ b/docs/content/webdav.md @@ -72,15 +72,15 @@ password: Bearer token instead of user/pass (e.g. a Macaroon) bearer_token> Remote config --------------------- -[remote] -type = webdav -url = https://example.com/remote.php/webdav/ -vendor = nextcloud -user = user -pass = *** ENCRYPTED *** -bearer_token = --------------------- +Configuration complete. +Options: +- type: webdav +- url: https://example.com/remote.php/webdav/ +- vendor: nextcloud +- user: user +- pass: *** ENCRYPTED *** +- bearer_token: +Keep this "remote" remote? 
y) Yes this is OK e) Edit this remote d) Delete this remote diff --git a/docs/content/yandex.md b/docs/content/yandex.md index 8d0ab27bc..6c65b1adc 100644 --- a/docs/content/yandex.md +++ b/docs/content/yandex.md @@ -45,12 +45,13 @@ If your browser doesn't open automatically go to the following link: http://127. Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -client_id = -client_secret = -token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"OAuth","expiry":"2016-12-29T12:27:11.362788025Z"} --------------------- +Configuration complete. +Options: +- type: yandex +- client_id: +- client_secret: +- token: {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"OAuth","expiry":"2016-12-29T12:27:11.362788025Z"} +Keep this "remote" remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -209,6 +210,17 @@ Properties: - Type: Encoding - Default: Slash,Del,Ctl,InvalidUtf8,Dot +#### --yandex-spoof-ua + +Set the user agent to match an official version of the yandex disk client. May help with upload performance. + +Properties: + +- Config: spoof_ua +- Env Var: RCLONE_YANDEX_SPOOF_UA +- Type: bool +- Default: true + #### --yandex-description Description of the remote. diff --git a/docs/content/zoho.md b/docs/content/zoho.md index 9fd5377b5..e352f25d2 100644 --- a/docs/content/zoho.md +++ b/docs/content/zoho.md @@ -64,12 +64,12 @@ Choose a number from below, or type in your own value 1 / General \ "4u2869d2aa6fca04f4f2f896b6539243b85b1" Enter a Workspace ID> 1 --------------------- -[remote] -type = zoho -token = {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"Zoho-oauthtoken","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","expiry":"2020-10-12T00:54:52.370275223+02:00"} -root_folder_id = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx --------------------- +Configuration complete. +Options: +- type: zoho +- token: {"access_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","token_type":"Zoho-oauthtoken","refresh_token":"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx","expiry":"2020-10-12T00:54:52.370275223+02:00"} +- root_folder_id: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +Keep this "remote" remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -224,6 +224,17 @@ Properties: - Type: string - Required: false +#### --zoho-upload-cutoff + +Cutoff for switching to large file upload api (>= 10 MiB). + +Properties: + +- Config: upload_cutoff +- Env Var: RCLONE_ZOHO_UPLOAD_CUTOFF +- Type: SizeSuffix +- Default: 10Mi + #### --zoho-encoding The encoding for the backend. diff --git a/docs/layouts/chrome/navbar.html b/docs/layouts/chrome/navbar.html index 6b2938c75..c6fa0db6f 100644 --- a/docs/layouts/chrome/navbar.html +++ b/docs/layouts/chrome/navbar.html @@ -75,6 +75,7 @@ HDFS (Hadoop Distributed Filesystem) HiDrive HTTP + iCloud Drive ImageKit Internet Archive Jottacloud diff --git a/docs/layouts/partials/version.html b/docs/layouts/partials/version.html index 334e2fab0..294e240e9 100644 --- a/docs/layouts/partials/version.html +++ b/docs/layouts/partials/version.html @@ -1 +1 @@ -v1.68.0 \ No newline at end of file +v1.69.0 \ No newline at end of file diff --git a/fs/accounting/accounting.go b/fs/accounting/accounting.go index d7d7e02dd..5a6be7df4 100644 --- a/fs/accounting/accounting.go +++ b/fs/accounting/accounting.go @@ -44,7 +44,9 @@ func Start(ctx context.Context) { // // We can't do this in an init() method as it uses fs.Config // and that isn't set up then. 
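+	// fs.CountError now takes a context so that the error can be counted
+	// against the stats group in the context rather than only the global
+	// stats (see TestCountError in stats_groups_test.go below).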
- fs.CountError = GlobalStats().Error + fs.CountError = func(ctx context.Context, err error) error { + return Stats(ctx).Error(err) + } } // Account limits and accounts for one transfer diff --git a/fs/accounting/accounting_unix.go b/fs/accounting/accounting_unix.go index 76c0a3509..bacb0498a 100644 --- a/fs/accounting/accounting_unix.go +++ b/fs/accounting/accounting_unix.go @@ -35,12 +35,13 @@ func (tb *tokenBucket) startSignalHandler() { tb.toggledOff = !tb.toggledOff tb.curr, tb.prev = tb.prev, tb.curr - s := "disabled" + s, limit := "disabled", "off" if !tb.curr._isOff() { s = "enabled" + limit = tb.currLimit.Bandwidth.String() } - fs.Logf(nil, "Bandwidth limit %s by user", s) + fs.Logf(nil, "Bandwidth limit %s by user (now %s)", s, limit) }() } }() diff --git a/fs/accounting/stats_groups_test.go b/fs/accounting/stats_groups_test.go index dda9dccef..8c02229e7 100644 --- a/fs/accounting/stats_groups_test.go +++ b/fs/accounting/stats_groups_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fstest/testy" "github.com/stretchr/testify/assert" @@ -206,6 +208,34 @@ func TestStatsGroupOperations(t *testing.T) { }) } +func TestCountError(t *testing.T) { + ctx := context.Background() + Start(ctx) + defer func() { + groups = newStatsGroups() + }() + t.Run("global stats", func(t *testing.T) { + GlobalStats().ResetCounters() + err := fs.CountError(ctx, fmt.Errorf("global err")) + assert.Equal(t, int64(1), GlobalStats().errors) + + assert.True(t, fserrors.IsCounted(err)) + }) + t.Run("group stats", func(t *testing.T) { + statGroupName := fmt.Sprintf("%s-error_group", t.Name()) + GlobalStats().ResetCounters() + stCtx := WithStatsGroup(ctx, statGroupName) + st := StatsGroup(stCtx, statGroupName) + + err := fs.CountError(stCtx, fmt.Errorf("group err")) + + assert.Equal(t, int64(0), GlobalStats().errors) + assert.Equal(t, int64(1), st.errors) + assert.True(t, fserrors.IsCounted(err)) + }) + +} + func percentDiff(start, end uint64) uint64 { return (start - end) * 100 / start } diff --git a/fs/accounting/token_bucket.go b/fs/accounting/token_bucket.go index a306096e8..a8b4bcd2d 100644 --- a/fs/accounting/token_bucket.go +++ b/fs/accounting/token_bucket.go @@ -41,7 +41,12 @@ type tokenBucket struct { // // Call with lock held func (bs *buckets) _isOff() bool { //nolint:unused // Don't include unused when running golangci-lint in case its on windows where this is not called - return bs[0] == nil + for i := range bs { + if bs[i] != nil { + return false + } + } + return true } // Disable the limits diff --git a/fs/cache/cache.go b/fs/cache/cache.go index ae86ac082..63afa94e8 100644 --- a/fs/cache/cache.go +++ b/fs/cache/cache.go @@ -4,6 +4,7 @@ package cache import ( "context" "runtime" + "strings" "sync" "github.com/rclone/rclone/fs" @@ -12,10 +13,11 @@ import ( ) var ( - once sync.Once // creation - c *cache.Cache - mu sync.Mutex // mutex to protect remap - remap = map[string]string{} // map user supplied names to canonical names + once sync.Once // creation + c *cache.Cache + mu sync.Mutex // mutex to protect remap + remap = map[string]string{} // map user supplied names to canonical names - [fsString]canonicalName + childParentMap = map[string]string{} // tracks a one-to-many relationship between parent dirs and their direct children files - [child]parent ) // Create the cache just once @@ -27,7 +29,7 @@ func createOnFirstUse() { c.SetExpireInterval(ci.FsCacheExpireInterval) 
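+	// When a cached backend expires, shut it down and count any Shutdown
+	// error via the context-aware fs.CountError.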
c.SetFinalizer(func(value interface{}) { if s, ok := value.(fs.Shutdowner); ok { - _ = fs.CountError(s.Shutdown(context.Background())) + _ = fs.CountError(context.Background(), s.Shutdown(context.Background())) } }) }) @@ -57,6 +59,39 @@ func addMapping(fsString, canonicalName string) { mu.Unlock() } +// addChild tracks known file (child) to directory (parent) relationships. +// Note that the canonicalName of a child will always equal that of its parent, +// but not everything with an equal canonicalName is a child. +// It could be an alias or overridden version of a directory. +func addChild(child, parent string) { + if child == parent { + return + } + mu.Lock() + childParentMap[child] = parent + mu.Unlock() +} + +// returns true if name is definitely known to be a child (i.e. a file, not a dir). +// returns false if name is a dir or if we don't know. +func isChild(child string) bool { + mu.Lock() + _, found := childParentMap[child] + mu.Unlock() + return found +} + +// ensures that we return fs.ErrorIsFile when necessary +func getError(fsString string, err error) error { + if err != nil && err != fs.ErrorIsFile { + return err + } + if isChild(fsString) { + return fs.ErrorIsFile + } + return nil +} + // GetFn gets an fs.Fs named fsString either from the cache or creates // it afresh with the create function func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) { @@ -69,31 +104,39 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context created = ok return f, ok, err }) + f, ok := value.(fs.Fs) if err != nil && err != fs.ErrorIsFile { + if ok { + return f, err // for possible future uses of PutErr + } return nil, err } - f = value.(fs.Fs) // Check we stored the Fs at the canonical name if created { canonicalName := fs.ConfigString(f) if canonicalName != canonicalFsString { - // Note that if err == fs.ErrorIsFile at this moment - // then we can't rename the remote as it will have the - // wrong error status, we need to add a new one. - if err == nil { + if err == nil { // it's a dir fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", canonicalFsString, canonicalName) value, found := c.Rename(canonicalFsString, canonicalName) if found { f = value.(fs.Fs) } addMapping(canonicalFsString, canonicalName) - } else { - fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", canonicalFsString, canonicalName) - Put(canonicalName, f) + } else { // it's a file + // the fs we cache is always the file's parent, never the file, + // but we use the childParentMap to return the correct error status based on the fsString passed in. 
+ fs.Debugf(nil, "fs cache: renaming child cache item %q to be canonical for parent %q", canonicalFsString, canonicalName) + value, found := c.Rename(canonicalFsString, canonicalName) // rename the file entry to parent + if found { + f = value.(fs.Fs) // if parent already exists, use it + } + Put(canonicalName, f) // force err == nil for the cache + addMapping(canonicalFsString, canonicalName) // note the fsString-canonicalName connection for future lookups + addChild(fsString, canonicalName) // note the file-directory connection for future lookups } } } - return f, err + return f, getError(fsString, err) // ensure fs.ErrorIsFile is returned when necessary } // Pin f into the cache until Unpin is called @@ -111,7 +154,6 @@ func PinUntilFinalized(f fs.Fs, x interface{}) { runtime.SetFinalizer(x, func(_ interface{}) { Unpin(f) }) - } // Unpin f from the cache @@ -174,6 +216,9 @@ func PutErr(fsString string, f fs.Fs, err error) { canonicalName := fs.ConfigString(f) c.PutErr(canonicalName, f, err) addMapping(fsString, canonicalName) + if err == fs.ErrorIsFile { + addChild(fsString, canonicalName) + } } // Put puts an fs.Fs named fsString into the cache @@ -186,6 +231,7 @@ func Put(fsString string, f fs.Fs) { // Returns number of entries deleted func ClearConfig(name string) (deleted int) { createOnFirstUse() + ClearMappingsPrefix(name) return c.DeletePrefix(name + ":") } @@ -193,6 +239,7 @@ func ClearConfig(name string) (deleted int) { func Clear() { createOnFirstUse() c.Clear() + ClearMappings() } // Entries returns the number of entries in the cache @@ -200,3 +247,39 @@ func Entries() int { createOnFirstUse() return c.Entries() } + +// ClearMappings removes everything from remap and childParentMap +func ClearMappings() { + mu.Lock() + defer mu.Unlock() + remap = map[string]string{} + childParentMap = map[string]string{} +} + +// ClearMappingsPrefix deletes all mappings to parents with given prefix +// +// Returns number of entries deleted +func ClearMappingsPrefix(prefix string) (deleted int) { + mu.Lock() + do := func(mapping map[string]string) { + for key, val := range mapping { + if !strings.HasPrefix(val, prefix) { + continue + } + delete(mapping, key) + deleted++ + } + } + do(remap) + do(childParentMap) + mu.Unlock() + return deleted +} + +// EntriesWithPinCount returns the number of pinned and unpinned entries in the cache +// +// Each entry is counted only once, regardless of entry.pinCount +func EntriesWithPinCount() (pinned, unpinned int) { + createOnFirstUse() + return c.EntriesWithPinCount() +} diff --git a/fs/cache/cache_test.go b/fs/cache/cache_test.go index acddac80c..d9d54fa2e 100644 --- a/fs/cache/cache_test.go +++ b/fs/cache/cache_test.go @@ -24,7 +24,7 @@ func mockNewFs(t *testing.T) func(ctx context.Context, path string) (fs.Fs, erro switch path { case "mock:/": return mockfs.NewFs(ctx, "mock", "/", nil) - case "mock:/file.txt", "mock:file.txt": + case "mock:/file.txt", "mock:file.txt", "mock:/file2.txt", "mock:file2.txt": fMock, err := mockfs.NewFs(ctx, "mock", "/", nil) require.NoError(t, err) return fMock, fs.ErrorIsFile @@ -55,6 +55,7 @@ func TestGet(t *testing.T) { } func TestGetFile(t *testing.T) { + defer ClearMappings() create := mockNewFs(t) assert.Equal(t, 0, Entries()) @@ -63,7 +64,7 @@ func TestGetFile(t *testing.T) { require.Equal(t, fs.ErrorIsFile, err) require.NotNil(t, f) - assert.Equal(t, 2, Entries()) + assert.Equal(t, 1, Entries()) f2, err := GetFn(context.Background(), "mock:/file.txt", create) require.Equal(t, fs.ErrorIsFile, err) @@ -71,7 +72,7 @@ 
func TestGetFile(t *testing.T) { assert.Equal(t, f, f2) - // check parent is there too + // check it is also found when referred to by parent name f2, err = GetFn(context.Background(), "mock:/", create) require.Nil(t, err) require.NotNil(t, f2) @@ -80,6 +81,7 @@ func TestGetFile(t *testing.T) { } func TestGetFile2(t *testing.T) { + defer ClearMappings() create := mockNewFs(t) assert.Equal(t, 0, Entries()) @@ -88,7 +90,7 @@ func TestGetFile2(t *testing.T) { require.Equal(t, fs.ErrorIsFile, err) require.NotNil(t, f) - assert.Equal(t, 2, Entries()) + assert.Equal(t, 1, Entries()) f2, err := GetFn(context.Background(), "mock:file.txt", create) require.Equal(t, fs.ErrorIsFile, err) @@ -96,7 +98,7 @@ func TestGetFile2(t *testing.T) { assert.Equal(t, f, f2) - // check parent is there too + // check it is also found when referred to by parent name f2, err = GetFn(context.Background(), "mock:/", create) require.Nil(t, err) require.NotNil(t, f2) @@ -124,22 +126,22 @@ func TestPutErr(t *testing.T) { assert.Equal(t, 0, Entries()) - PutErr("mock:file.txt", f, fs.ErrorIsFile) + PutErr("mock:/", f, fs.ErrorNotFoundInConfigFile) assert.Equal(t, 1, Entries()) - fNew, err := GetFn(context.Background(), "mock:file.txt", create) - require.Equal(t, fs.ErrorIsFile, err) + fNew, err := GetFn(context.Background(), "mock:/", create) + require.Equal(t, fs.ErrorNotFoundInConfigFile, err) require.Equal(t, f, fNew) assert.Equal(t, 1, Entries()) // Check canonicalisation - PutErr("mock:/file.txt", f, fs.ErrorIsFile) + PutErr("mock:/file.txt", f, fs.ErrorNotFoundInConfigFile) fNew, err = GetFn(context.Background(), "mock:/file.txt", create) - require.Equal(t, fs.ErrorIsFile, err) + require.Equal(t, fs.ErrorNotFoundInConfigFile, err) require.Equal(t, f, fNew) assert.Equal(t, 1, Entries()) @@ -190,6 +192,75 @@ func TestPin(t *testing.T) { Unpin(f2) } +func TestPinFile(t *testing.T) { + defer ClearMappings() + create := mockNewFs(t) + + // Test pinning and unpinning nonexistent + f, err := mockfs.NewFs(context.Background(), "mock", "/file.txt", nil) + require.NoError(t, err) + Pin(f) + Unpin(f) + + // Now test pinning an existing + f2, err := GetFn(context.Background(), "mock:/file.txt", create) + require.Equal(t, fs.ErrorIsFile, err) + assert.Equal(t, 1, len(childParentMap)) + + Pin(f2) + assert.Equal(t, 1, Entries()) + pinned, unpinned := EntriesWithPinCount() + assert.Equal(t, 1, pinned) + assert.Equal(t, 0, unpinned) + + Unpin(f2) + assert.Equal(t, 1, Entries()) + pinned, unpinned = EntriesWithPinCount() + assert.Equal(t, 0, pinned) + assert.Equal(t, 1, unpinned) + + // try a different child of the same parent, and parent + // should not add additional cache items + called = 0 // this one does create() because we haven't seen it before and don't yet know it's a file + f3, err := GetFn(context.Background(), "mock:/file2.txt", create) + assert.Equal(t, fs.ErrorIsFile, err) + assert.Equal(t, 1, Entries()) + assert.Equal(t, 2, len(childParentMap)) + + parent, err := GetFn(context.Background(), "mock:/", create) + assert.NoError(t, err) + assert.Equal(t, 1, Entries()) + assert.Equal(t, 2, len(childParentMap)) + + Pin(f3) + assert.Equal(t, 1, Entries()) + pinned, unpinned = EntriesWithPinCount() + assert.Equal(t, 1, pinned) + assert.Equal(t, 0, unpinned) + + Unpin(f3) + assert.Equal(t, 1, Entries()) + pinned, unpinned = EntriesWithPinCount() + assert.Equal(t, 0, pinned) + assert.Equal(t, 1, unpinned) + + Pin(parent) + assert.Equal(t, 1, Entries()) + pinned, unpinned = EntriesWithPinCount() + assert.Equal(t, 1, pinned) + 
assert.Equal(t, 0, unpinned) + + Unpin(parent) + assert.Equal(t, 1, Entries()) + pinned, unpinned = EntriesWithPinCount() + assert.Equal(t, 0, pinned) + assert.Equal(t, 1, unpinned) + + // all 3 should have equal configstrings + assert.Equal(t, fs.ConfigString(f2), fs.ConfigString(f3)) + assert.Equal(t, fs.ConfigString(f2), fs.ConfigString(parent)) +} + func TestClearConfig(t *testing.T) { create := mockNewFs(t) @@ -198,9 +269,9 @@ func TestClearConfig(t *testing.T) { _, err := GetFn(context.Background(), "mock:/file.txt", create) require.Equal(t, fs.ErrorIsFile, err) - assert.Equal(t, 2, Entries()) // file + parent + assert.Equal(t, 1, Entries()) - assert.Equal(t, 2, ClearConfig("mock")) + assert.Equal(t, 1, ClearConfig("mock")) assert.Equal(t, 0, Entries()) } diff --git a/fs/config.go b/fs/config.go index caf242fdf..1b11da821 100644 --- a/fs/config.go +++ b/fs/config.go @@ -41,7 +41,7 @@ var ( // // This is a function pointer to decouple the config // implementation from the fs - CountError = func(err error) error { return err } + CountError = func(ctx context.Context, err error) error { return err } // ConfigProvider is the config key used for provider options ConfigProvider = "provider" diff --git a/fs/config/config.go b/fs/config/config.go index be0a2a914..0e078e131 100644 --- a/fs/config/config.go +++ b/fs/config/config.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "log" mathrand "math/rand" "os" "path/filepath" @@ -372,7 +371,7 @@ func LoadedData() Storage { } dataLoaded = true } else { - log.Fatalf("Failed to load config file %q: %v", configPath, err) + fs.Fatalf(nil, "Failed to load config file %q: %v", configPath, err) } } return data diff --git a/fs/config/config_read_password.go b/fs/config/config_read_password.go index 8f3c79933..688126fb1 100644 --- a/fs/config/config_read_password.go +++ b/fs/config/config_read_password.go @@ -7,9 +7,9 @@ package config import ( "fmt" - "log" "os" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/terminal" ) @@ -22,7 +22,7 @@ func ReadPassword() string { line, err := terminal.ReadPassword(stdin) _, _ = fmt.Fprintln(os.Stderr) if err != nil { - log.Fatalf("Failed to read password: %v", err) + fs.Fatalf(nil, "Failed to read password: %v", err) } return string(line) } diff --git a/fs/config/config_test.go b/fs/config/config_test.go index 612105a6c..7c4a503bf 100644 --- a/fs/config/config_test.go +++ b/fs/config/config_test.go @@ -10,6 +10,10 @@ import ( "github.com/stretchr/testify/assert" ) +func init() { + configfile.Install() +} + func TestConfigLoad(t *testing.T) { oldConfigPath := config.GetConfigPath() assert.NoError(t, config.SetConfigPath("./testdata/plain.conf")) @@ -17,7 +21,6 @@ func TestConfigLoad(t *testing.T) { assert.NoError(t, config.SetConfigPath(oldConfigPath)) }() config.ClearConfigPassword() - configfile.Install() sections := config.Data().GetSectionList() var expect = []string{"RCLONE_ENCRYPT_V0", "nounc", "unc"} assert.Equal(t, expect, sections) diff --git a/fs/config/configfile/configfile.go b/fs/config/configfile/configfile.go index 659df705d..c6a09fe50 100644 --- a/fs/config/configfile/configfile.go +++ b/fs/config/configfile/configfile.go @@ -9,10 +9,10 @@ import ( "strings" "sync" - "github.com/Unknwon/goconfig" //nolint:misspell // Don't include misspell when running golangci-lint "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/lib/file" + "github.com/unknwon/goconfig" //nolint:misspell // Don't include misspell when running golangci-lint ) // 
Install installs the config file handler diff --git a/fs/config/configfile/configfile_other.go b/fs/config/configfile/configfile_other.go index ef509430c..c70a51603 100644 --- a/fs/config/configfile/configfile_other.go +++ b/fs/config/configfile/configfile_other.go @@ -5,6 +5,6 @@ package configfile -// attemptCopyGroups tries to keep the group the same, which only makes sense +// attemptCopyGroup tries to keep the group the same, which only makes sense // for system with user-group-world permission model. func attemptCopyGroup(fromPath, toPath string) {} diff --git a/fs/config/configfile/configfile_unix.go b/fs/config/configfile/configfile_unix.go index 542e24621..d5e8ee3dd 100644 --- a/fs/config/configfile/configfile_unix.go +++ b/fs/config/configfile/configfile_unix.go @@ -14,7 +14,7 @@ import ( "github.com/rclone/rclone/fs" ) -// attemptCopyGroups tries to keep the group the same. User will be the one +// attemptCopyGroup tries to keep the group the same. User will be the one // who is currently running this process. func attemptCopyGroup(fromPath, toPath string) { info, err := os.Stat(fromPath) diff --git a/fs/config/configflags/configflags.go b/fs/config/configflags/configflags.go index d3beb9a3e..ab8f10286 100644 --- a/fs/config/configflags/configflags.go +++ b/fs/config/configflags/configflags.go @@ -5,7 +5,6 @@ package configflags // Options set by command line flags import ( "context" - "log" "net" "os" "strconv" @@ -68,7 +67,7 @@ func ParseHeaders(headers []string) []*fs.HTTPOption { for _, header := range headers { parts := strings.SplitN(header, ":", 2) if len(parts) == 1 { - log.Fatalf("Failed to parse '%s' as an HTTP header. Expecting a string like: 'Content-Encoding: gzip'", header) + fs.Fatalf(nil, "Failed to parse '%s' as an HTTP header. 
Expecting a string like: 'Content-Encoding: gzip'", header) } option := &fs.HTTPOption{ Key: strings.TrimSpace(parts[0]), @@ -101,7 +100,7 @@ func SetFlags(ci *fs.ConfigInfo) { // Process -q flag if quiet { if verbose > 0 { - log.Fatalf("Can't set -v and -q") + fs.Fatalf(nil, "Can't set -v and -q") } ci.LogLevel = fs.LogLevelError } @@ -110,10 +109,10 @@ func SetFlags(ci *fs.ConfigInfo) { logLevelFlag := pflag.Lookup("log-level") if logLevelFlag != nil && logLevelFlag.Changed { if verbose > 0 { - log.Fatalf("Can't set -v and --log-level") + fs.Fatalf(nil, "Can't set -v and --log-level") } if quiet { - log.Fatalf("Can't set -q and --log-level") + fs.Fatalf(nil, "Can't set -q and --log-level") } } @@ -121,7 +120,7 @@ func SetFlags(ci *fs.ConfigInfo) { switch { case deleteBefore && (deleteDuring || deleteAfter), deleteDuring && deleteAfter: - log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`) + fs.Fatalf(nil, `Only one of --delete-before, --delete-during or --delete-after can be used.`) case deleteBefore: ci.DeleteMode = fs.DeleteModeBefore case deleteDuring: @@ -136,10 +135,10 @@ func SetFlags(ci *fs.ConfigInfo) { if bindAddr != "" { addrs, err := net.LookupIP(bindAddr) if err != nil { - log.Fatalf("--bind: Failed to parse %q as IP address: %v", bindAddr, err) + fs.Fatalf(nil, "--bind: Failed to parse %q as IP address: %v", bindAddr, err) } if len(addrs) != 1 { - log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs)) + fs.Fatalf(nil, "--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs)) } ci.BindAddr = addrs[0] } @@ -147,7 +146,7 @@ func SetFlags(ci *fs.ConfigInfo) { // Process --disable if disableFeatures != "" { if disableFeatures == "help" { - log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", ")) + fs.Fatalf(nil, "Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", ")) } ci.DisableFeatures = strings.Split(disableFeatures, ",") } @@ -169,7 +168,7 @@ func SetFlags(ci *fs.ConfigInfo) { for _, kv := range metadataSet { equal := strings.IndexRune(kv, '=') if equal < 0 { - log.Fatalf("Failed to parse '%s' as metadata key=value.", kv) + fs.Fatalf(nil, "Failed to parse '%s' as metadata key=value.", kv) } ci.MetadataSet[strings.ToLower(kv[:equal])] = kv[equal+1:] } @@ -181,23 +180,23 @@ func SetFlags(ci *fs.ConfigInfo) { if value, ok := parseDSCP(dscp); ok { ci.TrafficClass = value << 2 } else { - log.Fatalf("--dscp: Invalid DSCP name: %v", dscp) + fs.Fatalf(nil, "--dscp: Invalid DSCP name: %v", dscp) } } // Process --config path if err := config.SetConfigPath(configPath); err != nil { - log.Fatalf("--config: Failed to set %q as config path: %v", configPath, err) + fs.Fatalf(nil, "--config: Failed to set %q as config path: %v", configPath, err) } // Process --cache-dir path if err := config.SetCacheDir(cacheDir); err != nil { - log.Fatalf("--cache-dir: Failed to set %q as cache dir: %v", cacheDir, err) + fs.Fatalf(nil, "--cache-dir: Failed to set %q as cache dir: %v", cacheDir, err) } // Process --temp-dir path if err := config.SetTempDir(tempDir); err != nil { - log.Fatalf("--temp-dir: Failed to set %q as temp dir: %v", tempDir, err) + fs.Fatalf(nil, "--temp-dir: Failed to set %q as temp dir: %v", tempDir, err) } // Process --multi-thread-streams - set whether multi-thread-streams was set @@ -206,11 +205,11 @@ func SetFlags(ci *fs.ConfigInfo) { // Reload any changes if err := ci.Reload(context.Background()); err != nil { - log.Fatalf("Failed 
to reload config changes: %v", err)
+		fs.Fatalf(nil, "Failed to reload config changes: %v", err)
 	}
 }
 
-// parseHeaders converts DSCP names to value
+// parseDSCP converts DSCP names to value
 func parseDSCP(dscp string) (uint8, bool) {
 	if s, err := strconv.ParseUint(dscp, 10, 6); err == nil {
 		return uint8(s), true
diff --git a/fs/config/configstruct/configstruct.go b/fs/config/configstruct/configstruct.go
index 4d8c941f9..78322af9a 100644
--- a/fs/config/configstruct/configstruct.go
+++ b/fs/config/configstruct/configstruct.go
@@ -2,7 +2,7 @@
 package configstruct
 
 import (
-	"encoding/json"
+	"encoding/csv"
 	"errors"
 	"fmt"
 	"reflect"
@@ -31,7 +31,7 @@ func camelToSnake(in string) string {
 //
 // Builtin types are expected to be encoding as their natural
 // stringificatons as produced by fmt.Sprint except for []string which
-// is expected to be encoded as JSON with empty array encoded as "".
+// is expected to be encoded as a CSV with empty array encoded as "".
 //
 // Any other types are expected to be encoded by their String()
 // methods and decoded by their `Set(s string) error` methods.
@@ -58,14 +58,18 @@ func StringToInterface(def interface{}, in string) (newValue interface{}, err er
 	case time.Duration:
 		newValue, err = time.ParseDuration(in)
 	case []string:
-		// JSON decode arrays of strings
-		if in != "" {
-			var out []string
-			err = json.Unmarshal([]byte(in), &out)
-			newValue = out
-		} else {
-			// Empty string we will treat as empty array
+		// CSV decode arrays of strings - ideally we would use
+		// fs.CommaSepList here but we can't as it would cause
+		// a circular import.
+		if len(in) == 0 {
 			newValue = []string{}
+		} else {
+			r := csv.NewReader(strings.NewReader(in))
+			newValue, err = r.Read()
+			switch _err := err.(type) {
+			case *csv.ParseError:
+				err = _err.Err // remove line numbers from the error message
+			}
 		}
 	default:
 		// Try using a Set method
diff --git a/fs/config/configstruct/configstruct_test.go b/fs/config/configstruct/configstruct_test.go
index a31d70222..fa30684be 100644
--- a/fs/config/configstruct/configstruct_test.go
+++ b/fs/config/configstruct/configstruct_test.go
@@ -204,9 +204,11 @@ func TestStringToInterface(t *testing.T) {
 		{"1m1s", fs.Duration(0), fs.Duration(61 * time.Second), ""},
 		{"1potato", fs.Duration(0), nil, `parsing "1potato" as fs.Duration failed: parsing time "1potato" as "2006-01-02": cannot parse "1potato" as "2006"`},
 		{``, []string{}, []string{}, ""},
-		{`[]`, []string(nil), []string{}, ""},
-		{`["hello"]`, []string{}, []string{"hello"}, ""},
-		{`["hello","world!"]`, []string(nil), []string{"hello", "world!"}, ""},
+		{`""`, []string(nil), []string{""}, ""},
+		{`hello`, []string{}, []string{"hello"}, ""},
+		{`"hello"`, []string{}, []string{"hello"}, ""},
+		{`hello,world!`, []string(nil), []string{"hello", "world!"}, ""},
+		{`"hello","world!"`, []string(nil), []string{"hello", "world!"}, ""},
 		{"1s", time.Duration(0), time.Second, ""},
 		{"1m1s", time.Duration(0), 61 * time.Second, ""},
 		{"1potato", time.Duration(0), nil, `parsing "1potato" as time.Duration failed: time: unknown unit "potato" in duration "1potato"`},
diff --git a/fs/config/crypt.go b/fs/config/crypt.go
index 8fc476f82..4a68d4159 100644
--- a/fs/config/crypt.go
+++ b/fs/config/crypt.go
@@ -41,6 +41,11 @@ var (
 	PassConfigKeyForDaemonization = false
 )
 
+// IsEncrypted returns true if the config file is encrypted
+func IsEncrypted() bool {
+	return len(configKey) > 0
+}
+
 // Decrypt will automatically decrypt a reader
 func Decrypt(b io.ReadSeeker) (io.Reader, error) {
 	ctx := context.Background()
@@ -78,37 +83,19 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
 	}
 
 	if len(configKey) == 0 {
-		if len(ci.PasswordCommand) != 0 {
-			var stdout bytes.Buffer
-			var stderr bytes.Buffer
-
-			cmd := exec.Command(ci.PasswordCommand[0], ci.PasswordCommand[1:]...)
-
-			cmd.Stdout = &stdout
-			cmd.Stderr = &stderr
-			cmd.Stdin = os.Stdin
-
-			if err := cmd.Run(); err != nil {
-				// One does not always get the stderr returned in the wrapped error.
-				fs.Errorf(nil, "Using --password-command returned: %v", err)
-				if ers := strings.TrimSpace(stderr.String()); ers != "" {
-					fs.Errorf(nil, "--password-command stderr: %s", ers)
-				}
-				return nil, fmt.Errorf("password command failed: %w", err)
+		pass, err := GetPasswordCommand(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if pass != "" {
+			usingPasswordCommand = true
+			err = SetConfigPassword(pass)
+			if err != nil {
+				return nil, fmt.Errorf("incorrect password: %w", err)
 			}
-			if pass := strings.Trim(stdout.String(), "\r\n"); pass != "" {
-				err := SetConfigPassword(pass)
-				if err != nil {
-					return nil, fmt.Errorf("incorrect password: %w", err)
-				}
-			} else {
-				return nil, errors.New("password-command returned empty string")
-			}
-
 			if len(configKey) == 0 {
 				return nil, errors.New("unable to decrypt configuration: incorrect password")
 			}
-			usingPasswordCommand = true
 		} else {
 			usingPasswordCommand = false
@@ -183,6 +170,40 @@ func Decrypt(b io.ReadSeeker) (io.Reader, error) {
 	return bytes.NewReader(out), nil
 }
 
+// GetPasswordCommand gets the password using the --password-command setting
+//
+// If the --password-command flag was not in use it returns "", nil
+func GetPasswordCommand(ctx context.Context) (pass string, err error) {
+	ci := fs.GetConfig(ctx)
+	if len(ci.PasswordCommand) == 0 {
+		return "", nil
+	}
+
+	var stdout bytes.Buffer
+	var stderr bytes.Buffer
+
+	cmd := exec.Command(ci.PasswordCommand[0], ci.PasswordCommand[1:]...)
+
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+	cmd.Stdin = os.Stdin
+
+	err = cmd.Run()
+	if err != nil {
+		// One does not always get the stderr returned in the wrapped error.
+		fs.Errorf(nil, "Using --password-command returned: %v", err)
+		if ers := strings.TrimSpace(stderr.String()); ers != "" {
+			fs.Errorf(nil, "--password-command stderr: %s", ers)
+		}
+		return pass, fmt.Errorf("password command failed: %w", err)
+	}
+	pass = strings.Trim(stdout.String(), "\r\n")
+	if pass == "" {
+		return pass, errors.New("--password-command returned empty string")
+	}
+	return pass, nil
+}
+
 // Encrypt the config file
 func Encrypt(src io.Reader, dst io.Writer) error {
 	if len(configKey) == 0 {
@@ -294,10 +315,46 @@ func ClearConfigPassword() {
 // changeConfigPassword will query the user twice
 // for a password. If the same password is entered
 // twice the key is updated.
+//
+// This will use --password-command if configured to read the password.
func changeConfigPassword() { - err := SetConfigPassword(ChangePassword("NEW configuration")) + // Set RCLONE_PASSWORD_CHANGE to "1" when calling the --password-command tool + _ = os.Setenv("RCLONE_PASSWORD_CHANGE", "1") + defer func() { + _ = os.Unsetenv("RCLONE_PASSWORD_CHANGE") + }() + pass, err := GetPasswordCommand(context.Background()) + if err != nil { + fmt.Printf("Failed to read new password with --password-command: %v\n", err) + return + } + if pass == "" { + pass = ChangePassword("NEW configuration") + } else { + fmt.Printf("Read password using --password-command\n") + } + err = SetConfigPassword(pass) if err != nil { fmt.Printf("Failed to set config password: %v\n", err) return } } + +// ChangeConfigPasswordAndSave will query the user twice +// for a password. If the same password is entered +// twice the key is updated. +// +// This will use --password-command if configured to read the password. +// +// It will then save the config +func ChangeConfigPasswordAndSave() { + changeConfigPassword() + SaveConfig() +} + +// RemoveConfigPasswordAndSave will clear the config password and save +// the unencrypted config file. +func RemoveConfigPasswordAndSave() { + configKey = nil + SaveConfig() +} diff --git a/fs/config/crypt_internal_test.go b/fs/config/crypt_internal_test.go index 05dc71e0f..2d19643c5 100644 --- a/fs/config/crypt_internal_test.go +++ b/fs/config/crypt_internal_test.go @@ -1,8 +1,12 @@ package config import ( + "context" + "os" + "path/filepath" "testing" + "github.com/rclone/rclone/fs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -46,3 +50,57 @@ func TestPassword(t *testing.T) { hashedKeyCompare(t, "abcdef", "ABCDEF", false) } + +func TestChangeConfigPassword(t *testing.T) { + ci := fs.GetConfig(context.Background()) + + var err error + oldConfigPath := GetConfigPath() + assert.NoError(t, SetConfigPath("./testdata/encrypted.conf")) + defer func() { + assert.NoError(t, SetConfigPath(oldConfigPath)) + ClearConfigPassword() + ci.PasswordCommand = nil + }() + + // Get rid of any config password + ClearConfigPassword() + + // Return the password, checking the state of the environment variable + checkCode := ` +package main + +import ( + "fmt" + "os" + "log" +) + +func main() { + v := os.Getenv("RCLONE_PASSWORD_CHANGE") + if v == "" { + log.Fatal("Env var not found") + } else if v != "1" { + log.Fatal("Env var wrong value") + } else { + fmt.Println("asdf") + } +} +` + dir := t.TempDir() + code := filepath.Join(dir, "file.go") + require.NoError(t, os.WriteFile(code, []byte(checkCode), 0777)) + + // Set correct password using --password-command + ci.PasswordCommand = fs.SpaceSepList{"go", "run", code} + changeConfigPassword() + err = Data().Load() + require.NoError(t, err) + sections := Data().GetSectionList() + var expect = []string{"nounc", "unc"} + assert.Equal(t, expect, sections) + + keys := Data().GetKeyList("nounc") + expect = []string{"type", "nounc"} + assert.Equal(t, expect, keys) +} diff --git a/fs/config/crypt_test.go b/fs/config/crypt_test.go index 04f54864d..73c3bf6f6 100644 --- a/fs/config/crypt_test.go +++ b/fs/config/crypt_test.go @@ -6,6 +6,8 @@ package config_test import ( "context" + "os" + "path/filepath" "testing" "github.com/rclone/rclone/fs" @@ -24,8 +26,10 @@ func TestConfigLoadEncrypted(t *testing.T) { }() // Set correct password + assert.False(t, config.IsEncrypted()) err = config.SetConfigPassword("asdf") require.NoError(t, err) + assert.True(t, config.IsEncrypted()) err = config.Data().Load() require.NoError(t, 
err) sections := config.Data().GetSectionList() @@ -113,3 +117,56 @@ func TestConfigLoadEncryptedFailures(t *testing.T) { err = config.Data().Load() assert.Equal(t, config.ErrorConfigFileNotFound, err) } + +func TestGetPasswordCommand(t *testing.T) { + ctx, ci := fs.AddConfig(context.Background()) + + // Not configured + ci.PasswordCommand = fs.SpaceSepList{} + pass, err := config.GetPasswordCommand(ctx) + require.NoError(t, err) + assert.Equal(t, "", pass) + + // With password - happy path + ci.PasswordCommand = fs.SpaceSepList{"echo", "asdf"} + pass, err = config.GetPasswordCommand(ctx) + require.NoError(t, err) + assert.Equal(t, "asdf", pass) + + // Empty password returned + ci.PasswordCommand = fs.SpaceSepList{"echo", ""} + _, err = config.GetPasswordCommand(ctx) + assert.ErrorContains(t, err, "returned empty string") + + // Error when running command + ci.PasswordCommand = fs.SpaceSepList{"XXX non-existent command XXX", ""} + _, err = config.GetPasswordCommand(ctx) + assert.ErrorContains(t, err, "not found") + + // Check the state of the environment variable in --password-command + checkCode := ` +package main + +import ( + "fmt" + "os" +) + +func main() { + if _, found := os.LookupEnv("RCLONE_PASSWORD_CHANGE"); found { + fmt.Println("Env var set") + } else { + fmt.Println("OK") + } +} +` + dir := t.TempDir() + code := filepath.Join(dir, "file.go") + require.NoError(t, os.WriteFile(code, []byte(checkCode), 0777)) + + // Check the environment variable unset when called directly + ci.PasswordCommand = fs.SpaceSepList{"go", "run", code} + pass, err = config.GetPasswordCommand(ctx) + require.NoError(t, err) + assert.Equal(t, "OK", pass) +} diff --git a/fs/config/flags/flags.go b/fs/config/flags/flags.go index db76f5d71..953a3d0fd 100644 --- a/fs/config/flags/flags.go +++ b/fs/config/flags/flags.go @@ -3,7 +3,6 @@ package flags import ( - "log" "os" "regexp" "strings" @@ -74,7 +73,7 @@ func (gs *Groups) Include(groupsString string) *Groups { for _, groupName := range strings.Split(groupsString, ",") { _, ok := All.ByName[groupName] if !ok { - log.Fatalf("Couldn't find group %q in command annotation", groupName) + fs.Fatalf(nil, "Couldn't find group %q in command annotation", groupName) } want[groupName] = true } @@ -124,6 +123,7 @@ func init() { All.NewGroup("Logging", "Flags for logging and statistics") All.NewGroup("Metadata", "Flags to control metadata") All.NewGroup("RC", "Flags to control the Remote Control API") + All.NewGroup("Metrics", "Flags to control the Metrics HTTP endpoint.") } // installFlag constructs a name from the flag passed in and @@ -137,18 +137,38 @@ func installFlag(flags *pflag.FlagSet, name string, groupsString string) { // Find flag flag := flags.Lookup(name) if flag == nil { - log.Fatalf("Couldn't find flag --%q", name) + fs.Fatalf(nil, "Couldn't find flag --%q", name) } // Read default from environment if possible envKey := fs.OptionToEnv(name) if envValue, envFound := os.LookupEnv(envKey); envFound { - err := flags.Set(name, envValue) - if err != nil { - log.Fatalf("Invalid value when setting --%s from environment variable %s=%q: %v", name, envKey, envValue, err) + isStringArray := false + opt, isOption := flag.Value.(*fs.Option) + if isOption { + _, isStringArray = opt.Default.([]string) + } + if isStringArray { + // Treat stringArray differently, treating the environment variable as a CSV array + var list fs.CommaSepList + err := list.Set(envValue) + if err != nil { + fs.Fatalf(nil, "Invalid value when setting stringArray --%s from environment variable 
%s=%q: %v", name, envKey, envValue, err) + } + // Set both the Value (so items on the command line get added) and DefValue so the help is correct + opt.Value = ([]string)(list) + flag.DefValue = list.String() + for _, v := range list { + fs.Debugf(nil, "Setting --%s %q from environment variable %s=%q", name, v, envKey, envValue) + } + } else { + err := flags.Set(name, envValue) + if err != nil { + fs.Fatalf(nil, "Invalid value when setting --%s from environment variable %s=%q: %v", name, envKey, envValue, err) + } + fs.Debugf(nil, "Setting --%s %q from environment variable %s=%q", name, flag.Value, envKey, envValue) + flag.DefValue = envValue } - fs.Debugf(nil, "Setting --%s %q from environment variable %s=%q", name, flag.Value, envKey, envValue) - flag.DefValue = envValue } // Add flag to Group if it is a global flag @@ -159,7 +179,7 @@ func installFlag(flags *pflag.FlagSet, name string, groupsString string) { } group, ok := All.ByName[groupName] if !ok { - log.Fatalf("Couldn't find group %q for flag --%s", groupName, name) + fs.Fatalf(nil, "Couldn't find group %q for flag --%s", groupName, name) } group.Add(flag) } diff --git a/fs/config/obscure/obscure.go b/fs/config/obscure/obscure.go index 17aae165b..713e4d248 100644 --- a/fs/config/obscure/obscure.go +++ b/fs/config/obscure/obscure.go @@ -9,7 +9,9 @@ import ( "errors" "fmt" "io" - "log" + "math" + + "github.com/rclone/rclone/fs" ) // crypt internals @@ -47,6 +49,9 @@ func crypt(out, in, iv []byte) error { // This is done by encrypting with AES-CTR func Obscure(x string) (string, error) { plaintext := []byte(x) + if math.MaxInt32-aes.BlockSize < len(plaintext) { + return "", fmt.Errorf("value too large") + } ciphertext := make([]byte, aes.BlockSize+len(plaintext)) iv := ciphertext[:aes.BlockSize] if _, err := io.ReadFull(cryptRand, iv); err != nil { @@ -62,7 +67,7 @@ func Obscure(x string) (string, error) { func MustObscure(x string) string { out, err := Obscure(x) if err != nil { - log.Fatalf("Obscure failed: %v", err) + fs.Fatalf(nil, "Obscure failed: %v", err) } return out } @@ -88,7 +93,7 @@ func Reveal(x string) (string, error) { func MustReveal(x string) string { out, err := Reveal(x) if err != nil { - log.Fatalf("Reveal failed: %v", err) + fs.Fatalf(nil, "Reveal failed: %v", err) } return out } diff --git a/fs/config/ui.go b/fs/config/ui.go index ff8acda0f..fcee469b1 100644 --- a/fs/config/ui.go +++ b/fs/config/ui.go @@ -7,7 +7,6 @@ import ( "context" "errors" "fmt" - "log" "os" "sort" "strconv" @@ -29,7 +28,7 @@ var ReadLine = func() string { buf := bufio.NewReader(os.Stdin) line, err := buf.ReadString('\n') if err != nil { - log.Fatalf("Failed to read line: %v", err) + fs.Fatalf(nil, "Failed to read line: %v", err) } return strings.TrimSpace(line) } @@ -233,7 +232,7 @@ func ChoosePassword(defaultValue string, required bool) string { bits := ChooseNumber("Bits", 64, 1024) password, err = Password(bits) if err != nil { - log.Fatalf("Failed to make password: %v", err) + fs.Fatalf(nil, "Failed to make password: %v", err) } fmt.Printf("Your password is: %s\n", password) fmt.Printf("Use this password? 
Please note that an obscured version of this \npassword (and not the " + @@ -297,7 +296,7 @@ func ChooseRemote() string { func mustFindByName(name string) *fs.RegInfo { fsType := GetValue(name, "type") if fsType == "" { - log.Fatalf("Couldn't find type of fs for %q", name) + fs.Fatalf(nil, "Couldn't find type of fs for %q", name) } return fs.MustFind(fsType) } @@ -654,7 +653,7 @@ func ShowConfigLocation() { func ShowConfig() { str, err := LoadedData().Serialize() if err != nil { - log.Fatalf("Failed to serialize config: %v", err) + fs.Fatalf(nil, "Failed to serialize config: %v", err) } if str == "" { str = "; empty config\n" @@ -797,13 +796,11 @@ func SetPassword() { what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"} switch i := Command(what); i { case 'c': - changeConfigPassword() - SaveConfig() + ChangeConfigPasswordAndSave() fmt.Println("Password changed") continue case 'u': - configKey = nil - SaveConfig() + RemoveConfigPasswordAndSave() continue case 'q': return @@ -815,8 +812,7 @@ func SetPassword() { what := []string{"aAdd Password", "qQuit to main menu"} switch i := Command(what); i { case 'a': - changeConfigPassword() - SaveConfig() + ChangeConfigPasswordAndSave() fmt.Println("Password set") continue case 'q': diff --git a/fs/filter/filter.go b/fs/filter/filter.go index 61c1a2556..ebaa68119 100644 --- a/fs/filter/filter.go +++ b/fs/filter/filter.go @@ -5,7 +5,6 @@ import ( "context" "errors" "fmt" - "log" "path" "strings" "time" @@ -190,7 +189,7 @@ func NewFilter(opt *Options) (f *Filter, err error) { if f.Opt.MaxAge.IsSet() { f.ModTimeFrom = time.Now().Add(-time.Duration(f.Opt.MaxAge)) if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) { - log.Fatalf("filter: --min-age %q can't be larger than --max-age %q", opt.MinAge, opt.MaxAge) + fs.Fatalf(nil, "filter: --min-age %q can't be larger than --max-age %q", opt.MinAge, opt.MaxAge) } fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom) } diff --git a/fs/fshttp/http.go b/fs/fshttp/http.go index 5a3fea073..9d405df1d 100644 --- a/fs/fshttp/http.go +++ b/fs/fshttp/http.go @@ -6,7 +6,6 @@ import ( "context" "crypto/tls" "crypto/x509" - "log" "net" "net/http" "net/http/cookiejar" @@ -71,17 +70,17 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) ht // Load client certs if ci.ClientCert != "" || ci.ClientKey != "" { if ci.ClientCert == "" || ci.ClientKey == "" { - log.Fatalf("Both --client-cert and --client-key must be set") + fs.Fatalf(nil, "Both --client-cert and --client-key must be set") } cert, err := tls.LoadX509KeyPair(ci.ClientCert, ci.ClientKey) if err != nil { - log.Fatalf("Failed to load --client-cert/--client-key pair: %v", err) + fs.Fatalf(nil, "Failed to load --client-cert/--client-key pair: %v", err) } if cert.Leaf == nil { // Leaf is always the first certificate cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) if err != nil { - log.Fatalf("Failed to parse the certificate") + fs.Fatalf(nil, "Failed to parse the certificate") } } t.TLSClientConfig.Certificates = []tls.Certificate{cert} @@ -95,11 +94,11 @@ func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) ht for _, cert := range ci.CaCert { caCert, err := os.ReadFile(cert) if err != nil { - log.Fatalf("Failed to read --ca-cert file %q : %v", cert, err) + fs.Fatalf(nil, "Failed to read --ca-cert file %q : %v", cert, err) } ok := caCertPool.AppendCertsFromPEM(caCert) if !ok { - log.Fatalf("Failed to add certificates from --ca-cert file %q", cert) + 
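
Background for the certificate handling above (and the reloadCertificates hunk below): tls.LoadX509KeyPair has historically left cert.Leaf nil, so the first DER block has to be parsed by hand before fields such as the expiry date can be read. A standalone sketch of that same pattern, with hypothetical file names:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

func main() {
	// Hypothetical paths; mirrors the Leaf-parsing pattern in fshttp.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key")
	if err != nil {
		panic(err)
	}
	if cert.Leaf == nil { // Leaf is always the first certificate
		cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
		if err != nil {
			panic(err)
		}
	}
	fmt.Println("expires:", cert.Leaf.NotAfter)
}
```
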
fs.Fatalf(nil, "Failed to add certificates from --ca-cert file %q", cert) } } t.TLSClientConfig.RootCAs = caCertPool @@ -303,7 +302,7 @@ func (t *Transport) reloadCertificates() { cert, err := tls.LoadX509KeyPair(t.clientCert, t.clientKey) if err != nil { - log.Fatalf("Failed to load --client-cert/--client-key pair: %v", err) + fs.Fatalf(nil, "Failed to load --client-cert/--client-key pair: %v", err) } // Check if we need to parse the certificate again, we need it // for checking the expiration date @@ -311,7 +310,7 @@ func (t *Transport) reloadCertificates() { // Leaf is always the first certificate cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) if err != nil { - log.Fatalf("Failed to parse the certificate") + fs.Fatalf(nil, "Failed to parse the certificate") } } t.TLSClientConfig.Certificates = []tls.Certificate{cert} diff --git a/fs/hash/hash_test.go b/fs/hash/hash_test.go index 403a0d6e7..9215e9b9e 100644 --- a/fs/hash/hash_test.go +++ b/fs/hash/hash_test.go @@ -2,10 +2,11 @@ package hash_test import ( "bytes" + "fmt" "io" - "log" "testing" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/hash" "github.com/spf13/pflag" "github.com/stretchr/testify/assert" @@ -24,7 +25,7 @@ func TestHashSet(t *testing.T) { assert.Len(t, a, 0) h = h.Add(hash.MD5) - log.Println(h) + fs.Log(nil, fmt.Sprint(h)) assert.Equal(t, 1, h.Count()) assert.Equal(t, hash.MD5, h.GetOne()) a = h.Array() diff --git a/fs/log.go b/fs/log.go index 40ccdbb27..4b3abf611 100644 --- a/fs/log.go +++ b/fs/log.go @@ -60,8 +60,8 @@ var LogPrintPid = false // InstallJSONLogger is a hook that --use-json-log calls var InstallJSONLogger = func(logLevel LogLevel) {} -// LogPrint sends the text to the logger of level -var LogPrint = func(level LogLevel, text string) { +// LogOutput sends the text to the logger of level +var LogOutput = func(level LogLevel, text string) { text = fmt.Sprintf("%-6s: %s", level, text) if LogPrintPid { text = fmt.Sprintf("[%d] %s", os.Getpid(), text) @@ -108,42 +108,82 @@ func (j LogValueItem) String() string { return fmt.Sprint(j.value) } +func logLogrus(level LogLevel, text string, fields logrus.Fields) { + switch level { + case LogLevelDebug: + logrus.WithFields(fields).Debug(text) + case LogLevelInfo: + logrus.WithFields(fields).Info(text) + case LogLevelNotice, LogLevelWarning: + logrus.WithFields(fields).Warn(text) + case LogLevelError: + logrus.WithFields(fields).Error(text) + case LogLevelCritical: + logrus.WithFields(fields).Fatal(text) + case LogLevelEmergency, LogLevelAlert: + logrus.WithFields(fields).Panic(text) + } +} + +func logLogrusWithObject(level LogLevel, o interface{}, text string, fields logrus.Fields) { + if o != nil { + if fields == nil { + fields = logrus.Fields{} + } + fields["object"] = fmt.Sprintf("%+v", o) + fields["objectType"] = fmt.Sprintf("%T", o) + } + logLogrus(level, text, fields) +} + +func logJSON(level LogLevel, o interface{}, text string) { + logLogrusWithObject(level, o, text, nil) +} + +func logJSONf(level LogLevel, o interface{}, text string, args ...interface{}) { + text = fmt.Sprintf(text, args...) 
+ fields := logrus.Fields{} + for _, arg := range args { + if item, ok := arg.(LogValueItem); ok { + fields[item.key] = item.value + } + } + logLogrusWithObject(level, o, text, fields) +} + +func logPlain(level LogLevel, o interface{}, text string) { + if o != nil { + text = fmt.Sprintf("%v: %s", o, text) + } + LogOutput(level, text) +} + +func logPlainf(level LogLevel, o interface{}, text string, args ...interface{}) { + logPlain(level, o, fmt.Sprintf(text, args...)) +} + +// LogPrint produces a log string from the arguments passed in +func LogPrint(level LogLevel, o interface{}, text string) { + if GetConfig(context.TODO()).UseJSONLog { + logJSON(level, o, text) + } else { + logPlain(level, o, text) + } +} + // LogPrintf produces a log string from the arguments passed in func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) { - out := fmt.Sprintf(text, args...) - if GetConfig(context.TODO()).UseJSONLog { - fields := logrus.Fields{} - if o != nil { - fields = logrus.Fields{ - "object": fmt.Sprintf("%+v", o), - "objectType": fmt.Sprintf("%T", o), - } - } - for _, arg := range args { - if item, ok := arg.(LogValueItem); ok { - fields[item.key] = item.value - } - } - switch level { - case LogLevelDebug: - logrus.WithFields(fields).Debug(out) - case LogLevelInfo: - logrus.WithFields(fields).Info(out) - case LogLevelNotice, LogLevelWarning: - logrus.WithFields(fields).Warn(out) - case LogLevelError: - logrus.WithFields(fields).Error(out) - case LogLevelCritical: - logrus.WithFields(fields).Fatal(out) - case LogLevelEmergency, LogLevelAlert: - logrus.WithFields(fields).Panic(out) - } + logJSONf(level, o, text, args...) } else { - if o != nil { - out = fmt.Sprintf("%v: %s", o, out) - } - LogPrint(level, out) + logPlainf(level, o, text, args...) + } +} + +// LogLevelPrint writes logs at the given level +func LogLevelPrint(level LogLevel, o interface{}, text string) { + if GetConfig(context.TODO()).LogLevel >= level { + LogPrint(level, o, text) } } @@ -154,12 +194,71 @@ func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interfac } } +// Panic writes alert log output for this Object or Fs and calls panic(). +// It should always be seen by the user. +func Panic(o interface{}, text string) { + if GetConfig(context.TODO()).LogLevel >= LogLevelAlert { + LogPrint(LogLevelAlert, o, text) + } + panic(text) +} + +// Panicf writes alert log output for this Object or Fs and calls panic(). +// It should always be seen by the user. +func Panicf(o interface{}, text string, args ...interface{}) { + if GetConfig(context.TODO()).LogLevel >= LogLevelAlert { + LogPrintf(LogLevelAlert, o, text, args...) + } + panic(fmt.Sprintf(text, args...)) +} + +// Fatal writes critical log output for this Object or Fs and calls os.Exit(1). +// It should always be seen by the user. +func Fatal(o interface{}, text string) { + if GetConfig(context.TODO()).LogLevel >= LogLevelCritical { + LogPrint(LogLevelCritical, o, text) + } + os.Exit(1) +} + +// Fatalf writes critical log output for this Object or Fs and calls os.Exit(1). +// It should always be seen by the user. +func Fatalf(o interface{}, text string, args ...interface{}) { + if GetConfig(context.TODO()).LogLevel >= LogLevelCritical { + LogPrintf(LogLevelCritical, o, text, args...) + } + os.Exit(1) +} + +// Error writes error log output for this Object or Fs. It +// should always be seen by the user. 
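
These Fatal/Fatalf helpers are the targets of the mechanical log.Fatalf → fs.Fatalf(nil, ...) migration running through the rest of the patch: fatal errors now pass through rclone's leveled logging (including --use-json-log) before exiting. For illustration only:

```go
package main

import (
	"errors"

	"github.com/rclone/rclone/fs"
)

func main() {
	err := errors.New("boom")
	// Previously log.Fatalf("Failed to open log file: %v", err).
	// Now logged at Critical level through the fs logger, then os.Exit(1).
	fs.Fatalf(nil, "Failed to open log file: %v", err)
}
```
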
+func Error(o interface{}, text string) { + LogLevelPrint(LogLevelError, o, text) +} + // Errorf writes error log output for this Object or Fs. It // should always be seen by the user. func Errorf(o interface{}, text string, args ...interface{}) { - if GetConfig(context.TODO()).LogLevel >= LogLevelError { - LogPrintf(LogLevelError, o, text, args...) - } + LogLevelPrintf(LogLevelError, o, text, args...) +} + +// Print writes log output for this Object or Fs, same as Logf. +func Print(o interface{}, text string) { + LogLevelPrint(LogLevelNotice, o, text) +} + +// Printf writes log output for this Object or Fs, same as Logf. +func Printf(o interface{}, text string, args ...interface{}) { + LogLevelPrintf(LogLevelNotice, o, text, args...) +} + +// Log writes log output for this Object or Fs. This should be +// considered to be Notice level logging. It is the default level. +// By default rclone should not log very much so only use this for +// important things the user should see. The user can filter these +// out with the -q flag. +func Log(o interface{}, text string) { + LogLevelPrint(LogLevelNotice, o, text) } // Logf writes log output for this Object or Fs. This should be @@ -168,26 +267,34 @@ func Errorf(o interface{}, text string, args ...interface{}) { // important things the user should see. The user can filter these // out with the -q flag. func Logf(o interface{}, text string, args ...interface{}) { - if GetConfig(context.TODO()).LogLevel >= LogLevelNotice { - LogPrintf(LogLevelNotice, o, text, args...) - } + LogLevelPrintf(LogLevelNotice, o, text, args...) +} + +// Infoc writes info on transfers for this Object or Fs. Use this +// level for logging transfers, deletions and things which should +// appear with the -v flag. +// There is a name clash on "Info", hence the name "Infoc", "c" for constant. +func Infoc(o interface{}, text string) { + LogLevelPrint(LogLevelInfo, o, text) } // Infof writes info on transfers for this Object or Fs. Use this // level for logging transfers, deletions and things which should // appear with the -v flag. func Infof(o interface{}, text string, args ...interface{}) { - if GetConfig(context.TODO()).LogLevel >= LogLevelInfo { - LogPrintf(LogLevelInfo, o, text, args...) - } + LogLevelPrintf(LogLevelInfo, o, text, args...) +} + +// Debug writes debugging output for this Object or Fs. Use this for +// debug only. The user must specify -vv to see this. +func Debug(o interface{}, text string) { + LogLevelPrint(LogLevelDebug, o, text) } // Debugf writes debugging output for this Object or Fs. Use this for // debug only. The user must specify -vv to see this. func Debugf(o interface{}, text string, args ...interface{}) { - if GetConfig(context.TODO()).LogLevel >= LogLevelDebug { - LogPrintf(LogLevelDebug, o, text, args...) - } + LogLevelPrintf(LogLevelDebug, o, text, args...)
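
A standalone sketch of the new constant-message variants next to their existing formatted counterparts:

```go
package main

import "github.com/rclone/rclone/fs"

func main() {
	// Constant-message variants added by this patch; the existing *f
	// variants remain for formatted output.
	fs.Log(nil, "notice level, shown by default")
	fs.Infoc(nil, "info level, shown with -v")   // "c" for constant, avoiding the Info name clash
	fs.Debug(nil, "debug level, shown with -vv") // not printed at the default level
	fs.Logf(nil, "formatted: %d remotes", 3)
}
```
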
} // LogDirName returns an object for the logger, logging a root diff --git a/fs/log/log.go b/fs/log/log.go index 0877f5d72..1400d4e7e 100644 --- a/fs/log/log.go +++ b/fs/log/log.go @@ -144,7 +144,7 @@ func InitLogging() { if Opt.File != "" { f, err := os.OpenFile(Opt.File, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0640) if err != nil { - log.Fatalf("Failed to open log file: %v", err) + fs.Fatalf(nil, "Failed to open log file: %v", err) } _, err = f.Seek(0, io.SeekEnd) if err != nil { @@ -158,7 +158,7 @@ func InitLogging() { // Syslog output if Opt.UseSyslog { if Opt.File != "" { - log.Fatalf("Can't use --syslog and --log-file together") + fs.Fatalf(nil, "Can't use --syslog and --log-file together") } startSysLog() } diff --git a/fs/log/redirect_stderr_unix.go b/fs/log/redirect_stderr_unix.go index f37a89204..dd7aa4d6f 100644 --- a/fs/log/redirect_stderr_unix.go +++ b/fs/log/redirect_stderr_unix.go @@ -5,9 +5,9 @@ package log import ( - "log" "os" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "golang.org/x/sys/unix" ) @@ -16,11 +16,11 @@ import ( func redirectStderr(f *os.File) { passPromptFd, err := unix.Dup(int(os.Stderr.Fd())) if err != nil { - log.Fatalf("Failed to duplicate stderr: %v", err) + fs.Fatalf(nil, "Failed to duplicate stderr: %v", err) } config.PasswordPromptOutput = os.NewFile(uintptr(passPromptFd), "passPrompt") err = unix.Dup2(int(f.Fd()), int(os.Stderr.Fd())) if err != nil { - log.Fatalf("Failed to redirect stderr to file: %v", err) + fs.Fatalf(nil, "Failed to redirect stderr to file: %v", err) } } diff --git a/fs/log/redirect_stderr_windows.go b/fs/log/redirect_stderr_windows.go index f20ede7b0..6d6c6abb8 100644 --- a/fs/log/redirect_stderr_windows.go +++ b/fs/log/redirect_stderr_windows.go @@ -9,9 +9,10 @@ package log import ( - "log" "os" "syscall" + + "github.com/rclone/rclone/fs" ) var ( @@ -34,6 +35,6 @@ func setStdHandle(stdhandle int32, handle syscall.Handle) error { func redirectStderr(f *os.File) { err := setStdHandle(syscall.STD_ERROR_HANDLE, syscall.Handle(f.Fd())) if err != nil { - log.Fatalf("Failed to redirect stderr to file: %v", err) + fs.Fatalf(nil, "Failed to redirect stderr to file: %v", err) } } diff --git a/fs/log/syslog.go b/fs/log/syslog.go index 58b82ac9f..fd98f9fb6 100644 --- a/fs/log/syslog.go +++ b/fs/log/syslog.go @@ -5,12 +5,13 @@ package log import ( - "log" "runtime" + + "github.com/rclone/rclone/fs" ) // Starts syslog if configured, returns true if it was started func startSysLog() bool { - log.Fatalf("--syslog not supported on %s platform", runtime.GOOS) + fs.Fatalf(nil, "--syslog not supported on %s platform", runtime.GOOS) return false } diff --git a/fs/log/syslog_unix.go b/fs/log/syslog_unix.go index 7d7efe8ee..4fbbd2099 100644 --- a/fs/log/syslog_unix.go +++ b/fs/log/syslog_unix.go @@ -42,16 +42,16 @@ var ( func startSysLog() bool { facility, ok := syslogFacilityMap[Opt.SyslogFacility] if !ok { - log.Fatalf("Unknown syslog facility %q - man syslog for list", Opt.SyslogFacility) + fs.Fatalf(nil, "Unknown syslog facility %q - man syslog for list", Opt.SyslogFacility) } Me := path.Base(os.Args[0]) w, err := syslog.New(syslog.LOG_NOTICE|facility, Me) if err != nil { - log.Fatalf("Failed to start syslog: %v", err) + fs.Fatalf(nil, "Failed to start syslog: %v", err) } log.SetFlags(0) log.SetOutput(w) - fs.LogPrint = func(level fs.LogLevel, text string) { + fs.LogOutput = func(level fs.LogLevel, text string) { switch level { case fs.LogLevelEmergency: _ = w.Emerg(text) diff --git a/fs/log/systemd.go b/fs/log/systemd.go 
index 2caf675da..84e9a3bd3 100644 --- a/fs/log/systemd.go +++ b/fs/log/systemd.go @@ -5,13 +5,14 @@ package log import ( - "log" "runtime" + + "github.com/rclone/rclone/fs" ) // Enables systemd logs if configured or if auto-detected func startSystemdLog() bool { - log.Fatalf("--log-systemd not supported on %s platform", runtime.GOOS) + fs.Fatalf(nil, "--log-systemd not supported on %s platform", runtime.GOOS) return false } diff --git a/fs/log/systemd_unix.go b/fs/log/systemd_unix.go index cf78d35a8..22c82ad11 100644 --- a/fs/log/systemd_unix.go +++ b/fs/log/systemd_unix.go @@ -26,7 +26,7 @@ func startSystemdLog() bool { } log.SetFlags(flags) // TODO: Use the native journal.Print approach rather than a custom implementation - fs.LogPrint = func(level fs.LogLevel, text string) { + fs.LogOutput = func(level fs.LogLevel, text string) { text = fmt.Sprintf("<%s>%-6s: %s", systemdLogPrefix(level), level, text) _ = log.Output(4, text) } diff --git a/fs/march/march.go b/fs/march/march.go index 856be0e7c..1116220db 100644 --- a/fs/march/march.go +++ b/fs/march/march.go @@ -415,7 +415,7 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) { } else { fs.Errorf(m.Fsrc, "error reading source root directory: %v", srcListErr) } - srcListErr = fs.CountError(srcListErr) + srcListErr = fs.CountError(m.Ctx, srcListErr) return nil, srcListErr } if dstListErr == fs.ErrorDirNotFound { @@ -426,7 +426,7 @@ func (m *March) processJob(job listDirJob) ([]listDirJob, error) { } else { fs.Errorf(m.Fdst, "error reading destination root directory: %v", dstListErr) } - dstListErr = fs.CountError(dstListErr) + dstListErr = fs.CountError(m.Ctx, dstListErr) return nil, dstListErr } diff --git a/fs/mount_helper.go b/fs/mount_helper.go index 035b75284..ef81777fc 100644 --- a/fs/mount_helper.go +++ b/fs/mount_helper.go @@ -3,7 +3,6 @@ package fs import ( "errors" "fmt" - "log" "os" "path/filepath" "runtime" @@ -16,7 +15,7 @@ func init() { if args, err := convertMountHelperArgs(os.Args); err == nil { os.Args = args } else { - log.Fatalf("Failed to parse command line: %v", err) + Fatalf(nil, "Failed to parse command line: %v", err) } } } diff --git a/fs/operations/check.go b/fs/operations/check.go index b9fe36aab..c3c747071 100644 --- a/fs/operations/check.go +++ b/fs/operations/check.go @@ -49,6 +49,7 @@ type CheckOpt struct { // checkMarch is used to march over two Fses in the same way as // sync/copy type checkMarch struct { + ctx context.Context ioMu sync.Mutex wg sync.WaitGroup tokens chan struct{} @@ -83,7 +84,7 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) { } err := fmt.Errorf("file not in %v", c.opt.Fsrc) fs.Errorf(dst, "%v", err) - _ = fs.CountError(err) + _ = fs.CountError(c.ctx, err) c.differences.Add(1) c.srcFilesMissing.Add(1) c.report(dst, c.opt.MissingOnSrc, '-') @@ -105,7 +106,7 @@ func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) { case fs.Object: err := fmt.Errorf("file not in %v", c.opt.Fdst) fs.Errorf(src, "%v", err) - _ = fs.CountError(err) + _ = fs.CountError(c.ctx, err) c.differences.Add(1) c.dstFilesMissing.Add(1) c.report(src, c.opt.MissingOnDst, '+') @@ -155,13 +156,13 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b differ, noHash, err := c.checkIdentical(ctx, dstX, srcX) if err != nil { fs.Errorf(src, "%v", err) - _ = fs.CountError(err) + _ = fs.CountError(ctx, err) c.report(src, c.opt.Error, '!') } else if differ { c.differences.Add(1) err := errors.New("files differ") // the checkFn has already logged the reason - _ = 
fs.CountError(err) + _ = fs.CountError(ctx, err) c.report(src, c.opt.Differ, '*') } else { c.matches.Add(1) @@ -177,7 +178,7 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b } else { err := fmt.Errorf("is file on %v but directory on %v", c.opt.Fsrc, c.opt.Fdst) fs.Errorf(src, "%v", err) - _ = fs.CountError(err) + _ = fs.CountError(ctx, err) c.differences.Add(1) c.dstFilesMissing.Add(1) c.report(src, c.opt.MissingOnDst, '+') @@ -190,7 +191,7 @@ func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse b } err := fmt.Errorf("is file on %v but directory on %v", c.opt.Fdst, c.opt.Fsrc) fs.Errorf(dst, "%v", err) - _ = fs.CountError(err) + _ = fs.CountError(ctx, err) c.differences.Add(1) c.srcFilesMissing.Add(1) c.report(dst, c.opt.MissingOnSrc, '-') @@ -214,6 +215,7 @@ func CheckFn(ctx context.Context, opt *CheckOpt) error { return errors.New("internal error: nil check function") } c := &checkMarch{ + ctx: ctx, tokens: make(chan struct{}, ci.Checkers), opt: *opt, } @@ -430,6 +432,7 @@ func CheckSum(ctx context.Context, fsrc, fsum fs.Fs, sumFile string, hashType ha ci := fs.GetConfig(ctx) c := &checkMarch{ + ctx: ctx, tokens: make(chan struct{}, ci.Checkers), opt: *opt, } @@ -450,7 +453,7 @@ func CheckSum(ctx context.Context, fsrc, fsum fs.Fs, sumFile string, hashType ha // filesystem missed the file, sum wasn't consumed err := fmt.Errorf("file not in %v", opt.Fdst) fs.Errorf(filename, "%v", err) - _ = fs.CountError(err) + _ = fs.CountError(ctx, err) if lastErr == nil { lastErr = err } @@ -479,7 +482,7 @@ func (c *checkMarch) checkSum(ctx context.Context, obj fs.Object, download bool, if !sumFound { err = errors.New("sum not found") - _ = fs.CountError(err) + _ = fs.CountError(ctx, err) fs.Errorf(obj, "%v", err) c.differences.Add(1) c.srcFilesMissing.Add(1) @@ -528,12 +531,12 @@ func (c *checkMarch) checkSum(ctx context.Context, obj fs.Object, download bool, func (c *checkMarch) matchSum(ctx context.Context, sumHash, objHash string, obj fs.Object, err error, hashType hash.Type) { switch { case err != nil: - _ = fs.CountError(err) + _ = fs.CountError(ctx, err) fs.Errorf(obj, "Failed to calculate hash: %v", err) c.report(obj, c.opt.Error, '!') case sumHash == "": err = errors.New("duplicate file") - _ = fs.CountError(err) + _ = fs.CountError(ctx, err) fs.Errorf(obj, "%v", err) c.report(obj, c.opt.Error, '!') case objHash == "": @@ -548,7 +551,7 @@ func (c *checkMarch) matchSum(ctx context.Context, sumHash, objHash string, obj c.report(obj, c.opt.Match, '=') default: err = errors.New("files differ") - _ = fs.CountError(err) + _ = fs.CountError(ctx, err) fs.Debugf(nil, "%v = %s (sum)", hashType, sumHash) fs.Debugf(obj, "%v = %s (%v)", hashType, objHash, c.opt.Fdst) fs.Errorf(obj, "%v", err) diff --git a/fs/operations/copy.go b/fs/operations/copy.go index 43afc5f01..8228a14c7 100644 --- a/fs/operations/copy.go +++ b/fs/operations/copy.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "hash/crc32" "io" "path" "strings" @@ -20,7 +21,6 @@ import ( "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/lib/atexit" "github.com/rclone/rclone/lib/pacer" - "github.com/rclone/rclone/lib/random" ) // State of the copy @@ -87,7 +87,7 @@ func TruncateString(s string, n int) string { } // Check to see if we should be using a partial name and return the name for the copy and the inplace flag -func (c *copy) checkPartial() (remoteForCopy string, inplace bool, err error) { +func (c *copy) checkPartial(ctx context.Context) (remoteForCopy string, 
inplace bool, err error) { remoteForCopy = c.remote if c.ci.Inplace || c.dstFeatures.Move == nil || !c.dstFeatures.PartialUploads || strings.HasSuffix(c.remote, ".rclonelink") { return remoteForCopy, true, nil @@ -97,7 +97,11 @@ func (c *copy) checkPartial() (remoteForCopy string, inplace bool, err error) { } // Avoid making the leaf name longer if it's already lengthy to avoid // trouble with file name length limits. - suffix := "." + random.String(8) + c.ci.PartialSuffix + + // generate a stable (but random-looking) suffix by hashing the source fingerprint + hash := crc32.ChecksumIEEE([]byte(fs.Fingerprint(ctx, c.src, true))) + + suffix := fmt.Sprintf(".%x%s", hash, c.ci.PartialSuffix) base := path.Base(remoteForCopy) if len(base) > 100 { remoteForCopy = TruncateString(remoteForCopy, len(remoteForCopy)-len(suffix)) + suffix @@ -327,7 +331,7 @@ func (c *copy) copy(ctx context.Context) (newDst fs.Object, err error) { } } if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(c.src, "Failed to copy: %v", err) if !c.inplace { c.removeFailedPartialCopy(ctx, c.f, c.remoteForCopy) @@ -339,7 +343,7 @@ func (c *copy) copy(ctx context.Context) (newDst fs.Object, err error) { err = c.verify(ctx, newDst) if err != nil { fs.Errorf(newDst, "%v", err) - err = fs.CountError(err) + err = fs.CountError(ctx, err) c.removeFailedCopy(ctx, newDst) return nil, err } @@ -349,7 +353,7 @@ func (c *copy) copy(ctx context.Context) (newDst fs.Object, err error) { movedNewDst, err := c.dstFeatures.Move(ctx, newDst, c.remote) if err != nil { fs.Errorf(newDst, "partial file rename failed: %v", err) - err = fs.CountError(err) + err = fs.CountError(ctx, err) c.removeFailedCopy(ctx, newDst) return nil, err } @@ -400,7 +404,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj // Are we using partials?
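
The checkPartial change above swaps the random suffix for a CRC-32 of the source fingerprint, so a retried upload of the same unchanged object lands on the same partial name. The derivation in isolation (the fingerprint literal here is a made-up example of the size,modtime,hash shape fs.Fingerprint produces):

```go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	// Stand-in for fs.Fingerprint(ctx, src, true); any change to the
	// source (size, modtime, hash) changes the suffix.
	fingerprint := "16,2001-02-03 04:05:06 +0000 UTC,8d777f385d3dfec8815d20f7496026dc"
	hash := crc32.ChecksumIEEE([]byte(fingerprint))
	suffix := fmt.Sprintf(".%x%s", hash, ".partial") // ".partial" is the default --partial-suffix
	fmt.Println(suffix)                              // stable for a given fingerprint
}
```
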
// // If so set the flag and update the name we use for the copy - c.remoteForCopy, c.inplace, err = c.checkPartial() + c.remoteForCopy, c.inplace, err = c.checkPartial(ctx) if err != nil { return nil, err } diff --git a/fs/operations/dedupe.go b/fs/operations/dedupe.go index e5e356c72..60c66c4a3 100644 --- a/fs/operations/dedupe.go +++ b/fs/operations/dedupe.go @@ -5,7 +5,6 @@ package operations import ( "context" "fmt" - "log" "path" "sort" "strings" @@ -21,7 +20,7 @@ import ( func dedupeRename(ctx context.Context, f fs.Fs, remote string, objs []fs.Object) { doMove := f.Features().Move if doMove == nil { - log.Fatalf("Fs %v doesn't support Move", f) + fs.Fatalf(nil, "Fs %v doesn't support Move", f) } ext := path.Ext(remote) base := remote[:len(remote)-len(ext)] @@ -33,7 +32,7 @@ outer: _, err := f.NewObject(ctx, newName) for ; err != fs.ErrorObjectNotFound; suffix++ { if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(o, "Failed to check for existing object: %v", err) continue outer } @@ -47,7 +46,7 @@ outer: if !SkipDestructive(ctx, o, "rename") { newObj, err := doMove(ctx, o, newName) if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(o, "Failed to rename: %v", err) continue } @@ -375,7 +374,7 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]*d fs.Infof(fsDirs[0], "Merging contents of duplicate directories") err := mergeDirs(ctx, fsDirs) if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(nil, "merge duplicate dirs: %v", err) } } diff --git a/fs/operations/operations.go b/fs/operations/operations.go index f14ecb61e..53d846525 100644 --- a/fs/operations/operations.go +++ b/fs/operations/operations.go @@ -101,11 +101,11 @@ func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash. return true, hash.None, srcHash, dstHash, nil } if srcErr != nil { - err = fs.CountError(srcErr) + err = fs.CountError(ctx, srcErr) fs.Errorf(src, "Failed to calculate src hash: %v", err) } if dstErr != nil { - err = fs.CountError(dstErr) + err = fs.CountError(ctx, dstErr) fs.Errorf(dst, "Failed to calculate dst hash: %v", err) } if err != nil { @@ -340,7 +340,7 @@ func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) logger(ctx, Differ, src, dst, nil) return false } else if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(dst, "Failed to set modification time: %v", err) } else { fs.Infof(src, "Updated modification time in destination") @@ -481,7 +481,7 @@ func move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs. 
fs.Debugf(src, "Can't move, switching to copy") _ = in.Close() default: - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(src, "Couldn't move: %v", err) _ = in.Close() return newDst, err @@ -566,7 +566,7 @@ func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs } if err != nil { fs.Errorf(dst, "Couldn't %s: %v", action, err) - err = fs.CountError(err) + err = fs.CountError(ctx, err) } else if !skip { fs.Infof(dst, "%s", actioned) } @@ -974,7 +974,7 @@ func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFl }() sum, err := HashSum(ctx, ht, outputBase64, downloadFlag, o) if err != nil { - fs.Errorf(o, "%v", fs.CountError(err)) + fs.Errorf(o, "%v", fs.CountError(ctx, err)) return } SyncFprintf(w, "%*s %s\n", width, sum, o.Remote()) @@ -1053,7 +1053,7 @@ func Mkdir(ctx context.Context, f fs.Fs, dir string) error { fs.Debugf(fs.LogDirName(f, dir), "Making directory") err := f.Mkdir(ctx, dir) if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) return err } return nil @@ -1075,7 +1075,7 @@ func MkdirMetadata(ctx context.Context, f fs.Fs, dir string, metadata fs.Metadat fs.Debugf(fs.LogDirName(f, dir), "Making directory with metadata") newDst, err = do(ctx, dir, metadata) if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) return nil, err } if mtime, ok := metadata["mtime"]; ok { @@ -1133,7 +1133,7 @@ func TryRmdir(ctx context.Context, f fs.Fs, dir string) error { func Rmdir(ctx context.Context, f fs.Fs, dir string) error { err := TryRmdir(ctx, f, dir) if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) return err } return err @@ -1162,7 +1162,7 @@ func Purge(ctx context.Context, f fs.Fs, dir string) (err error) { err = Rmdirs(ctx, f, dir, false) } if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) return err } return nil @@ -1207,7 +1207,7 @@ func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan { }) if err != nil && err != fs.ErrorDirNotFound { err = fmt.Errorf("failed to list: %w", err) - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(nil, "%v", err) } }() @@ -1267,7 +1267,7 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64, sep []b var in io.ReadCloser in, err = Open(ctx, o, options...) 
if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(o, "Failed to open: %v", err) return } @@ -1280,13 +1280,13 @@ func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64, sep []b defer mu.Unlock() _, err = io.Copy(w, in) if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(o, "Failed to send to output: %v", err) } if len(sep) > 0 { _, err = w.Write(sep) if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(o, "Failed to send separator to output: %v", err) } } @@ -1423,7 +1423,7 @@ func rcatSrc(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadClos src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, sums, fdst).WithMetadata(meta) if !equal(ctx, src, dst, opt) { err = fmt.Errorf("corrupted on transfer") - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(dst, "%v", err) return dst, err } @@ -1450,7 +1450,7 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error { dirEmpty[dir] = !leaveRoot err := walk.Walk(ctx, f, dir, false, ci.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(f, "Failed to list %q: %v", dirPath, err) return nil } @@ -1526,7 +1526,7 @@ func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error { g.Go(func() error { err := TryRmdir(gCtx, f, dir) if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(dir, "Failed to rmdir: %v", err) errCount.Add(err) } @@ -2096,7 +2096,7 @@ func TouchDir(ctx context.Context, f fs.Fs, remote string, t time.Time, recursiv err := o.SetModTime(ctx, t) if err != nil { err = fmt.Errorf("failed to touch: %w", err) - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(o, "%v", err) } } diff --git a/fs/rc/cache_test.go b/fs/rc/cache_test.go index 0a351cee7..15952fa04 100644 --- a/fs/rc/cache_test.go +++ b/fs/rc/cache_test.go @@ -22,7 +22,7 @@ func mockNewFs(t *testing.T) func() { require.NoError(t, err) cache.Put("mock:/", f) cache.Put(":mock:/", f) - f, err = mockfs.NewFs(ctx, "mock", "dir/file.txt", nil) + f, err = mockfs.NewFs(ctx, "mock", "dir/", nil) require.NoError(t, err) cache.PutErr("mock:dir/file.txt", f, fs.ErrorIsFile) return func() { diff --git a/fs/rc/rc.go b/fs/rc/rc.go index 55287ed6b..e41ac9546 100644 --- a/fs/rc/rc.go +++ b/fs/rc/rc.go @@ -71,8 +71,8 @@ var OptionsInfo = fs.Options{{ }, { Name: "rc_enable_metrics", Default: false, - Help: "Enable prometheus metrics on /metrics", - Groups: "RC", + Help: "Enable the Prometheus metrics path at the remote control server", + Groups: "RC,Metrics", }, { Name: "rc_job_expire_duration", Default: 60 * time.Second, @@ -83,10 +83,18 @@ var OptionsInfo = fs.Options{{ Default: 10 * time.Second, Help: "Interval to check for expired async jobs", Groups: "RC", +}, { + Name: "metrics_addr", + Default: []string{}, + Help: "IPaddress:Port or :Port to bind metrics server to", + Groups: "Metrics", }}. AddPrefix(libhttp.ConfigInfo, "rc", "RC"). AddPrefix(libhttp.AuthConfigInfo, "rc", "RC"). AddPrefix(libhttp.TemplateConfigInfo, "rc", "RC"). + AddPrefix(libhttp.ConfigInfo, "metrics", "Metrics"). + AddPrefix(libhttp.AuthConfigInfo, "metrics", "Metrics"). + AddPrefix(libhttp.TemplateConfigInfo, "metrics", "Metrics"). 
SetDefault("rc_addr", []string{"localhost:5572"}) func init() { @@ -109,6 +117,9 @@ type Options struct { WebGUINoOpenBrowser bool `config:"rc_web_gui_no_open_browser"` // set to disable auto opening browser WebGUIFetchURL string `config:"rc_web_fetch_url"` // set the default url for fetching webgui EnableMetrics bool `config:"rc_enable_metrics"` // set to disable prometheus metrics on /metrics + MetricsHTTP libhttp.Config `config:"metrics"` + MetricsAuth libhttp.AuthConfig `config:"metrics"` + MetricsTemplate libhttp.TemplateConfig `config:"metrics"` JobExpireDuration time.Duration `config:"rc_job_expire_duration"` JobExpireInterval time.Duration `config:"rc_job_expire_interval"` } diff --git a/fs/rc/rcserver/metrics.go b/fs/rc/rcserver/metrics.go new file mode 100644 index 000000000..367e6aeab --- /dev/null +++ b/fs/rc/rcserver/metrics.go @@ -0,0 +1,97 @@ +// Package rcserver implements the HTTP endpoint to serve the remote control +package rcserver + +import ( + "context" + "fmt" + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/rc" + "github.com/rclone/rclone/fs/rc/jobs" + libhttp "github.com/rclone/rclone/lib/http" +) + +const path = "/metrics" + +var promHandlerFunc http.HandlerFunc + +func init() { + rcloneCollector := accounting.NewRcloneCollector(context.Background()) + prometheus.MustRegister(rcloneCollector) + + m := fshttp.NewMetrics("rclone") + for _, c := range m.Collectors() { + prometheus.MustRegister(c) + } + fshttp.DefaultMetrics = m + + promHandlerFunc = promhttp.Handler().ServeHTTP +} + +// MetricsStart the remote control server if configured +// +// If the server wasn't configured the *Server returned may be nil +func MetricsStart(ctx context.Context, opt *rc.Options) (*MetricsServer, error) { + jobs.SetOpt(opt) // set the defaults for jobs + if len(opt.MetricsHTTP.ListenAddr) > 0 { + // Serve on the DefaultServeMux so can have global registrations appear + s, err := newMetricsServer(ctx, opt) + if err != nil { + return nil, err + } + return s, s.Serve() + } + return nil, nil +} + +// MetricsServer contains everything to run the rc server +type MetricsServer struct { + ctx context.Context // for global config + server *libhttp.Server + promHandlerFunc http.Handler + opt *rc.Options +} + +func newMetricsServer(ctx context.Context, opt *rc.Options) (*MetricsServer, error) { + s := &MetricsServer{ + ctx: ctx, + opt: opt, + promHandlerFunc: promHandlerFunc, + } + + var err error + s.server, err = libhttp.NewServer(ctx, + libhttp.WithConfig(opt.MetricsHTTP), + libhttp.WithAuth(opt.MetricsAuth), + libhttp.WithTemplate(opt.MetricsTemplate), + ) + if err != nil { + return nil, fmt.Errorf("failed to init server: %w", err) + } + + router := s.server.Router() + router.Get(path, promHandlerFunc) + return s, nil +} + +// Serve runs the http server in the background. 
+// +// Use s.Shutdown() and s.Wait() to shut down the server +func (s *MetricsServer) Serve() error { + s.server.Serve() + return nil +} + +// Wait blocks while the server is serving requests +func (s *MetricsServer) Wait() { + s.server.Wait() +} + +// Shutdown gracefully shuts down the server +func (s *MetricsServer) Shutdown() error { + return s.server.Shutdown() +} diff --git a/fs/rc/rcserver/metrics_test.go b/fs/rc/rcserver/metrics_test.go new file mode 100644 index 000000000..6141cc2a7 --- /dev/null +++ b/fs/rc/rcserver/metrics_test.go @@ -0,0 +1,88 @@ +package rcserver + +import ( + "context" + "fmt" + "net/http" + "regexp" + "testing" + + _ "github.com/rclone/rclone/backend/local" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/config/configfile" + "github.com/rclone/rclone/fs/rc" + "github.com/stretchr/testify/require" +) + +// Run a suite of tests +func testMetricsServer(t *testing.T, tests []testRun, opt *rc.Options) { + t.Helper() + ctx := context.Background() + configfile.Install() + rcServer, err := newMetricsServer(ctx, opt) + require.NoError(t, err) + testURL := rcServer.server.URLs()[0] + mux := rcServer.server.Router() + emulateCalls(t, tests, mux, testURL) +} + +// return options with the metrics server enabled +func newMetricsTestOpt() rc.Options { + opt := rc.Opt + opt.MetricsHTTP.ListenAddr = []string{testBindAddress} + return opt +} + +func TestMetrics(t *testing.T) { + stats := accounting.GlobalStats() + tests := makeMetricsTestCases(stats) + opt := newMetricsTestOpt() + testMetricsServer(t, tests, &opt) + + // Test changing a couple options + stats.Bytes(500) + for i := 0; i < 30; i++ { + require.NoError(t, stats.DeleteFile(context.Background(), 0)) + } + stats.Errors(2) + stats.Bytes(324) + + tests = makeMetricsTestCases(stats) + testMetricsServer(t, tests, &opt) +} + +func makeMetricsTestCases(stats *accounting.StatsInfo) (tests []testRun) { + tests = []testRun{{ + Name: "Bytes Transferred Metric", + URL: "metrics", + Method: "GET", + Status: http.StatusOK, + Contains: regexp.MustCompile(fmt.Sprintf("rclone_bytes_transferred_total %d", stats.GetBytes())), + }, { + Name: "Checked Files Metric", + URL: "metrics", + Method: "GET", + Status: http.StatusOK, + Contains: regexp.MustCompile(fmt.Sprintf("rclone_checked_files_total %d", stats.GetChecks())), + }, { + Name: "Errors Metric", + URL: "metrics", + Method: "GET", + Status: http.StatusOK, + Contains: regexp.MustCompile(fmt.Sprintf("rclone_errors_total %d", stats.GetErrors())), + }, { + Name: "Deleted Files Metric", + URL: "metrics", + Method: "GET", + Status: http.StatusOK, + Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_deleted_total %d", stats.GetDeletes())), + }, { + Name: "Files Transferred Metric", + URL: "metrics", + Method: "GET", + Status: http.StatusOK, + Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_transferred_total %d", stats.GetTransfers())), + }, + } + return +} diff --git a/fs/rc/rcserver/rcserver.go b/fs/rc/rcserver/rcserver.go index 88f82a484..8cc666f3c 100644 --- a/fs/rc/rcserver/rcserver.go +++ b/fs/rc/rcserver/rcserver.go @@ -7,7 +7,6 @@ import ( "encoding/json" "flag" "fmt" - "log" "mime" "net/http" "net/url" @@ -18,13 +17,9 @@ import ( "time" "github.com/go-chi/chi/v5/middleware" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rclone/rclone/fs" - "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/cache" "github.com/rclone/rclone/fs/config" - "github.com/rclone/rclone/fs/fshttp"
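
Taken together, the new metrics.go and the option plumbing let the Prometheus endpoint run on its own listener, independent of the rc API. A hedged sketch of starting it programmatically, using rc.Opt and MetricsStart as introduced above (real usage goes through the --metrics-addr flag instead):

```go
package main

import (
	"context"

	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/fs/rc/rcserver"
)

func main() {
	opt := rc.Opt // copy the global defaults
	opt.MetricsHTTP.ListenAddr = []string{"localhost:9090"}
	s, err := rcserver.MetricsStart(context.Background(), &opt)
	if err != nil {
		panic(err)
	}
	// GET http://localhost:9090/metrics now serves the Prometheus registry.
	defer func() { _ = s.Shutdown() }()
	s.Wait()
}
```
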
"github.com/rclone/rclone/fs/list" "github.com/rclone/rclone/fs/rc" "github.com/rclone/rclone/fs/rc/jobs" @@ -35,21 +30,6 @@ import ( "github.com/skratchdot/open-golang/open" ) -var promHandler http.Handler - -func init() { - rcloneCollector := accounting.NewRcloneCollector(context.Background()) - prometheus.MustRegister(rcloneCollector) - - m := fshttp.NewMetrics("rclone") - for _, c := range m.Collectors() { - prometheus.MustRegister(c) - } - fshttp.DefaultMetrics = m - - promHandler = promhttp.Handler() -} - // Start the remote control server if configured // // If the server wasn't configured the *Server returned may be nil @@ -105,7 +85,7 @@ func newServer(ctx context.Context, opt *rc.Options, mux *http.ServeMux) (*Serve if opt.Auth.BasicPass == "" && opt.Auth.HtPasswd == "" { randomPass, err := random.Password(128) if err != nil { - log.Fatalf("Failed to make password: %v", err) + fs.Fatalf(nil, "Failed to make password: %v", err) } opt.Auth.BasicPass = randomPass fs.Infof(nil, "No password specified. Using random password: %s \n", randomPass) @@ -376,7 +356,7 @@ func (s *Server) handleGet(w http.ResponseWriter, r *http.Request, path string) s.serveRemote(w, r, fsMatchResult[2], fsMatchResult[1]) return case path == "metrics" && s.opt.EnableMetrics: - promHandler.ServeHTTP(w, r) + promHandlerFunc(w, r) return case path == "*" && s.opt.Serve: // Serve /* as the remote listing diff --git a/fs/rc/rcserver/rcserver_test.go b/fs/rc/rcserver/rcserver_test.go index 7b55f2e03..29738b614 100644 --- a/fs/rc/rcserver/rcserver_test.go +++ b/fs/rc/rcserver/rcserver_test.go @@ -15,9 +15,10 @@ import ( "testing" "time" + "github.com/go-chi/chi/v5" + _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/fs" - "github.com/rclone/rclone/fs/accounting" "github.com/rclone/rclone/fs/config/configfile" "github.com/rclone/rclone/fs/rc" "github.com/stretchr/testify/assert" @@ -115,6 +116,10 @@ func testServer(t *testing.T, tests []testRun, opt *rc.Options) { require.NoError(t, err) testURL := rcServer.server.URLs()[0] mux := rcServer.server.Router() + emulateCalls(t, tests, mux, testURL) +} + +func emulateCalls(t *testing.T, tests []testRun, mux chi.Router, testURL string) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { t.Helper() @@ -568,61 +573,6 @@ Unknown command testServer(t, tests, &opt) } -func TestMetrics(t *testing.T) { - stats := accounting.GlobalStats() - tests := makeMetricsTestCases(stats) - opt := newTestOpt() - opt.EnableMetrics = true - testServer(t, tests, &opt) - - // Test changing a couple options - stats.Bytes(500) - for i := 0; i < 30; i++ { - require.NoError(t, stats.DeleteFile(context.Background(), 0)) - } - stats.Errors(2) - stats.Bytes(324) - - tests = makeMetricsTestCases(stats) - testServer(t, tests, &opt) -} - -func makeMetricsTestCases(stats *accounting.StatsInfo) (tests []testRun) { - tests = []testRun{{ - Name: "Bytes Transferred Metric", - URL: "/metrics", - Method: "GET", - Status: http.StatusOK, - Contains: regexp.MustCompile(fmt.Sprintf("rclone_bytes_transferred_total %d", stats.GetBytes())), - }, { - Name: "Checked Files Metric", - URL: "/metrics", - Method: "GET", - Status: http.StatusOK, - Contains: regexp.MustCompile(fmt.Sprintf("rclone_checked_files_total %d", stats.GetChecks())), - }, { - Name: "Errors Metric", - URL: "/metrics", - Method: "GET", - Status: http.StatusOK, - Contains: regexp.MustCompile(fmt.Sprintf("rclone_errors_total %d", stats.GetErrors())), - }, { - Name: "Deleted Files Metric", - URL: "/metrics", - Method: 
"GET", - Status: http.StatusOK, - Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_deleted_total %d", stats.GetDeletes())), - }, { - Name: "Files Transferred Metric", - URL: "/metrics", - Method: "GET", - Status: http.StatusOK, - Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_transferred_total %d", stats.GetTransfers())), - }, - } - return -} - var matchRemoteDirListing = regexp.MustCompile(`Directory listing of /`) func TestServingRoot(t *testing.T) { diff --git a/fs/registry.go b/fs/registry.go index ad9191d43..e341bd2d5 100644 --- a/fs/registry.go +++ b/fs/registry.go @@ -6,7 +6,6 @@ import ( "context" "encoding/json" "fmt" - "log" "reflect" "regexp" "sort" @@ -265,14 +264,9 @@ func (o *Option) String() string { if len(stringArray) == 0 { return "" } - // Encode string arrays as JSON + // Encode string arrays as CSV // The default Go encoding can't be decoded uniquely - buf, err := json.Marshal(stringArray) - if err != nil { - Errorf(nil, "Can't encode default value for %q key - ignoring: %v", o.Name, err) - return "[]" - } - return string(buf) + return CommaSepList(stringArray).String() } return fmt.Sprint(v) } @@ -408,7 +402,7 @@ func Find(name string) (*RegInfo, error) { func MustFind(name string) *RegInfo { fs, err := Find(name) if err != nil { - log.Fatalf("Failed to find remote: %v", err) + Fatalf(nil, "Failed to find remote: %v", err) } return fs } @@ -434,7 +428,7 @@ func RegisterGlobalOptions(oi OptionsInfo) { if oi.Opt != nil && oi.Options != nil { err := oi.Check() if err != nil { - log.Fatalf("%v", err) + Fatalf(nil, "%v", err) } } // Load the default values into the options. @@ -446,7 +440,7 @@ func RegisterGlobalOptions(oi OptionsInfo) { // again when the flags are ready. err := oi.load() if err != nil { - log.Fatalf("Failed to load %q default values: %v", oi.Name, err) + Fatalf(nil, "Failed to load %q default values: %v", oi.Name, err) } } @@ -532,7 +526,22 @@ func (oi *OptionsInfo) load() error { // their values read from the options, environment variables and // command line parameters. 
func GlobalOptionsInit() error { - for _, opt := range OptionsRegistry { + var keys []string + for key := range OptionsRegistry { + keys = append(keys, key) + } + sort.Slice(keys, func(i, j int) bool { + // Sort alphabetically, but with "main" first + if keys[i] == "main" { + return true + } + if keys[j] == "main" { + return false + } + return keys[i] < keys[j] + }) + for _, key := range keys { + opt := OptionsRegistry[key] err := opt.load() if err != nil { return err diff --git a/fs/sync/sync.go b/fs/sync/sync.go index 67998268f..e0a3e23ac 100644 --- a/fs/sync/sync.go +++ b/fs/sync/sync.go @@ -400,7 +400,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W if needTransfer { // If files are treated as immutable, fail if destination exists and does not match if s.ci.Immutable && pair.Dst != nil { - err := fs.CountError(fserrors.NoRetryError(fs.ErrorImmutableModified)) + err := fs.CountError(s.ctx, fserrors.NoRetryError(fs.ErrorImmutableModified)) fs.Errorf(pair.Dst, "Source and destination exist but do not match: %v", err) s.processError(err) } else { @@ -1204,7 +1204,7 @@ func (s *syncCopyMove) setDelayedDirModTimes(ctx context.Context) error { _, err = operations.SetDirModTime(gCtx, s.fdst, item.dst, item.dir, item.modTime) } if err != nil { - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(item.dir, "Failed to update directory timestamp or metadata: %v", err) errCount.Add(err) } @@ -1399,7 +1399,7 @@ func MoveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, cop fs.Infof(fdst, "Server side directory move succeeded") return nil default: - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(fdst, "Server side directory move failed: %v", err) return err } diff --git a/fs/sync/sync_test.go b/fs/sync/sync_test.go index f1c07921e..f4f1c556a 100644 --- a/fs/sync/sync_test.go +++ b/fs/sync/sync_test.go @@ -597,6 +597,108 @@ func TestServerSideCopy(t *testing.T) { fstest.CheckItems(t, FremoteCopy, file1) } +// Test copying a file over itself +func TestCopyOverSelf(t *testing.T) { + ctx := context.Background() + r := fstest.NewRun(t) + file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1) + r.CheckRemoteItems(t, file1) + file2 := r.WriteFile("sub dir/hello world", "hello world again", t2) + r.CheckLocalItems(t, file2) + + ctx = predictDstFromLogger(ctx) + err := CopyDir(ctx, r.Fremote, r.Flocal, false) + require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) + r.CheckRemoteItems(t, file2) +} + +// Test server-side copying a file over itself +func TestServerSideCopyOverSelf(t *testing.T) { + ctx := context.Background() + r := fstest.NewRun(t) + file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1) + r.CheckRemoteItems(t, file1) + + FremoteCopy, _, finaliseCopy, err := fstest.RandomRemote() + require.NoError(t, err) + defer finaliseCopy() + t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy) + + ctx = predictDstFromLogger(ctx) + err = CopyDir(ctx, FremoteCopy, r.Fremote, false) + require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) + fstest.CheckItems(t, FremoteCopy, file1) + + file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2) + r.CheckRemoteItems(t, file2) + + ctx = predictDstFromLogger(ctx) + err = CopyDir(ctx, FremoteCopy, r.Fremote, false) + require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) + 
fstest.CheckItems(t, FremoteCopy, file2) +} + +// Test moving a file over itself +func TestMoveOverSelf(t *testing.T) { + ctx := context.Background() + r := fstest.NewRun(t) + file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1) + r.CheckRemoteItems(t, file1) + file2 := r.WriteFile("sub dir/hello world", "hello world again", t2) + r.CheckLocalItems(t, file2) + + ctx = predictDstFromLogger(ctx) + err := MoveDir(ctx, r.Fremote, r.Flocal, false, false) + require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) + r.CheckLocalItems(t) + r.CheckRemoteItems(t, file2) +} + +// Test server-side moving a file over itself +func TestServerSideMoveOverSelf(t *testing.T) { + ctx := context.Background() + r := fstest.NewRun(t) + file1 := r.WriteObject(ctx, "sub dir/hello world", "hello world", t1) + r.CheckRemoteItems(t, file1) + + FremoteCopy, _, finaliseCopy, err := fstest.RandomRemote() + require.NoError(t, err) + defer finaliseCopy() + t.Logf("Server side copy (if possible) %v -> %v", r.Fremote, FremoteCopy) + + ctx = predictDstFromLogger(ctx) + err = CopyDir(ctx, FremoteCopy, r.Fremote, false) + require.NoError(t, err) + testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) + fstest.CheckItems(t, FremoteCopy, file1) + + file2 := r.WriteObject(ctx, "sub dir/hello world", "hello world again", t2) + r.CheckRemoteItems(t, file2) + + // ctx = predictDstFromLogger(ctx) + err = MoveDir(ctx, FremoteCopy, r.Fremote, false, false) + require.NoError(t, err) + // testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) // not currently supported + r.CheckRemoteItems(t) + fstest.CheckItems(t, FremoteCopy, file2) + + // check that individual file moves also work without MoveDir + file3 := r.WriteObject(ctx, "sub dir/hello world", "hello world a third time", t3) + r.CheckRemoteItems(t, file3) + + ctx = predictDstFromLogger(ctx) + fs.Debugf(nil, "testing file moves") + err = moveDir(ctx, FremoteCopy, r.Fremote, false, false) + require.NoError(t, err) + testLoggerVsLsf(ctx, FremoteCopy, operations.GetLoggerOpt(ctx).JSON, t) + r.CheckRemoteItems(t) + fstest.CheckItems(t, FremoteCopy, file3) +} + // Check that if the local file doesn't exist when we copy it up, // nothing happens to the remote file func TestCopyAfterDelete(t *testing.T) { @@ -844,7 +946,7 @@ func TestSyncIgnoreErrors(t *testing.T) { accounting.GlobalStats().ResetCounters() ctx = predictDstFromLogger(ctx) - _ = fs.CountError(errors.New("boom")) + _ = fs.CountError(ctx, errors.New("boom")) assert.NoError(t, Sync(ctx, r.Fremote, r.Flocal, false)) testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) @@ -1165,7 +1267,7 @@ func TestSyncAfterRemovingAFileAndAddingAFileSubDirWithErrors(t *testing.T) { ctx = predictDstFromLogger(ctx) accounting.GlobalStats().ResetCounters() - _ = fs.CountError(errors.New("boom")) + _ = fs.CountError(ctx, errors.New("boom")) err := Sync(ctx, r.Fremote, r.Flocal, false) assert.Equal(t, fs.ErrorNotDeleting, err) testLoggerVsLsf(ctx, r.Fremote, operations.GetLoggerOpt(ctx).JSON, t) @@ -2320,15 +2422,19 @@ func testSyncBackupDir(t *testing.T, backupDir string, suffix string, suffixKeep r.CheckRemoteItems(t, file1b, file2, file3a, file1a) } + func TestSyncBackupDir(t *testing.T) { testSyncBackupDir(t, "backup", "", false) } + func TestSyncBackupDirWithSuffix(t *testing.T) { testSyncBackupDir(t, "backup", ".bak", false) } + func TestSyncBackupDirWithSuffixKeepExtension(t *testing.T) { testSyncBackupDir(t, "backup", "-2019-01-01", 
true) } + func TestSyncBackupDirSuffixOnly(t *testing.T) { testSyncBackupDir(t, "", ".bak", false) } @@ -2806,7 +2912,7 @@ func predictDstFromLogger(ctx context.Context) context.Context { } func DstLsf(ctx context.Context, Fremote fs.Fs) *bytes.Buffer { - var opt = operations.ListJSONOpt{ + opt := operations.ListJSONOpt{ NoModTime: false, NoMimeType: true, DirsOnly: false, diff --git a/fs/versiontag.go b/fs/versiontag.go index a82e25e04..0080a00a5 100644 --- a/fs/versiontag.go +++ b/fs/versiontag.go @@ -1,4 +1,4 @@ package fs // VersionTag of rclone -var VersionTag = "v1.68.0" +var VersionTag = "v1.69.0" diff --git a/fs/walk/walk.go b/fs/walk/walk.go index ada7a284d..51e567a4f 100644 --- a/fs/walk/walk.go +++ b/fs/walk/walk.go @@ -170,7 +170,7 @@ func listRwalk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLe // Carry on listing but return the error at the end if err != nil { listErr = err - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(path, "error listing: %v", err) return nil } @@ -415,7 +415,7 @@ func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel i // NB once we have passed entries to fn we mustn't touch it again if err != nil && err != ErrorSkipDir { traversing.Done() - err = fs.CountError(err) + err = fs.CountError(ctx, err) fs.Errorf(job.remote, "error listing: %v", err) closeQuit() // Send error to error channel if space diff --git a/fstest/fstest.go b/fstest/fstest.go index 032a50853..69a05d76a 100644 --- a/fstest/fstest.go +++ b/fstest/fstest.go @@ -10,7 +10,6 @@ import ( "flag" "fmt" "io" - "log" "os" "path" "path/filepath" @@ -99,7 +98,7 @@ func NewItem(Path, Content string, modTime time.Time) Item { buf := bytes.NewBufferString(Content) _, err := io.Copy(hash, buf) if err != nil { - log.Fatalf("Failed to create item: %v", err) + fs.Fatalf(nil, "Failed to create item: %v", err) } i.Hashes = hash.Sums() return i @@ -398,7 +397,7 @@ func CompareItems(t *testing.T, entries fs.DirEntries, items []Item, expectedDir func Time(timeString string) time.Time { t, err := time.Parse(time.RFC3339Nano, timeString) if err != nil { - log.Fatalf("Failed to parse time %q: %v", timeString, err) + fs.Fatalf(nil, "Failed to parse time %q: %v", timeString, err) } return t } @@ -433,7 +432,7 @@ func RandomRemoteName(remoteName string) (string, string, error) { } leafName = "rclone-test-" + random.String(12) if !MatchTestRemote.MatchString(leafName) { - log.Fatalf("%q didn't match the test remote name regexp", leafName) + fs.Fatalf(nil, "%q didn't match the test remote name regexp", leafName) } remoteName += leafName } @@ -467,7 +466,7 @@ func RandomRemote() (fs.Fs, string, func(), error) { if parentRemote != nil { Purge(parentRemote) if err != nil { - log.Printf("Failed to purge %v: %v", parentRemote, err) + fs.Logf(nil, "Failed to purge %v: %v", parentRemote, err) } } } @@ -499,7 +498,7 @@ func Purge(f fs.Fs) { fs.Debugf(f, "Purge object %q", obj.Remote()) err = obj.Remove(ctx) if err != nil { - log.Printf("purge failed to remove %q: %v", obj.Remote(), err) + fs.Logf(nil, "purge failed to remove %q: %v", obj.Remote(), err) } }) entries.ForDir(func(dir fs.Directory) { @@ -513,12 +512,12 @@ func Purge(f fs.Fs) { fs.Debugf(f, "Purge dir %q", dir) err := f.Rmdir(ctx, dir) if err != nil { - log.Printf("purge failed to rmdir %q: %v", dir, err) + fs.Logf(nil, "purge failed to rmdir %q: %v", dir, err) } } } if err != nil { - log.Printf("purge failed: %v", err) + fs.Logf(nil, "purge failed: %v", err) } } diff --git a/fstest/test_all/clean.go 
b/fstest/test_all/clean.go index 788061cf3..0e386dcaf 100644
--- a/fstest/test_all/clean.go
+++ b/fstest/test_all/clean.go
@@ -5,7 +5,6 @@ package main
 import (
 	"context"
 	"fmt"
-	"log"
 	"regexp"
 
 	"github.com/rclone/rclone/fs"
@@ -27,7 +26,7 @@ func cleanFs(ctx context.Context, remote string, cleanup bool) error {
 	}
 	var lastErr error
 	if cleanup {
-		log.Printf("%q - running cleanup", remote)
+		fs.Logf(nil, "%q - running cleanup", remote)
 		err = operations.CleanUp(ctx, f)
 		if err != nil {
 			lastErr = err
@@ -43,10 +42,10 @@ func cleanFs(ctx context.Context, remote string, cleanup bool) error {
 		fullPath := fspath.JoinRootPath(remote, dirPath)
 		if MatchTestRemote.MatchString(dirPath) {
 			if *dryRun {
-				log.Printf("Not Purging %s - -dry-run", fullPath)
+				fs.Logf(nil, "Not Purging %s - -dry-run", fullPath)
 				return nil
 			}
-			log.Printf("Purging %s", fullPath)
+			fs.Logf(nil, "Purging %s", fullPath)
 			dir, err := fs.NewFs(context.Background(), fullPath)
 			if err != nil {
 				err = fmt.Errorf("NewFs failed: %w", err)
@@ -75,11 +74,11 @@ func cleanRemotes(conf *Config) error {
 	var lastError error
 	for _, backend := range conf.Backends {
 		remote := backend.Remote
-		log.Printf("%q - Cleaning", remote)
+		fs.Logf(nil, "%q - Cleaning", remote)
 		err := cleanFs(context.Background(), remote, backend.CleanUp)
 		if err != nil {
 			lastError = err
-			log.Printf("Failed to purge %q: %v", remote, err)
+			fs.Logf(nil, "Failed to purge %q: %v", remote, err)
 		}
 	}
 	return lastError
diff --git a/fstest/test_all/config.go b/fstest/test_all/config.go
index 45920e25e..c296013d8 100644
--- a/fstest/test_all/config.go
+++ b/fstest/test_all/config.go
@@ -4,9 +4,9 @@ package main
 import (
 	"fmt"
-	"log"
 	"os"
 	"path"
+	"slices"
 
 	"github.com/rclone/rclone/fs"
 	yaml "gopkg.in/yaml.v2"
@@ -36,6 +36,7 @@ type Backend struct {
 	CleanUp     bool     // when running clean, run cleanup first
 	Ignore      []string // test names to ignore the failure of
 	Tests       []string // paths of tests to run, blank for all
+	IgnoreTests []string // paths of tests not to run, blank for none
 	ListRetries int      // -list-retries if > 0
 	ExtraTime   float64  // factor to multiply the timeout by
 }
@@ -43,15 +44,15 @@ type Backend struct {
 // includeTest returns true if this backend should be included in this
 // test
 func (b *Backend) includeTest(t *Test) bool {
+	// Is this test ignored?
+	if slices.Contains(b.IgnoreTests, t.Path) {
+		return false
+	}
+	// Empty b.Tests implies running all of them except the ignored
 	if len(b.Tests) == 0 {
 		return true
 	}
-	for _, testPath := range b.Tests {
-		if testPath == t.Path {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(b.Tests, t.Path)
 }
 
 // MakeRuns creates Run objects for the Backend and Test
@@ -65,7 +66,7 @@ func (b *Backend) MakeRuns(t *Test) (runs []*Run) {
 	maxSize := fs.SizeSuffix(0)
 	if b.MaxFile != "" {
 		if err := maxSize.Set(b.MaxFile); err != nil {
-			log.Printf("Invalid maxfile value %q: %v", b.MaxFile, err)
+			fs.Logf(nil, "Invalid maxfile value %q: %v", b.MaxFile, err)
 		}
 	}
 	fastlists := []bool{false}
@@ -152,11 +153,11 @@ func (c *Config) filterBackendsByRemotes(remotes []string) {
 		}
 	}
 	if !found {
-		log.Printf("Remote %q not found - inserting with default flags", name)
+		fs.Logf(nil, "Remote %q not found - inserting with default flags", name)
 		// Lookup which backend
 		fsInfo, _, _, _, err := fs.ConfigFs(name)
 		if err != nil {
-			log.Fatalf("couldn't find remote %q: %v", name, err)
+			fs.Fatalf(nil, "couldn't find remote %q: %v", name, err)
 		}
 		newBackends = append(newBackends, Backend{Backend: fsInfo.FileName(), Remote: name})
 	}
diff --git a/fstest/test_all/config.yaml
b/fstest/test_all/config.yaml index b3fa32c87..3aa68afab 100644 --- a/fstest/test_all/config.yaml +++ b/fstest/test_all/config.yaml @@ -395,6 +395,10 @@ backends: - backend: "cache" remote: "TestCache:" fastlist: false + ignoretests: + - TestBisyncLocalRemote + - TestBisyncRemoteLocal + - TestBisyncRemoteRemote - backend: "mega" remote: "TestMega:" fastlist: false @@ -422,6 +426,7 @@ backends: - TestCaseInsensitiveMoveFile - TestFixCase - TestListDirSorted # Can't upload files starting with . - FIXME fix with encoding + - TestSyncOverlapWithFilter # Can't upload files starting with . - FIXME fix with encoding - backend: "premiumizeme" remote: "TestPremiumizeMe:" fastlist: false @@ -498,3 +503,6 @@ backends: - backend: "ulozto" remote: "TestUlozto:" fastlist: false + - backend: "iclouddrive" + remote: "TestICloudDrive:" + fastlist: false diff --git a/fstest/test_all/report.go b/fstest/test_all/report.go index 0dd4167d5..c03e226bf 100644 --- a/fstest/test_all/report.go +++ b/fstest/test_all/report.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" "html/template" - "log" "os" "os/exec" "path" @@ -77,7 +76,7 @@ func NewReport() *Report { r.LogDir = path.Join(*outputDir, r.DateTime) err = file.MkdirAll(r.LogDir, 0777) if err != nil { - log.Fatalf("Failed to make log directory: %v", err) + fs.Fatalf(nil, "Failed to make log directory: %v", err) } // Online version @@ -132,15 +131,15 @@ func (r *Report) Title() string { // LogSummary writes the summary to the log file func (r *Report) LogSummary() { - log.Printf("Logs in %q", r.LogDir) + fs.Logf(nil, "Logs in %q", r.LogDir) // Summarise results - log.Printf("SUMMARY") - log.Println(r.Title()) + fs.Logf(nil, "SUMMARY") + fs.Log(nil, r.Title()) if !r.AllPassed() { for _, t := range r.Failed { - log.Printf(" * %s", toShell(t.nextCmdLine())) - log.Printf(" * Failed tests: %v", t.FailedTests) + fs.Logf(nil, " * %s", toShell(t.nextCmdLine())) + fs.Logf(nil, " * Failed tests: %v", t.FailedTests) } } } @@ -149,11 +148,11 @@ func (r *Report) LogSummary() { func (r *Report) LogJSON() { out, err := json.MarshalIndent(r, "", "\t") if err != nil { - log.Fatalf("Failed to marshal data for index.json: %v", err) + fs.Fatalf(nil, "Failed to marshal data for index.json: %v", err) } err = os.WriteFile(path.Join(r.LogDir, "index.json"), out, 0666) if err != nil { - log.Fatalf("Failed to write index.json: %v", err) + fs.Fatalf(nil, "Failed to write index.json: %v", err) } } @@ -162,17 +161,17 @@ func (r *Report) LogHTML() { r.IndexHTML = path.Join(r.LogDir, "index.html") out, err := os.Create(r.IndexHTML) if err != nil { - log.Fatalf("Failed to open index.html: %v", err) + fs.Fatalf(nil, "Failed to open index.html: %v", err) } defer func() { err := out.Close() if err != nil { - log.Fatalf("Failed to close index.html: %v", err) + fs.Fatalf(nil, "Failed to close index.html: %v", err) } }() err = reportTemplate.Execute(out, r) if err != nil { - log.Fatalf("Failed to execute template: %v", err) + fs.Fatalf(nil, "Failed to execute template: %v", err) } _ = open.Start("file://" + r.IndexHTML) } @@ -282,19 +281,19 @@ func (r *Report) EmailHTML() { if *emailReport == "" || r.IndexHTML == "" { return } - log.Printf("Sending email summary to %q", *emailReport) + fs.Logf(nil, "Sending email summary to %q", *emailReport) cmdLine := []string{"mail", "-a", "Content-Type: text/html", *emailReport, "-s", "rclone integration tests: " + r.Title()} cmd := exec.Command(cmdLine[0], cmdLine[1:]...) 
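	// Pipe the rendered index.html into the mail command's stdin so it forms the body of the email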
in, err := os.Open(r.IndexHTML)
 	if err != nil {
-		log.Fatalf("Failed to open index.html: %v", err)
+		fs.Fatalf(nil, "Failed to open index.html: %v", err)
 	}
 	cmd.Stdin = in
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	err = cmd.Run()
 	if err != nil {
-		log.Fatalf("Failed to send email: %v", err)
+		fs.Fatalf(nil, "Failed to send email: %v", err)
 	}
 	_ = in.Close()
 }
@@ -302,14 +301,14 @@
 // uploadTo uploads a copy of the report online to the dir given
 func (r *Report) uploadTo(uploadDir string) {
 	dst := path.Join(*uploadPath, uploadDir)
-	log.Printf("Uploading results to %q", dst)
+	fs.Logf(nil, "Uploading results to %q", dst)
 	cmdLine := []string{"rclone", "sync", "--stats-log-level", "NOTICE", r.LogDir, dst}
 	cmd := exec.Command(cmdLine[0], cmdLine[1:]...)
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
 	err := cmd.Run()
 	if err != nil {
-		log.Fatalf("Failed to upload results: %v", err)
+		fs.Fatalf(nil, "Failed to upload results: %v", err)
 	}
 }
diff --git a/fstest/test_all/run.go b/fstest/test_all/run.go
index f156b7f18..40560d6e5 100644
--- a/fstest/test_all/run.go
+++ b/fstest/test_all/run.go
@@ -8,7 +8,6 @@ import (
 	"fmt"
 	"go/build"
 	"io"
-	"log"
 	"os"
 	"os/exec"
 	"path"
@@ -94,10 +93,10 @@ func (rs Runs) Less(i, j int) bool {
 // dumpOutput prints the error output
 func (r *Run) dumpOutput() {
-	log.Println("------------------------------------------------------------")
-	log.Printf("---- %q ----", r.CmdString)
-	log.Println(string(r.output))
-	log.Println("------------------------------------------------------------")
+	fs.Log(nil, "------------------------------------------------------------")
+	fs.Logf(nil, "---- %q ----", r.CmdString)
+	fs.Log(nil, string(r.output))
+	fs.Log(nil, "------------------------------------------------------------")
 }
 
 // trie for storing runs
@@ -180,7 +179,7 @@ func (r *Run) findFailures() {
 	}
 	r.FailedTests = newTests
 	if len(r.FailedTests) == 0 && ignored > 0 {
-		log.Printf("%q - Found %d ignored errors only - marking as good", r.CmdString, ignored)
+		fs.Logf(nil, "%q - Found %d ignored errors only - marking as good", r.CmdString, ignored)
 		r.err = nil
 		r.dumpOutput()
 		return
@@ -191,10 +190,10 @@
 		r.RunFlag = ""
 	}
 	if r.passed() && len(r.FailedTests) != 0 {
-		log.Printf("%q - Expecting no errors but got: %v", r.CmdString, r.FailedTests)
+		fs.Logf(nil, "%q - Expecting no errors but got: %v", r.CmdString, r.FailedTests)
 		r.dumpOutput()
 	} else if !r.passed() && len(r.FailedTests) == 0 {
-		log.Printf("%q - Expecting errors but got none: %v", r.CmdString, r.FailedTests)
+		fs.Logf(nil, "%q - Expecting errors but got none: %v", r.CmdString, r.FailedTests)
 		r.dumpOutput()
 		r.FailedTests = oldFailedTests
 	}
@@ -214,23 +213,23 @@ func (r *Run) trial() {
 	CmdLine := r.nextCmdLine()
 	CmdString := toShell(CmdLine)
 	msg := fmt.Sprintf("%q - Starting (try %d/%d)", CmdString, r.Try, *maxTries)
-	log.Println(msg)
+	fs.Log(nil, msg)
 	logName := path.Join(r.LogDir, r.TrialName)
 	out, err := os.Create(logName)
 	if err != nil {
-		log.Fatalf("Couldn't create log file: %v", err)
+		fs.Fatalf(nil, "Couldn't create log file: %v", err)
 	}
 	defer func() {
 		err := out.Close()
 		if err != nil {
-			log.Fatalf("Failed to close log file: %v", err)
+			fs.Fatalf(nil, "Failed to close log file: %v", err)
 		}
 	}()
 	_, _ = fmt.Fprintln(out, msg)
 
 	// Early exit if --dry-run
 	if *dryRun {
-		log.Printf("Not executing as --dry-run: %v", CmdLine)
+		fs.Logf(nil, "Not executing as --dry-run: %v", CmdLine)
 		_, _ = fmt.Fprintln(out, "--dry-run is set - not running")
 		return
 	}
@@ -238,7 +237,7 @@
 	// Start the test server if required
 	finish, err := testserver.Start(r.Remote)
 	if err != nil {
-		log.Printf("%s: Failed to start test server: %v", r.Remote, err)
+		fs.Logf(nil, "%s: Failed to start test server: %v", r.Remote, err)
 		_, _ = fmt.Fprintf(out, "%s: Failed to start test server: %v\n", r.Remote, err)
 		r.err = err
 		return
@@ -263,7 +262,7 @@
 	} else {
 		msg = fmt.Sprintf("%q - Finished ERROR in %v (try %d/%d): %v: Failed %v", CmdString, duration, r.Try, *maxTries, r.err, r.FailedTests)
 	}
-	log.Println(msg)
+	fs.Log(nil, msg)
 	_, _ = fmt.Fprintln(out, msg)
 }
@@ -304,23 +303,23 @@ func (r *Run) PackagePath() string {
 func (r *Run) MakeTestBinary() {
 	binary := r.BinaryPath()
 	binaryName := r.BinaryName()
-	log.Printf("%s: Making test binary %q", r.Path, binaryName)
+	fs.Logf(nil, "%s: Making test binary %q", r.Path, binaryName)
 	CmdLine := []string{"go", "test", "-c"}
 	if *race {
 		CmdLine = append(CmdLine, "-race")
 	}
 	if *dryRun {
-		log.Printf("Not executing: %v", CmdLine)
+		fs.Logf(nil, "Not executing: %v", CmdLine)
 		return
 	}
 	cmd := exec.Command(CmdLine[0], CmdLine[1:]...)
 	cmd.Dir = r.Path
 	err := cmd.Run()
 	if err != nil {
-		log.Fatalf("Failed to make test binary: %v", err)
+		fs.Fatalf(nil, "Failed to make test binary: %v", err)
 	}
 	if _, err := os.Stat(binary); err != nil {
-		log.Fatalf("Couldn't find test binary %q", binary)
+		fs.Fatalf(nil, "Couldn't find test binary %q", binary)
 	}
 }
@@ -332,7 +331,7 @@ func (r *Run) RemoveTestBinary() {
 	binary := r.BinaryPath()
 	err := os.Remove(binary) // Delete the binary when finished
 	if err != nil {
-		log.Printf("Error removing test binary %q: %v", binary, err)
+		fs.Logf(nil, "Error removing test binary %q: %v", binary, err)
 	}
 }
@@ -428,7 +427,7 @@ func (r *Run) Run(LogDir string, result chan<- *Run) {
 	for r.Try = 1; r.Try <= *maxTries; r.Try++ {
 		r.TrialName = r.Name() + ".txt"
 		r.TrialNames = append(r.TrialNames, r.TrialName)
-		log.Printf("Starting run with log %q", r.TrialName)
+		fs.Logf(nil, "Starting run with log %q", r.TrialName)
 		r.trial()
 		if r.passed() || r.NoRetries {
 			break
diff --git a/fstest/test_all/test_all.go b/fstest/test_all/test_all.go
index 79263f0e4..f1b40c34d 100644
--- a/fstest/test_all/test_all.go
+++ b/fstest/test_all/test_all.go
@@ -12,7 +12,7 @@ Make TestRun have a []string of flags to try - that then makes it generic
 import (
 	"flag"
-	"log"
+	"fmt"
 	"math/rand"
 	"os"
 	"path"
@@ -21,6 +21,7 @@ import (
 	"time"
 
 	_ "github.com/rclone/rclone/backend/all" // import all fs
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configfile"
 	"github.com/rclone/rclone/lib/pacer"
 )
@@ -68,8 +69,8 @@ func main() {
 	flag.Parse()
 	conf, err := NewConfig(*configFile)
 	if err != nil {
-		log.Println("test_all should be run from the root of the rclone source code")
-		log.Fatal(err)
+		fs.Log(nil, "test_all should be run from the root of the rclone source code")
+		fs.Fatal(nil, fmt.Sprint(err))
 	}
 	configfile.Install()
@@ -91,7 +92,7 @@ func main() {
 	if *clean {
 		err := cleanRemotes(conf)
 		if err != nil {
-			log.Fatalf("Failed to clean: %v", err)
+			fs.Fatalf(nil, "Failed to clean: %v", err)
 		}
 		return
 	}
@@ -100,7 +101,7 @@
 	for _, remote := range conf.Backends {
 		names = append(names, remote.Remote)
 	}
-	log.Printf("Testing remotes: %s", strings.Join(names, ", "))
+	fs.Logf(nil, "Testing remotes: %s", strings.Join(names, ", "))
 
 	// Runs we will do for this test in random order
 	runs := conf.MakeRuns()
diff --git a/go.mod b/go.mod
index d0acc41a7..954905cd1 100644
--- a/go.mod
+++
b/go.mod @@ -11,7 +11,6 @@ require ( github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 github.com/Files-com/files-sdk-go/v3 v3.2.34 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd - github.com/Unknwon/goconfig v1.0.0 github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 github.com/abbot/go-http-auth v0.4.0 @@ -52,13 +51,14 @@ require ( github.com/minio/minio-go/v7 v7.0.74 github.com/mitchellh/go-homedir v1.1.0 github.com/moby/sys/mountinfo v0.7.2 - github.com/ncw/swift/v2 v2.0.2 + github.com/ncw/swift/v2 v2.0.3 github.com/oracle/oci-go-sdk/v65 v65.69.2 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pkg/sftp v1.13.6 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 github.com/prometheus/client_golang v1.19.1 github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 + github.com/quasilyte/go-ruleguard/dsl v0.3.22 github.com/rclone/gofakes3 v0.0.3-0.20240807151802-e80146f8de87 github.com/rfjakob/eme v1.1.2 github.com/rivo/uniseg v0.4.7 @@ -70,6 +70,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 github.com/t3rm1n4l/go-mega v0.0.0-20240219080617-d494b6a8ace7 + github.com/unknwon/goconfig v1.0.0 github.com/willscott/go-nfs v0.0.3-0.20240425122109-91bc38957cc9 github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0 github.com/xanzy/ssh-agent v0.3.3 @@ -78,7 +79,6 @@ require ( go.etcd.io/bbolt v1.3.10 goftp.io/server/v2 v2.0.1 golang.org/x/crypto v0.25.0 - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 golang.org/x/net v0.27.0 golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.8.0 @@ -88,7 +88,7 @@ require ( google.golang.org/api v0.188.0 gopkg.in/validator.v2 v2.0.1 gopkg.in/yaml.v2 v2.4.0 - storj.io/uplink v1.13.0 + storj.io/uplink v1.13.1 ) require ( @@ -204,6 +204,7 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/mod v0.19.0 // indirect golang.org/x/tools v0.23.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect @@ -212,9 +213,9 @@ require ( google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect moul.io/http2curl/v2 v2.3.0 // indirect - storj.io/common v0.0.0-20240424123607-5f226fc92c16 // indirect - storj.io/drpc v0.0.33 // indirect - storj.io/eventkit v0.0.0-20240306141230-6cb545e5f892 // indirect + storj.io/common v0.0.0-20240812101423-26b53789c348 // indirect + storj.io/drpc v0.0.35-0.20240709171858-0075ac871661 // indirect + storj.io/eventkit v0.0.0-20240415002644-1d9596fee086 // indirect storj.io/infectious v0.0.2 // indirect storj.io/picobuf v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index f95f29590..81ff97f66 100644 --- a/go.sum +++ b/go.sum @@ -83,8 +83,6 @@ github.com/ProtonMail/gopenpgp/v2 v2.7.4 h1:Vz/8+HViFFnf2A6XX8JOvZMrA6F5puwNvvF2 github.com/ProtonMail/gopenpgp/v2 v2.7.4/go.mod h1:IhkNEDaxec6NyzSI0PlxapinnwPVIESk8/76da3Ct3g= github.com/PuerkitoBio/goquery v1.8.1 h1:uQxhNlArOIdbrH1tr0UXwdVFgDcZDrZVdcpygAcwmWM= github.com/PuerkitoBio/goquery v1.8.1/go.mod h1:Q8ICL1kNUJ2sXGoAhPGUdYDJvgQgHzJsnnd3H7Ho5jQ= -github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A= -github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw= github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e 
h1:KMVieI1/Ub++GYfnhyFPoGE3g5TUiG4srE3TMGr5nM4= github.com/a8m/tree v0.0.0-20240104212747-2c8764a5f17e/go.mod h1:j5astEcUkZQX8lK+KKlQ3NRQ50f4EE8ZjyZpCz3mrH4= github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 h1:hhdWprfSpFbN7lz3W1gM40vOgvSh1WCSMxYD6gGB4Hs= @@ -463,8 +461,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= -github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= +github.com/ncw/swift/v2 v2.0.3 h1:8R9dmgFIWs+RiVlisCEfiQiik1hjuR0JnOkLxaP9ihg= +github.com/ncw/swift/v2 v2.0.3/go.mod h1:cbAO76/ZwcFrFlHdXPjaqWZ9R7Hdar7HpjRXBfbjigk= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -513,6 +511,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8= github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU= +github.com/quasilyte/go-ruleguard/dsl v0.3.22 h1:wd8zkOhSNr+I+8Qeciml08ivDt1pSXe60+5DqOpCjPE= +github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= github.com/quic-go/qtls-go1-20 v0.4.1 h1:D33340mCNDAIKBqXuAvexTNMUByrYmFYVfKfDN5nfFs= github.com/quic-go/qtls-go1-20 v0.4.1/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= github.com/quic-go/quic-go v0.40.1 h1:X3AGzUNFs0jVuO3esAGnTfvdgvL4fq655WaOi1snv1Q= @@ -604,8 +604,8 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/willscott/go-nfs v0.0.2 h1:BaBp1CpGDMooCT6bCgX6h6ZwgPcTMST4yToYZ9byee0= -github.com/willscott/go-nfs v0.0.2/go.mod h1:SvullWeHxr/924WQNbUaZqtluBt2vuZ61g6yAV+xj7w= +github.com/unknwon/goconfig v1.0.0 h1:rS7O+CmUdli1T+oDm7fYj1MwqNWtEJfNj+FqcUHML8U= +github.com/unknwon/goconfig v1.0.0/go.mod h1:qu2ZQ/wcC/if2u32263HTVC39PeOQRSmidQk3DuDFQ8= github.com/willscott/go-nfs v0.0.3-0.20240425122109-91bc38957cc9 h1:IGSoH2aBagQ9VI8ZwbjHYIslta5vXfczegV1B4y9KqY= github.com/willscott/go-nfs v0.0.3-0.20240425122109-91bc38957cc9/go.mod h1:Ql2ebUpEFm/a1CAY884di2XZkdcddfHZ6ONrAlhFev0= github.com/willscott/go-nfs-client v0.0.0-20240104095149-b44639837b00 h1:U0DnHRZFzoIV1oFEZczg5XyPut9yxk9jjtax/9Bxr/o= @@ -1043,15 +1043,15 @@ moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHc rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -storj.io/common v0.0.0-20240424123607-5f226fc92c16 h1:Eg2S+KnMD7yCXnlxG5uHUrGOO9H7o1v4Fl/175n7tWc= -storj.io/common v0.0.0-20240424123607-5f226fc92c16/go.mod h1:MFl009RHY4tIqySVNy/6EmgRw2q60d26h9N/nb7JxGU= -storj.io/drpc v0.0.33 h1:yCGZ26r66ZdMP0IcTYsj7WDAUIIjzXk6DJhbhvt9FHI= -storj.io/drpc v0.0.33/go.mod h1:vR804UNzhBa49NOJ6HeLjd2H3MakC1j5Gv8bsOQT6N4= -storj.io/eventkit v0.0.0-20240306141230-6cb545e5f892 h1:IVzNtTR1VT+QNALCNLX/Z+0hzGo/Y2XI7umgG5PFwOk= -storj.io/eventkit v0.0.0-20240306141230-6cb545e5f892/go.mod h1:S6p41RzIBKoeGAdrziksWkiijnZXql9YcNsc23t0u+8= +storj.io/common v0.0.0-20240812101423-26b53789c348 h1:Urs3fX+1Fyb+CFKGw0mCJV3MPR499WM+Vs6osw4Rqtk= +storj.io/common v0.0.0-20240812101423-26b53789c348/go.mod h1:XMpwKxc04HCBl4H5IFCGv1ca5Dm0tvH4NL7Jx+JhxuA= +storj.io/drpc v0.0.35-0.20240709171858-0075ac871661 h1:hLvEV2RMTscX3JHPd+LSQCeTt8i1Q0Yt7U2EdfyMnaQ= +storj.io/drpc v0.0.35-0.20240709171858-0075ac871661/go.mod h1:Y9LZaa8esL1PW2IDMqJE7CFSNq7d5bQ3RI7mGPtmKMg= +storj.io/eventkit v0.0.0-20240415002644-1d9596fee086 h1:TkytkGUI6zGtH5Qx/O0VxQCcYJqOOiwRq0oMi4uM5Tg= +storj.io/eventkit v0.0.0-20240415002644-1d9596fee086/go.mod h1:S6p41RzIBKoeGAdrziksWkiijnZXql9YcNsc23t0u+8= storj.io/infectious v0.0.2 h1:rGIdDC/6gNYAStsxsZU79D/MqFjNyJc1tsyyj9sTl7Q= storj.io/infectious v0.0.2/go.mod h1:QEjKKww28Sjl1x8iDsjBpOM4r1Yp8RsowNcItsZJ1Vs= storj.io/picobuf v0.0.3 h1:xAUPB5ZUGfxkqd3bnw3zp01kkWb9wlhg4vtZWUs2S9A= storj.io/picobuf v0.0.3/go.mod h1:4V4xelV1RSCck5GgmkL/Txw9l6IfX3XcBzegmL5Kudo= -storj.io/uplink v1.13.0 h1:MAwzMaO4F86n2sMdNm7/m7LVyf8KD0FP+72h1H+HuRE= -storj.io/uplink v1.13.0/go.mod h1:MT8+V7qddgn1ra09piq3Idy4IUva2S90S7me7U8n6cE= +storj.io/uplink v1.13.1 h1:C8RdW/upALoCyuF16Lod9XGCXEdbJAS+ABQy9JO/0pA= +storj.io/uplink v1.13.1/go.mod h1:x0MQr4UfFsQBwgVWZAtEsLpuwAn6dg7G0Mpne1r516E= diff --git a/lib/cache/cache.go b/lib/cache/cache.go index fc73eab68..6f841d182 100644 --- a/lib/cache/cache.go +++ b/lib/cache/cache.go @@ -260,3 +260,19 @@ func (c *Cache) SetFinalizer(finalize func(interface{})) { c.finalize = finalize c.mu.Unlock() } + +// EntriesWithPinCount returns the number of pinned and unpinned entries in the cache +// +// Each entry is counted only once, regardless of entry.pinCount +func (c *Cache) EntriesWithPinCount() (pinned, unpinned int) { + c.mu.Lock() + for _, entry := range c.cache { + if entry.pinCount <= 0 { + unpinned++ + } else { + pinned++ + } + } + c.mu.Unlock() + return pinned, unpinned +} diff --git a/lib/encoder/encoder.go b/lib/encoder/encoder.go index 5efc8a99d..b1ff541a3 100644 --- a/lib/encoder/encoder.go +++ b/lib/encoder/encoder.go @@ -190,7 +190,7 @@ func (mask *MultiEncoder) Set(in string) error { if bits, ok := nameToEncoding[part]; ok { out |= bits } else { - i, err := strconv.ParseInt(part, 0, 64) + i, err := strconv.ParseUint(part, 0, 0) if err != nil { return fmt.Errorf("bad encoding %q: possible values are: %s", part, validStrings()) } diff --git a/lib/encoder/internal/gen/main.go b/lib/encoder/internal/gen/main.go index 5fd8c37ba..dfe556760 100644 --- a/lib/encoder/internal/gen/main.go +++ b/lib/encoder/internal/gen/main.go @@ -4,12 +4,12 @@ package main import ( "flag" "fmt" - "log" "math/rand" "os" "strconv" "strings" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/encoder" ) @@ -434,13 +434,13 @@ var testCasesDoubleEdge = []testCase{ func fatal(err error, s ...interface{}) { if err != nil { - log.Fatalln(append(s, err)) + fs.Fatal(nil, fmt.Sprint(append(s, err))) } } func fatalW(_ int, err 
error) func(...interface{}) { if err != nil { return func(s ...interface{}) { - log.Fatalln(append(s, err)) + fs.Fatal(nil, fmt.Sprint(append(s, err))) } } return func(s ...interface{}) {} diff --git a/lib/exitcode/exitcode.go b/lib/exitcode/exitcode.go index 6cedb3303..afe886197 100644 --- a/lib/exitcode/exitcode.go +++ b/lib/exitcode/exitcode.go @@ -4,10 +4,10 @@ package exitcode const ( // Success is returned when rclone finished without error. Success = iota - // UsageError is returned when there was a syntax or usage error in the arguments. - UsageError // UncategorizedError is returned for any error not categorised otherwise. UncategorizedError + // UsageError is returned when there was a syntax or usage error in the arguments. + UsageError // DirNotFound is returned when a source or destination directory is not found. DirNotFound // FileNotFound is returned when a source or destination file is not found. diff --git a/lib/file/mkdir_other.go b/lib/file/mkdir_other.go index 6baa7566b..3803885fb 100644 --- a/lib/file/mkdir_other.go +++ b/lib/file/mkdir_other.go @@ -1,10 +1,10 @@ -//go:build !windows +//go:build !windows || go1.22 package file import "os" -// MkdirAll just calls os.MkdirAll on non-Windows. +// MkdirAll just calls os.MkdirAll on non-Windows and with go1.22 or newer on Windows func MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) } diff --git a/lib/file/mkdir_windows.go b/lib/file/mkdir_windows.go index 4b5ba4f12..0da50c0c8 100644 --- a/lib/file/mkdir_windows.go +++ b/lib/file/mkdir_windows.go @@ -1,4 +1,4 @@ -//go:build windows +//go:build windows && !go1.22 package file diff --git a/lib/file/mkdir_windows_go122_test.go b/lib/file/mkdir_windows_go122_test.go new file mode 100644 index 000000000..eb9bbbef9 --- /dev/null +++ b/lib/file/mkdir_windows_go122_test.go @@ -0,0 +1,122 @@ +//go:build windows && go1.22 + +package file + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func checkMkdirAll(t *testing.T, path string, valid bool, errormsgs ...string) { + if valid { + assert.NoError(t, os.MkdirAll(path, 0777)) + } else { + err := os.MkdirAll(path, 0777) + assert.Error(t, err) + ok := false + for _, msg := range errormsgs { + if err.Error() == msg { + ok = true + } + } + assert.True(t, ok, fmt.Sprintf("Error message '%v' didn't match any of %v", err, errormsgs)) + } +} + +func checkMkdirAllSubdirs(t *testing.T, path string, valid bool, errormsgs ...string) { + checkMkdirAll(t, path, valid, errormsgs...) + checkMkdirAll(t, path+`\`, valid, errormsgs...) + checkMkdirAll(t, path+`\parent`, valid, errormsgs...) + checkMkdirAll(t, path+`\parent\`, valid, errormsgs...) + checkMkdirAll(t, path+`\parent\child`, valid, errormsgs...) + checkMkdirAll(t, path+`\parent\child\`, valid, errormsgs...) 
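+	// The trailing-backslash variants check that MkdirAll copes with paths that end in a separator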
+}
+
+// Testing paths on existing drive
+func TestMkdirAllOnDrive(t *testing.T) {
+	path := t.TempDir()
+
+	dir, err := os.Stat(path)
+	require.NoError(t, err)
+	require.True(t, dir.IsDir())
+
+	drive := filepath.VolumeName(path)
+
+	checkMkdirAll(t, drive, true, "")
+	checkMkdirAll(t, drive+`\`, true, "")
+	checkMkdirAll(t, `\\?\`+drive, false, fmt.Sprintf(`mkdir \\?\%s: Access is denied.`, drive)) // This isn't actually a valid Windows path; it worked under go1.21.3 but fails under go1.21.4 and newer
+	checkMkdirAll(t, `\\?\`+drive+`\`, true, "")
+	checkMkdirAllSubdirs(t, path, true, "")
+	checkMkdirAllSubdirs(t, `\\?\`+path, true, "")
+}
+
+// Testing paths on unused drive
+// This covers the cases that we wanted to improve with our own custom version
+// of golang's os.MkdirAll, introduced in PR #5401. Before go1.22 the original
+// os.MkdirAll would recurse extended-length paths down to the "\\?" prefix and
+// return the noninformative error:
+// "mkdir \\?: The filename, directory name, or volume label syntax is incorrect."
+// Our version stopped the recursion at the drive's root directory, and reported,
+// before go1.21.4:
+// "mkdir \\?\A:\: The system cannot find the path specified."
+// or, starting with go1.21.4:
+// "mkdir \\?\A:: The system cannot find the path specified."
+// See https://github.com/rclone/rclone/pull/5401.
+// Starting with go1.22 golang's os.MkdirAll has similar improvements that made
+// our custom version no longer necessary.
+func TestMkdirAllOnUnusedDrive(t *testing.T) {
+	letter := FindUnusedDriveLetter()
+	require.NotEqual(t, letter, 0)
+	drive := string(letter) + ":"
+	checkMkdirAll(t, drive, false, fmt.Sprintf(`mkdir %s: The system cannot find the path specified.`, drive))
+	checkMkdirAll(t, drive+`\`, false, fmt.Sprintf(`mkdir %s\: The system cannot find the path specified.`, drive))
+	checkMkdirAll(t, drive+`\parent`, false, fmt.Sprintf(`mkdir %s\parent: The system cannot find the path specified.`, drive))
+	checkMkdirAll(t, drive+`\parent\`, false, fmt.Sprintf(`mkdir %s\parent\: The system cannot find the path specified.`, drive))
+	checkMkdirAll(t, drive+`\parent\child`, false, fmt.Sprintf(`mkdir %s\parent: The system cannot find the path specified.`, drive))
+	checkMkdirAll(t, drive+`\parent\child\`, false, fmt.Sprintf(`mkdir %s\parent: The system cannot find the path specified.`, drive))
+
+	drive = `\\?\` + drive
+	checkMkdirAll(t, drive, false, fmt.Sprintf(`mkdir %s: The system cannot find the file specified.`, drive))
+	checkMkdirAll(t, drive+`\`, false, fmt.Sprintf(`mkdir %s\: The system cannot find the path specified.`, drive))
+	checkMkdirAll(t, drive+`\parent`, false, fmt.Sprintf(`mkdir %s\parent: The system cannot find the path specified.`, drive))
+	checkMkdirAll(t, drive+`\parent\`, false, fmt.Sprintf(`mkdir %s\parent\: The system cannot find the path specified.`, drive))
+	checkMkdirAll(t, drive+`\parent\child`, false, fmt.Sprintf(`mkdir %s\parent: The system cannot find the path specified.`, drive))
+	checkMkdirAll(t, drive+`\parent\child\`, false, fmt.Sprintf(`mkdir %s\parent: The system cannot find the path specified.`, drive))
+}
+
+// Testing paths on unknown network host
+// This covers more cases that we wanted to improve in our custom version of
+// golang's os.MkdirAll, extending those explained in TestMkdirAllOnUnusedDrive.
+// With our first fix, stopping it from recursing extended-length paths down to
+// the "\\?" prefix, it would now stop at `\\?\UNC`, because that is what
+// filepath.VolumeName returns (which is wrong, that is not a volume name!),
+// and still return a noninformative error:
+// "mkdir \\?\UNC\\: The filename, directory name, or volume label syntax is incorrect."
+// Our version stopped the recursion at the level before this, and reports:
+// "mkdir \\?\UNC\0.0.0.0: The specified path is invalid."
+// See https://github.com/rclone/rclone/pull/6420.
+// Starting with go1.22 golang's os.MkdirAll has similar improvements that made
+// our custom version no longer necessary.
+func TestMkdirAllOnUnusedNetworkHost(t *testing.T) {
+	sharePath := `\\0.0.0.0\share`
+	checkMkdirAll(t, sharePath, false, fmt.Sprintf(`mkdir %s: The format of the specified network name is invalid.`, sharePath))
+	checkMkdirAll(t, sharePath+`\`, false, fmt.Sprintf(`mkdir %s\: The format of the specified network name is invalid.`, sharePath))
+	checkMkdirAll(t, sharePath+`\parent`, false, fmt.Sprintf(`mkdir %s\parent: The format of the specified network name is invalid.`, sharePath))
+	checkMkdirAll(t, sharePath+`\parent\`, false, fmt.Sprintf(`mkdir %s\parent\: The format of the specified network name is invalid.`, sharePath))
+	checkMkdirAll(t, sharePath+`\parent\child`, false, fmt.Sprintf(`mkdir %s\parent: The format of the specified network name is invalid.`, sharePath))
+	checkMkdirAll(t, sharePath+`\parent\child\`, false, fmt.Sprintf(`mkdir %s\parent: The format of the specified network name is invalid.`, sharePath))
+
+	serverPath := `\\?\UNC\0.0.0.0`
+	sharePath = serverPath + `\share`
+	checkMkdirAll(t, sharePath, false, fmt.Sprintf(`mkdir %s: The specified path is invalid.`, serverPath))
+	checkMkdirAll(t, sharePath+`\`, false, fmt.Sprintf(`mkdir %s: The specified path is invalid.`, serverPath))
+	checkMkdirAll(t, sharePath+`\parent`, false, fmt.Sprintf(`mkdir %s: The specified path is invalid.`, serverPath))
+	checkMkdirAll(t, sharePath+`\parent\`, false, fmt.Sprintf(`mkdir %s: The specified path is invalid.`, serverPath))
+	checkMkdirAll(t, sharePath+`\parent\child`, false, fmt.Sprintf(`mkdir %s: The specified path is invalid.`, serverPath))
+	checkMkdirAll(t, sharePath+`\parent\child\`, false, fmt.Sprintf(`mkdir %s: The specified path is invalid.`, serverPath))
+}
diff --git a/lib/file/mkdir_windows_test.go b/lib/file/mkdir_windows_test.go
index 75e050bf7..e6d4bd389 100644
--- a/lib/file/mkdir_windows_test.go
+++ b/lib/file/mkdir_windows_test.go
@@ -1,4 +1,4 @@
-//go:build windows
+//go:build windows && !go1.22
 
 package file
diff --git a/lib/http/auth.go b/lib/http/auth.go
index 0c1137152..219b140a8 100644
--- a/lib/http/auth.go
+++ b/lib/http/auth.go
@@ -2,8 +2,8 @@ package http
 import (
 	"bytes"
+	"fmt"
 	"html/template"
-	"log"
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
@@ -42,7 +42,7 @@ Use ` + "`--{{ .Prefix }}salt`" + ` to change the password hashing salt from the
 `
 	tmpl, err := template.New("auth help").Parse(help)
 	if err != nil {
-		log.Fatal("Fatal error parsing template", err)
+		fs.Fatal(nil, fmt.Sprint("Fatal error parsing template", err))
 	}
 
 	data := struct {
@@ -53,7 +53,7 @@ Use ` + "`--{{ .Prefix }}salt`" + ` to change the password hashing salt from the
 	buf := &bytes.Buffer{}
 	err = tmpl.Execute(buf, data)
 	if err != nil {
-		log.Fatal("Fatal error executing template", err)
+		fs.Fatal(nil, fmt.Sprint("Fatal error executing template", err))
 	}
 	return buf.String()
 }
diff --git a/lib/http/serve/dir.go b/lib/http/serve/dir.go
index 664f81cf2..5a92ddeb8 100644
--- a/lib/http/serve/dir.go
+++ b/lib/http/serve/dir.go
@@ -2,6 +2,7 @@ package serve
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"html/template"
 	"net/http"
@@ -124,8 +125,8 @@ func (d *Directory) AddEntry(remote string, isDir bool) {
 }
 
 // Error logs the error and if a ResponseWriter is given it writes an http.StatusInternalServerError
-func Error(what interface{}, w http.ResponseWriter, text string, err error) {
-	err = fs.CountError(err)
+func Error(ctx context.Context, what interface{}, w http.ResponseWriter, text string, err error) {
+	err = fs.CountError(ctx, err)
 	fs.Errorf(what, "%s: %v", text, err)
 	if w != nil {
 		http.Error(w, text+".", http.StatusInternalServerError)
@@ -223,6 +224,7 @@ const (
 // Serve serves a directory
 func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 	// Account the transfer
 	tr := accounting.Stats(r.Context()).NewTransferRemoteSize(d.DirRemote, -1, nil, nil)
 	defer tr.Done(r.Context(), nil)
@@ -232,12 +234,12 @@ func (d *Directory) Serve(w http.ResponseWriter, r *http.Request) {
 	buf := &bytes.Buffer{}
 	err := d.HTMLTemplate.Execute(buf, d)
 	if err != nil {
-		Error(d.DirRemote, w, "Failed to render template", err)
+		Error(ctx, d.DirRemote, w, "Failed to render template", err)
 		return
 	}
 	w.Header().Set("Content-Length", fmt.Sprintf("%d", buf.Len()))
 	_, err = buf.WriteTo(w)
 	if err != nil {
-		Error(d.DirRemote, nil, "Failed to drain template buffer", err)
+		Error(ctx, d.DirRemote, nil, "Failed to drain template buffer", err)
 	}
 }
diff --git a/lib/http/serve/dir_test.go b/lib/http/serve/dir_test.go
index 7bedc0471..19ab22f57 100644
--- a/lib/http/serve/dir_test.go
+++ b/lib/http/serve/dir_test.go
@@ -1,6 +1,7 @@
 package serve
 
 import (
+	"context"
 	"errors"
 	"html/template"
 	"io"
@@ -88,9 +89,10 @@ func TestAddEntry(t *testing.T) {
 }
 
 func TestError(t *testing.T) {
+	ctx := context.Background()
 	w := httptest.NewRecorder()
 	err := errors.New("help")
-	Error("potato", w, "sausage", err)
+	Error(ctx, "potato", w, "sausage", err)
 	resp := w.Result()
 	assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
 	body, _ := io.ReadAll(resp.Body)
diff --git a/lib/http/server.go b/lib/http/server.go
index ca76fcf52..acc07c4b2 100644
--- a/lib/http/server.go
+++ b/lib/http/server.go
@@ -9,7 +9,6 @@ import (
 	"errors"
 	"fmt"
 	"html/template"
-	"log"
 	"net"
 	"net/http"
 	"os"
@@ -22,6 +21,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/lib/atexit"
+	sdActivation "github.com/rclone/rclone/lib/sdactivation"
 	"github.com/spf13/pflag"
 )
@@ -43,6 +43,7 @@ or just by using an absolute path name. Note that unix sockets bypass the
 authentication - this is expected to be done with file system permissions.
 
 ` + "`--{{ .Prefix }}addr`" + ` may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to accomplish the same.
 
 ` + "`--{{ .Prefix }}server-read-timeout` and `--{{ .Prefix }}server-write-timeout`" + ` can be used to
 control the timeouts on the server. Note that this is the total time
@@ -75,10 +76,25 @@ certificate authority certificate.
 values are "tls1.0", "tls1.1", "tls1.2" and "tls1.3" (default
 "tls1.0").
 
+### Socket activation
+
+Instead of the listening addresses specified above, rclone will listen to all
+FDs passed by the service manager, if any (and ignore any arguments passed by ` + "`--{{ .Prefix }}addr`" + `).
+
+This allows rclone to be a socket-activated service.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad hoc with the ` + "`systemd-socket-activate`" + ` command
+
+    systemd-socket-activate -l 8000 -- rclone serve
+
+This will socket-activate rclone on the first connection to port 8000 over TCP.
 `
 	tmpl, err := template.New("server help").Parse(help)
 	if err != nil {
-		log.Fatal("Fatal error parsing template", err)
+		fs.Fatal(nil, fmt.Sprint("Fatal error parsing template", err))
 	}
 
 	data := struct {
@@ -89,7 +105,7 @@ certificate authority certificate.
 	buf := &bytes.Buffer{}
 	err = tmpl.Execute(buf, data)
 	if err != nil {
-		log.Fatal("Fatal error executing template", err)
+		fs.Fatal(nil, fmt.Sprint("Fatal error executing template", err))
 	}
 	return buf.String()
 }
@@ -158,7 +174,7 @@ type Config struct {
 // AddFlagsPrefix adds flags for the httplib
 func (cfg *Config) AddFlagsPrefix(flagSet *pflag.FlagSet, prefix string) {
-	flags.StringArrayVarP(flagSet, &cfg.ListenAddr, prefix+"addr", "", cfg.ListenAddr, "IPaddress:Port or :Port to bind server to", prefix)
+	flags.StringArrayVarP(flagSet, &cfg.ListenAddr, prefix+"addr", "", cfg.ListenAddr, "IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to", prefix)
 	flags.DurationVarP(flagSet, &cfg.ServerReadTimeout, prefix+"server-read-timeout", "", cfg.ServerReadTimeout, "Timeout for server reading data", prefix)
 	flags.DurationVarP(flagSet, &cfg.ServerWriteTimeout, prefix+"server-write-timeout", "", cfg.ServerWriteTimeout, "Timeout for server writing data", prefix)
 	flags.IntVarP(flagSet, &cfg.MaxHeaderBytes, prefix+"max-header-bytes", "", cfg.MaxHeaderBytes, "Maximum size of request header", prefix)
@@ -199,7 +215,7 @@ func (s instance) serve(wg *sync.WaitGroup) {
 	defer wg.Done()
 	err := s.httpServer.Serve(s.listener)
 	if err != http.ErrServerClosed && err != nil {
-		log.Printf("%s: unexpected error: %s", s.listener.Addr(), err.Error())
+		fs.Logf(nil, "%s: unexpected error: %s", s.listener.Addr(), err.Error())
 	}
 }
@@ -241,6 +257,32 @@ func WithTemplate(cfg TemplateConfig) Option {
 	}
 }
 
+// For a given listener, and optional tlsConfig, construct an instance.
+// The url string ends up in the `url` field of the `instance`.
+// This unconditionally wraps the listener with the provided TLS config if one
+// is specified, so all decision logic on whether to use TLS needs to live at
+// the callsite.
+func newInstance(ctx context.Context, s *Server, listener net.Listener, tlsCfg *tls.Config, url string) *instance {
+	if tlsCfg != nil {
+		listener = tls.NewListener(listener, tlsCfg)
+	}
+
+	return &instance{
+		url:      url,
+		listener: listener,
+		httpServer: &http.Server{
+			Handler:           s.mux,
+			ReadTimeout:       s.cfg.ServerReadTimeout,
+			WriteTimeout:      s.cfg.ServerWriteTimeout,
+			MaxHeaderBytes:    s.cfg.MaxHeaderBytes,
+			ReadHeaderTimeout: 10 * time.Second, // time to send the headers
+			IdleTimeout:       60 * time.Second, // time to keep idle connections open
+			TLSConfig:         tlsCfg,
+			BaseContext:       NewBaseContext(ctx, url),
+		},
+	}
+}
+
 // NewServer instantiates a new http server using provided listeners and options
 // This function is provided if the default http server does not meet a service's requirements and should not generally be used
 // An HTTP server can listen using multiple listeners. For example, a listener for port 80, and a listener for port 443.
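To make the listener plumbing above concrete, here is a minimal, self-contained sketch of the same pattern outside rclone: prefer sockets inherited from the service manager, fall back to net.Listen, and wrap with TLS only when a tls.Config is supplied. The listeners helper, the fallback address and the plain http.Serve loop are illustrative assumptions rather than rclone's API; only activation.ListenersWithNames and tls.NewListener mirror the calls used in the patch.

    package main

    import (
    	"crypto/tls"
    	"net"
    	"net/http"

    	activation "github.com/coreos/go-systemd/v22/activation"
    )

    // listeners prefers sockets passed by the service manager (systemd socket
    // activation) and falls back to listening on addr itself. When tlsCfg is
    // non-nil every listener is wrapped, mirroring newInstance above.
    func listeners(addr string, tlsCfg *tls.Config) ([]net.Listener, error) {
    	named, err := activation.ListenersWithNames()
    	if err != nil {
    		return nil, err
    	}
    	var ls []net.Listener
    	for _, group := range named {
    		ls = append(ls, group...)
    	}
    	if len(ls) == 0 { // not socket-activated - listen ourselves
    		l, err := net.Listen("tcp", addr)
    		if err != nil {
    			return nil, err
    		}
    		ls = append(ls, l)
    	}
    	if tlsCfg != nil {
    		for i := range ls {
    			ls[i] = tls.NewListener(ls[i], tlsCfg)
    		}
    	}
    	return ls, nil
    }

    func main() {
    	ls, err := listeners("127.0.0.1:8080", nil)
    	if err != nil {
    		panic(err)
    	}
    	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
    		_, _ = w.Write([]byte("hello\n"))
    	})
    	for _, l := range ls[1:] { // serve any extra listeners concurrently
    		go func(l net.Listener) { _ = http.Serve(l, nil) }(l)
    	}
    	_ = http.Serve(ls[0], nil) // block on the first listener
    }

Run directly, this serves on the fallback address; run under systemd-socket-activate -l 8000 it serves the inherited socket instead, which is the behaviour the NewServer hunk below adds.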
@@ -289,55 +331,60 @@ func NewServer(ctx context.Context, options ...Option) (*Server, error) { s.initAuth() + // (Only) listen on FDs provided by the service manager, if any. + sdListeners, err := sdActivation.ListenersWithNames() + if err != nil { + return nil, fmt.Errorf("unable to acquire listeners: %w", err) + } + + if len(sdListeners) != 0 { + for listenerName, listeners := range sdListeners { + for i, listener := range listeners { + url := fmt.Sprintf("sd-listen:%s-%d/%s", listenerName, i, s.cfg.BaseURL) + if s.tlsConfig != nil { + url = fmt.Sprintf("sd-listen+tls:%s-%d/%s", listenerName, i, s.cfg.BaseURL) + } + + instance := newInstance(ctx, s, listener, s.tlsConfig, url) + + s.instances = append(s.instances, *instance) + } + } + + return s, nil + } + + // Process all listeners specified in the CLI Args. for _, addr := range s.cfg.ListenAddr { - var url string - var network = "tcp" - var tlsCfg *tls.Config + var instance *instance if strings.HasPrefix(addr, "unix://") || filepath.IsAbs(addr) { - network = "unix" addr = strings.TrimPrefix(addr, "unix://") - url = addr - } else if strings.HasPrefix(addr, "tls://") || (len(s.cfg.ListenAddr) == 1 && s.tlsConfig != nil) { - tlsCfg = s.tlsConfig - addr = strings.TrimPrefix(addr, "tls://") - } - - var listener net.Listener - if tlsCfg == nil { - listener, err = net.Listen(network, addr) - } else { - listener, err = tls.Listen(network, addr, tlsCfg) - } - if err != nil { - return nil, err - } - - if network == "tcp" { - var secure string - if tlsCfg != nil { - secure = "s" + listener, err := net.Listen("unix", addr) + if err != nil { + return nil, err } - url = fmt.Sprintf("http%s://%s%s/", secure, listener.Addr().String(), s.cfg.BaseURL) + instance = newInstance(ctx, s, listener, s.tlsConfig, addr) + } else if strings.HasPrefix(addr, "tls://") || (len(s.cfg.ListenAddr) == 1 && s.tlsConfig != nil) { + addr = strings.TrimPrefix(addr, "tls://") + listener, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + instance = newInstance(ctx, s, listener, s.tlsConfig, fmt.Sprintf("https://%s%s/", listener.Addr().String(), s.cfg.BaseURL)) + } else { + // HTTP case + addr = strings.TrimPrefix(addr, "http://") + listener, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + instance = newInstance(ctx, s, listener, nil, fmt.Sprintf("http://%s%s/", listener.Addr().String(), s.cfg.BaseURL)) + } - ii := instance{ - url: url, - listener: listener, - httpServer: &http.Server{ - Handler: s.mux, - ReadTimeout: s.cfg.ServerReadTimeout, - WriteTimeout: s.cfg.ServerWriteTimeout, - MaxHeaderBytes: s.cfg.MaxHeaderBytes, - ReadHeaderTimeout: 10 * time.Second, // time to send the headers - IdleTimeout: 60 * time.Second, // time to keep idle connections open - TLSConfig: tlsCfg, - BaseContext: NewBaseContext(ctx, url), - }, - } - - s.instances = append(s.instances, ii) + s.instances = append(s.instances, *instance) } return s, nil @@ -497,7 +544,7 @@ func (s *Server) Shutdown() error { expiry := time.Now().Add(gracefulShutdownTime) ctx, cancel := context.WithDeadline(context.Background(), expiry) if err := ii.httpServer.Shutdown(ctx); err != nil { - log.Printf("error shutting down server: %s", err) + fs.Logf(nil, "error shutting down server: %s", err) } cancel() } diff --git a/lib/http/template.go b/lib/http/template.go index e36e44b80..a79c06f63 100644 --- a/lib/http/template.go +++ b/lib/http/template.go @@ -3,8 +3,8 @@ package http import ( "bytes" "embed" + "fmt" "html/template" - "log" "os" "strings" "time" @@ -57,7 +57,7 
@@ be used to render HTML based on specific conditions.
 	tmpl, err := template.New("template help").Parse(help)
 	if err != nil {
-		log.Fatal("Fatal error parsing template", err)
+		fs.Fatal(nil, fmt.Sprint("Fatal error parsing template", err))
 	}
 
 	data := struct {
@@ -68,7 +68,7 @@ be used to render HTML based on specific conditions.
 	buf := &bytes.Buffer{}
 	err = tmpl.Execute(buf, data)
 	if err != nil {
-		log.Fatal("Fatal error executing template", err)
+		fs.Fatal(nil, fmt.Sprint("Fatal error executing template", err))
 	}
 	return buf.String()
 }
diff --git a/lib/pool/pool.go b/lib/pool/pool.go
index cf0ed93e2..4cb4d3a71 100644
--- a/lib/pool/pool.go
+++ b/lib/pool/pool.go
@@ -4,10 +4,10 @@ package pool
 import (
 	"fmt"
-	"log"
 	"sync"
 	"time"
 
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/mmap"
 )
@@ -161,7 +161,7 @@ func (bp *Pool) Get() []byte {
 			bp.alloced++
 			break
 		}
-		log.Printf("Failed to get memory for buffer, waiting for %v: %v", waitTime, err)
+		fs.Logf(nil, "Failed to get memory for buffer, waiting for %v: %v", waitTime, err)
 		bp.mu.Unlock()
 		time.Sleep(waitTime)
 		bp.mu.Lock()
@@ -178,7 +178,7 @@ func (bp *Pool) Get() []byte {
 func (bp *Pool) freeBuffer(mem []byte) {
 	err := bp.free(mem)
 	if err != nil {
-		log.Printf("Failed to free memory: %v", err)
+		fs.Logf(nil, "Failed to free memory: %v", err)
 	}
 	bp.alloced--
 }
diff --git a/lib/ranges/ranges.go b/lib/ranges/ranges.go
index f72be5adf..d14377cef 100644
--- a/lib/ranges/ranges.go
+++ b/lib/ranges/ranges.go
@@ -36,20 +36,6 @@ func (r *Range) Clip(offset int64) {
 	}
 }
 
-func min(a, b int64) int64 {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func max(a, b int64) int64 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
 // Intersection returns the common Range for two Range~s
 //
 // If there is no intersection then the Range returned will have
diff --git a/lib/sdactivation/sdactivation_stub.go b/lib/sdactivation/sdactivation_stub.go
new file mode 100644
index 000000000..e5fbff553
--- /dev/null
+++ b/lib/sdactivation/sdactivation_stub.go
@@ -0,0 +1,24 @@
+//go:build windows || plan9
+// +build windows plan9
+
+// Package sdactivation provides support for systemd socket activation,
+// wrapping the coreos/go-systemd package.
+// This wraps the underlying go-systemd library, as it fails to build on plan9
+// https://github.com/coreos/go-systemd/pull/440
+package sdactivation
+
+import (
+	"net"
+)
+
+// ListenersWithNames maps a listener name to a set of net.Listener instances.
+// This wraps the underlying go-systemd library, as it fails to build on plan9
+// https://github.com/coreos/go-systemd/pull/440
+func ListenersWithNames() (map[string][]net.Listener, error) {
+	return make(map[string][]net.Listener), nil
+}
+
+// Listeners returns a slice containing a net.Listener for each matching socket type passed to this process.
+func Listeners() ([]net.Listener, error) {
+	return nil, nil
+}
diff --git a/lib/sdactivation/sdactivation_unix.go b/lib/sdactivation/sdactivation_unix.go
new file mode 100644
index 000000000..523321bcf
--- /dev/null
+++ b/lib/sdactivation/sdactivation_unix.go
@@ -0,0 +1,24 @@
+//go:build !windows && !plan9
+// +build !windows,!plan9
+
+// Package sdactivation provides support for systemd socket activation, wrapping
+// the coreos/go-systemd package.
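+// Activated sockets are inherited as file descriptors from the service manager and surfaced here as ordinary net.Listeners.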
+// This wraps the underlying go-systemd library, as it fails to build on plan9 +// https://github.com/coreos/go-systemd/pull/440 +package sdactivation + +import ( + "net" + + sdActivation "github.com/coreos/go-systemd/v22/activation" +) + +// ListenersWithNames maps a listener name to a set of net.Listener instances. +func ListenersWithNames() (map[string][]net.Listener, error) { + return sdActivation.ListenersWithNames() +} + +// Listeners returns a slice containing a net.Listener for each matching socket type passed to this process. +func Listeners() ([]net.Listener, error) { + return sdActivation.Listeners() +} diff --git a/lib/systemd/notify.go b/lib/systemd/notify.go index 237343147..056a8628d 100644 --- a/lib/systemd/notify.go +++ b/lib/systemd/notify.go @@ -2,10 +2,10 @@ package systemd import ( "fmt" - "log" "sync" "github.com/coreos/go-systemd/v22/daemon" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/lib/atexit" ) @@ -18,13 +18,13 @@ import ( // It should not be called as a result of rc commands. See #7540. func Notify() func() { if _, err := daemon.SdNotify(false, daemon.SdNotifyReady); err != nil { - log.Printf("failed to notify ready to systemd: %v", err) + fs.Logf(nil, "failed to notify ready to systemd: %v", err) } var finaliseOnce sync.Once finalise := func() { finaliseOnce.Do(func() { if _, err := daemon.SdNotify(false, daemon.SdNotifyStopping); err != nil { - log.Printf("failed to notify stopping to systemd: %v", err) + fs.Logf(nil, "failed to notify stopping to systemd: %v", err) } }) } diff --git a/rclone.1 b/rclone.1 index 4b8022693..12a2f7604 100644 --- a/rclone.1 +++ b/rclone.1 @@ -1,7 +1,7 @@ .\"t .\" Automatically generated by Pandoc 2.9.2.1 .\" -.TH "rclone" "1" "Jun 14, 2024" "User Manual" "" +.TH "rclone" "1" "Sep 08, 2024" "User Manual" "" .hy .SH Rclone syncs your files to cloud storage .PP @@ -178,8 +178,12 @@ Enterprise File Fabric .IP \[bu] 2 Fastmail Files .IP \[bu] 2 +Files.com +.IP \[bu] 2 FTP .IP \[bu] 2 +Gofile +.IP \[bu] 2 Google Cloud Storage .IP \[bu] 2 Google Drive @@ -256,6 +260,8 @@ Petabox .IP \[bu] 2 PikPak .IP \[bu] 2 +Pixeldrain +.IP \[bu] 2 premiumize.me .IP \[bu] 2 put.io @@ -942,6 +948,14 @@ executable will be in your GOPATH bin folder go install github.com/rclone/rclone\[at]latest \f[R] .fi +.PP +In some situations, rclone executable size might be too big for +deployment in very restricted environments when all backends with large +SDKs are included. 
+To limit binary size unused backends can be commented out in +\f[C]backends/all/all.go\f[R] and unused commands in +\f[C]cmd/all/all.go\f[R] before building with \f[C]go build\f[R] or +\f[C]make\f[R] .SS Ansible installation .PP This can be done with Stefan Weichinger\[aq]s ansible @@ -1216,8 +1230,12 @@ Dropbox (https://rclone.org/dropbox/) .IP \[bu] 2 Enterprise File Fabric (https://rclone.org/filefabric/) .IP \[bu] 2 +Files.com (https://rclone.org/filescom/) +.IP \[bu] 2 FTP (https://rclone.org/ftp/) .IP \[bu] 2 +Gofile (https://rclone.org/gofile/) +.IP \[bu] 2 Google Cloud Storage (https://rclone.org/googlecloudstorage/) .IP \[bu] 2 Google Drive (https://rclone.org/drive/) @@ -1229,6 +1247,8 @@ remotes .IP \[bu] 2 HDFS (https://rclone.org/hdfs/) .IP \[bu] 2 +Hetzner Storage Box (https://rclone.org/sftp/#hetzner-storage-box) +.IP \[bu] 2 HiDrive (https://rclone.org/hidrive/) .IP \[bu] 2 HTTP (https://rclone.org/http/) @@ -1264,6 +1284,8 @@ Pcloud (https://rclone.org/pcloud/) .IP \[bu] 2 PikPak (https://rclone.org/pikpak/) .IP \[bu] 2 +Pixeldrain (https://rclone.org/pixeldrain/) +.IP \[bu] 2 premiumize.me (https://rclone.org/premiumizeme/) .IP \[bu] 2 put.io (https://rclone.org/putio/) @@ -1274,6 +1296,8 @@ QingStor (https://rclone.org/qingstor/) .IP \[bu] 2 Quatrix by Maytech (https://rclone.org/quatrix/) .IP \[bu] 2 +rsync.net (https://rclone.org/sftp/#rsync-net) +.IP \[bu] 2 Seafile (https://rclone.org/seafile/) .IP \[bu] 2 SFTP (https://rclone.org/sftp/) @@ -1380,7 +1404,7 @@ rclone config [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1401,6 +1425,10 @@ Dump the config file as JSON. rclone config edit (https://rclone.org/commands/rclone_config_edit/) - Enter an interactive configuration session. .IP \[bu] 2 +rclone config +encryption (https://rclone.org/commands/rclone_config_encryption/) - +set, remove and check the encryption for the config file +.IP \[bu] 2 rclone config file (https://rclone.org/commands/rclone_config_file/) - Show path of configuration file in use. .IP \[bu] 2 @@ -1544,15 +1572,19 @@ rclone copy source:path dest:path [flags] -h, --help help for copy \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Copy Options .PP -Flags for anything which can Copy a file. +Flags for anything which can copy a file .IP .nf \f[C] --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -1586,7 +1618,7 @@ Flags for anything which can Copy a file. .fi .SS Important Options .PP -Important flags useful for most commands. 
+Important flags useful for most commands .IP .nf \f[C] @@ -1597,7 +1629,7 @@ Important flags useful for most commands. .fi .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -1627,7 +1659,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -1635,10 +1667,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1787,15 +1816,19 @@ rclone sync source:path dest:path [flags] -t, --timeformat string Specify a custom time format, or \[aq]max\[aq] for max precision supported by remote (default: 2006-01-02 15:04:05) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Copy Options .PP -Flags for anything which can Copy a file. +Flags for anything which can copy a file .IP .nf \f[C] --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -1829,7 +1862,7 @@ Flags for anything which can Copy a file. .fi .SS Sync Options .PP -Flags just used for \f[C]rclone sync\f[R]. +Flags used for sync commands .IP .nf \f[C] @@ -1849,7 +1882,7 @@ Flags just used for \f[C]rclone sync\f[R]. .fi .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -1860,7 +1893,7 @@ Important flags useful for most commands. .fi .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -1890,7 +1923,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -1898,10 +1931,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -1963,15 +1993,19 @@ rclone move source:path dest:path [flags] -h, --help help for move \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Copy Options .PP -Flags for anything which can Copy a file. 
+Flags for anything which can copy a file .IP .nf \f[C] --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -2005,7 +2039,7 @@ Flags for anything which can Copy a file. .fi .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -2016,7 +2050,7 @@ Important flags useful for most commands. .fi .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -2046,7 +2080,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -2054,10 +2088,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2119,9 +2150,13 @@ rclone delete remote:path [flags] --rmdirs rmdirs removes empty directories but leaves root intact \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -2132,7 +2167,7 @@ Important flags useful for most commands. .fi .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -2162,7 +2197,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -2170,10 +2205,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2206,9 +2238,13 @@ rclone purge remote:path [flags] -h, --help help for purge \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -2217,10 +2253,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. 
-.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2240,9 +2273,13 @@ rclone mkdir remote:path [flags] -h, --help help for mkdir \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -2251,10 +2288,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2285,9 +2319,13 @@ rclone rmdir remote:path [flags] -h, --help help for rmdir \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -2296,10 +2334,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2389,9 +2424,13 @@ rclone check source:path dest:path [flags] --one-way Check one way only, source files must exist on remote \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Check Options .PP -Flags used for \f[C]rclone check\f[R]. +Flags used for check commands .IP .nf \f[C] @@ -2400,7 +2439,7 @@ Flags used for \f[C]rclone check\f[R]. .fi .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -2430,7 +2469,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -2438,10 +2477,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2507,9 +2543,13 @@ rclone ls remote:path [flags] -h, --help help for ls \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -2539,7 +2579,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -2547,10 +2587,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. 
-.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2632,9 +2669,13 @@ rclone lsd remote:path [flags] -R, --recursive Recurse into the listing \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -2664,7 +2705,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -2672,10 +2713,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2741,9 +2779,13 @@ rclone lsl remote:path [flags] -h, --help help for lsl \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -2773,7 +2815,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -2781,10 +2823,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2827,9 +2866,13 @@ rclone md5sum remote:path [flags] --output-file string Output hashsums to a file rather than the terminal \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -2859,7 +2902,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -2867,10 +2910,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2916,9 +2956,13 @@ rclone sha1sum remote:path [flags] --output-file string Output hashsums to a file rather than the terminal \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -2948,7 +2992,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. 
+Flags for listing directories .IP .nf \f[C] @@ -2956,10 +3000,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -2998,9 +3039,13 @@ rclone size remote:path [flags] --json Format output as JSON \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -3030,7 +3075,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -3038,10 +3083,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3115,7 +3157,7 @@ rclone version [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3140,9 +3182,13 @@ rclone cleanup remote:path [flags] -h, --help help for cleanup \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -3151,10 +3197,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3326,9 +3369,13 @@ rclone dedupe [mode] remote:path [flags] -h, --help help for dedupe \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -3337,10 +3384,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3349,8 +3393,7 @@ commands, flags and backends. Get quota information from the remote. .SS Synopsis .PP -\f[C]rclone about\f[R] prints quota information about a remote to -standard output. +Prints quota information about a remote to standard output. The output is typically used, free, quota and trash contents. .PP E.g. 
@@ -3436,7 +3479,7 @@ rclone about remote: [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3473,7 +3516,7 @@ rclone authorize [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3540,9 +3583,13 @@ rclone backend remote:path [opts] [flags] -o, --option stringArray Option in the form name=value or name \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -3551,10 +3598,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3620,15 +3664,19 @@ rclone bisync remote1:path1 remote2:path2 [flags] --workdir string Use custom working dir - useful for testing. (default: {WORKDIR}) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Copy Options .PP -Flags for anything which can Copy a file. +Flags for anything which can copy a file .IP .nf \f[C] --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -3662,7 +3710,7 @@ Flags for anything which can Copy a file. .fi .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -3673,7 +3721,7 @@ Important flags useful for most commands. .fi .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -3701,10 +3749,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3713,7 +3758,7 @@ commands, flags and backends. Concatenates any files and sends them to stdout. .SS Synopsis .PP -rclone cat sends any files to standard output. +Sends any files to standard output. 
.PP You can use it like this to output a single file .IP @@ -3788,9 +3833,13 @@ rclone cat remote:path [flags] --tail int Only print the last N characters \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -3820,7 +3869,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -3828,10 +3877,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3913,9 +3959,13 @@ rclone checksum sumfile dst:path [flags] --one-way Check one way only, source files must exist on remote \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -3945,7 +3995,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -3953,10 +4003,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -3977,7 +4024,7 @@ Run with \f[C]--help\f[R] to list the supported shells. .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -4008,7 +4055,7 @@ By default, when run without any arguments, .IP .nf \f[C] -rclone genautocomplete bash +rclone completion bash \f[R] .fi .PP @@ -4058,7 +4105,7 @@ rclone completion bash [output_file] [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone completion (https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell. @@ -4074,7 +4121,7 @@ probably need to be run with sudo or as root, e.g. .IP .nf \f[C] -sudo rclone genautocomplete fish +sudo rclone completion fish \f[R] .fi .PP @@ -4107,7 +4154,7 @@ rclone completion fish [output_file] [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone completion (https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell. @@ -4147,7 +4194,7 @@ rclone completion powershell [output_file] [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone completion (https://rclone.org/commands/rclone_completion/) - Output completion script for a given shell. 
@@ -4163,7 +4210,7 @@ will probably need to be run with sudo or as root, e.g.
.IP
.nf
\f[C]
-sudo rclone genautocomplete zsh
+sudo rclone completion zsh
\f[R]
.fi
.PP
@@ -4196,7 +4243,7 @@ rclone completion zsh [output_file] [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone completion (https://rclone.org/commands/rclone_completion/) -
Output completion script for a given shell.
@@ -4361,7 +4408,7 @@ rclone config create name type [key value]* [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone config (https://rclone.org/commands/rclone_config/) - Enter an
interactive configuration session.
@@ -4384,7 +4431,7 @@ rclone config delete name [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone config (https://rclone.org/commands/rclone_config/) - Enter an
interactive configuration session.
@@ -4414,7 +4461,7 @@ rclone config disconnect remote: [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone config (https://rclone.org/commands/rclone_config/) - Enter an
interactive configuration session.
@@ -4437,7 +4484,7 @@ rclone config dump [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone config (https://rclone.org/commands/rclone_config/) - Enter an
interactive configuration session.
@@ -4465,10 +4512,158 @@ rclone config edit [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone config (https://rclone.org/commands/rclone_config/) - Enter an
interactive configuration session.
+.SH rclone config encryption
+.PP
+set, remove and check the encryption for the config file
+.SS Synopsis
+.PP
+This command sets, clears and checks the encryption for the config file
+using the subcommands below.
+.SS Options
+.IP
+.nf
+\f[C]
+ -h, --help help for encryption
+\f[R]
+.fi
+.PP
+See the global flags page (https://rclone.org/flags/) for global options
+not listed here.
+.SS See Also
+.IP \[bu] 2
+rclone config (https://rclone.org/commands/rclone_config/) - Enter an
+interactive configuration session.
+.IP \[bu] 2
+rclone config encryption
+check (https://rclone.org/commands/rclone_config_encryption_check/) -
+Check that the config file is encrypted
+.IP \[bu] 2
+rclone config encryption
+remove (https://rclone.org/commands/rclone_config_encryption_remove/) -
+Remove the config file encryption password
+.IP \[bu] 2
+rclone config encryption
+set (https://rclone.org/commands/rclone_config_encryption_set/) - Set or
+change the config file encryption password
+.SH rclone config encryption check
+.PP
+Check that the config file is encrypted
+.SS Synopsis
+.PP
+This checks the config file is encrypted and that you can decrypt it.
+.PP
+It will attempt to decrypt the config using the password you supply.
+.PP
+If decryption fails it will return a non-zero exit code if using
+\f[C]--password-command\f[R], otherwise it will prompt again for the
+password.
+.PP
+If the config file is not encrypted it will return a non-zero exit code.
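+.PP
+For example, a wrapper script could verify the config non-interactively
+before doing anything else.
+This is only an illustrative sketch; the password file path is a
+placeholder:
+.IP
+.nf
+\f[C]
+rclone config encryption check --password-command \[dq]cat /path/to/passwordfile\[dq]
+\f[R]
+.fi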
+.IP
+.nf
+\f[C]
+rclone config encryption check [flags]
+\f[R]
+.fi
+.SS Options
+.IP
+.nf
+\f[C]
+ -h, --help help for check
+\f[R]
+.fi
+.PP
+See the global flags page (https://rclone.org/flags/) for global options
+not listed here.
+.SS See Also
+.IP \[bu] 2
+rclone config
+encryption (https://rclone.org/commands/rclone_config_encryption/) -
+set, remove and check the encryption for the config file
+.SH rclone config encryption remove
+.PP
+Remove the config file encryption password
+.SS Synopsis
+.PP
+Remove the config file encryption password
+.PP
+This removes the config file encryption, returning it to unencrypted.
+.PP
+If \f[C]--password-command\f[R] is in use, this will be called to supply
+the old config password.
+.PP
+If the config was not encrypted then no error will be returned and this
+command will do nothing.
+.IP
+.nf
+\f[C]
+rclone config encryption remove [flags]
+\f[R]
+.fi
+.SS Options
+.IP
+.nf
+\f[C]
+ -h, --help help for remove
+\f[R]
+.fi
+.PP
+See the global flags page (https://rclone.org/flags/) for global options
+not listed here.
+.SS See Also
+.IP \[bu] 2
+rclone config
+encryption (https://rclone.org/commands/rclone_config_encryption/) -
+set, remove and check the encryption for the config file
+.SH rclone config encryption set
+.PP
+Set or change the config file encryption password
+.SS Synopsis
+.PP
+This command sets or changes the config file encryption password.
+.PP
+If there was no config password set then it sets a new one, otherwise it
+changes the existing config password.
+.PP
+Note that if you are changing an encryption password using
+\f[C]--password-command\f[R] then this will be called once to decrypt
+the config using the old password and then again to read the new
+password to re-encrypt the config.
+.PP
+When \f[C]--password-command\f[R] is called to change the password then
+the environment variable \f[C]RCLONE_PASSWORD_CHANGE=1\f[R] will be set.
+So if changing passwords programmatically you can use the environment
+variable to distinguish which password you must supply.
+.PP
+Alternatively you can remove the password first (with
+\f[C]rclone config encryption remove\f[R]), then set it again with this
+command, which may be easier if you don\[aq]t mind the unencrypted
+config file being on the disk briefly.
+.IP
+.nf
+\f[C]
+rclone config encryption set [flags]
+\f[R]
+.fi
+.SS Options
+.IP
+.nf
+\f[C]
+ -h, --help help for set
+\f[R]
+.fi
+.PP
+See the global flags page (https://rclone.org/flags/) for global options
+not listed here.
+.SS See Also
+.IP \[bu] 2
+rclone config
+encryption (https://rclone.org/commands/rclone_config_encryption/) -
+set, remove and check the encryption for the config file
.SH rclone config file
.PP
Show path of configuration file in use.
@@ -4488,7 +4683,7 @@ rclone config file [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone config (https://rclone.org/commands/rclone_config/) - Enter an
interactive configuration session.
@@ -4529,7 +4724,7 @@ rclone config password name [key value]+ [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone config (https://rclone.org/commands/rclone_config/) - Enter an
interactive configuration session.
@@ -4552,7 +4747,7 @@ rclone config paths [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone config (https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4575,7 +4770,7 @@ rclone config providers [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone config (https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4605,7 +4800,7 @@ rclone config reconnect remote: [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone config (https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4641,7 +4836,7 @@ rclone config redacted [] [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone config (https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4664,7 +4859,7 @@ rclone config show [] [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone config (https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4687,7 +4882,7 @@ rclone config touch [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone config (https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4852,7 +5047,7 @@ rclone config update name [key value]+ [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone config (https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4880,7 +5075,7 @@ rclone config userinfo remote: [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone config (https://rclone.org/commands/rclone_config/) - Enter an interactive configuration session. @@ -4939,15 +5134,19 @@ rclone copyto source:path dest:path [flags] -h, --help help for copyto \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Copy Options .PP -Flags for anything which can Copy a file. +Flags for anything which can copy a file .IP .nf \f[C] --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -4981,7 +5180,7 @@ Flags for anything which can Copy a file. .fi .SS Important Options .PP -Important flags useful for most commands. 
+Important flags useful for most commands .IP .nf \f[C] @@ -4992,7 +5191,7 @@ Important flags useful for most commands. .fi .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -5022,7 +5221,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -5030,10 +5229,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -5095,9 +5291,13 @@ rclone copyurl https://example.com dest:path [flags] --stdout Write the output to stdout rather than a file \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -5106,10 +5306,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -5118,8 +5315,7 @@ commands, flags and backends. Cryptcheck checks the integrity of an encrypted remote. .SS Synopsis .PP -rclone cryptcheck checks a remote against a -crypted (https://rclone.org/crypt/) remote. +Checks a remote against a crypted (https://rclone.org/crypt/) remote. This is the equivalent of running rclone check (https://rclone.org/commands/rclone_check/), but able to check the checksums of the encrypted remote. @@ -5208,9 +5404,13 @@ rclone cryptcheck remote:path cryptedremote:path [flags] --one-way Check one way only, source files must exist on remote \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Check Options .PP -Flags used for \f[C]rclone check\f[R]. +Flags used for check commands .IP .nf \f[C] @@ -5219,7 +5419,7 @@ Flags used for \f[C]rclone check\f[R]. .fi .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -5249,7 +5449,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -5257,10 +5457,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -5269,8 +5466,8 @@ commands, flags and backends. Cryptdecode returns unencrypted file names. .SS Synopsis .PP -rclone cryptdecode returns unencrypted file names when provided with a -list of encrypted file names. +Returns unencrypted file names when provided with a list of encrypted +file names. List limit is 10 items. 
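+.PP
+For example (the remote name and encrypted names below are purely
+illustrative):
+.IP
+.nf
+\f[C]
+rclone cryptdecode secret: encryptedname1 encryptedname2
+\f[R]
+.fi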
.PP If you supply the \f[C]--reverse\f[R] flag, it will return encrypted @@ -5307,7 +5504,7 @@ rclone cryptdecode encryptedremote: encryptedfilename [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -5333,9 +5530,13 @@ rclone deletefile remote:path [flags] -h, --help help for deletefile \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -5344,196 +5545,10 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. -.SH rclone genautocomplete -.PP -Output completion script for a given shell. -.SH Synopsis -.PP -Generates a shell completion script for rclone. -Run with \f[C]--help\f[R] to list the supported shells. -.SH Options -.IP -.nf -\f[C] - -h, --help help for genautocomplete -\f[R] -.fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO -.IP \[bu] 2 -rclone (https://rclone.org/commands/rclone/) - Show help for rclone -commands, flags and backends. -.IP \[bu] 2 -rclone genautocomplete -bash (https://rclone.org/commands/rclone_genautocomplete_bash/) - Output -bash completion script for rclone. -.IP \[bu] 2 -rclone genautocomplete -fish (https://rclone.org/commands/rclone_genautocomplete_fish/) - Output -fish completion script for rclone. -.IP \[bu] 2 -rclone genautocomplete -zsh (https://rclone.org/commands/rclone_genautocomplete_zsh/) - Output -zsh completion script for rclone. -.SH rclone genautocomplete bash -.PP -Output bash completion script for rclone. -.SH Synopsis -.PP -Generates a bash shell autocompletion script for rclone. -.PP -This writes to /etc/bash_completion.d/rclone by default so will probably -need to be run with sudo or as root, e.g. -.IP -.nf -\f[C] -sudo rclone genautocomplete bash -\f[R] -.fi -.PP -Logout and login again to use the autocompletion scripts, or source them -directly -.IP -.nf -\f[C] -\&. /etc/bash_completion -\f[R] -.fi -.PP -If you supply a command line argument the script will be written there. -.PP -If output_file is \[dq]-\[dq], then the output will be written to -stdout. -.IP -.nf -\f[C] -rclone genautocomplete bash [output_file] [flags] -\f[R] -.fi -.SH Options -.IP -.nf -\f[C] - -h, --help help for bash -\f[R] -.fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO -.IP \[bu] 2 -rclone -genautocomplete (https://rclone.org/commands/rclone_genautocomplete/) - -Output completion script for a given shell. -.SH rclone genautocomplete fish -.PP -Output fish completion script for rclone. -.SH Synopsis -.PP -Generates a fish autocompletion script for rclone. -.PP -This writes to /etc/fish/completions/rclone.fish by default so will -probably need to be run with sudo or as root, e.g. 
-.IP -.nf -\f[C] -sudo rclone genautocomplete fish -\f[R] -.fi -.PP -Logout and login again to use the autocompletion scripts, or source them -directly -.IP -.nf -\f[C] -\&. /etc/fish/completions/rclone.fish -\f[R] -.fi -.PP -If you supply a command line argument the script will be written there. -.PP -If output_file is \[dq]-\[dq], then the output will be written to -stdout. -.IP -.nf -\f[C] -rclone genautocomplete fish [output_file] [flags] -\f[R] -.fi -.SH Options -.IP -.nf -\f[C] - -h, --help help for fish -\f[R] -.fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO -.IP \[bu] 2 -rclone -genautocomplete (https://rclone.org/commands/rclone_genautocomplete/) - -Output completion script for a given shell. -.SH rclone genautocomplete zsh -.PP -Output zsh completion script for rclone. -.SH Synopsis -.PP -Generates a zsh autocompletion script for rclone. -.PP -This writes to /usr/share/zsh/vendor-completions/_rclone by default so -will probably need to be run with sudo or as root, e.g. -.IP -.nf -\f[C] -sudo rclone genautocomplete zsh -\f[R] -.fi -.PP -Logout and login again to use the autocompletion scripts, or source them -directly -.IP -.nf -\f[C] -autoload -U compinit && compinit -\f[R] -.fi -.PP -If you supply a command line argument the script will be written there. -.PP -If output_file is \[dq]-\[dq], then the output will be written to -stdout. -.IP -.nf -\f[C] -rclone genautocomplete zsh [output_file] [flags] -\f[R] -.fi -.SH Options -.IP -.nf -\f[C] - -h, --help help for zsh -\f[R] -.fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO -.IP \[bu] 2 -rclone -genautocomplete (https://rclone.org/commands/rclone_genautocomplete/) - -Output completion script for a given shell. .SH rclone gendocs .PP Output markdown docs for rclone to the directory supplied. @@ -5559,7 +5574,7 @@ rclone gendocs output_directory [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -5670,7 +5685,7 @@ rclone gitannex [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -5738,9 +5753,13 @@ rclone hashsum [ remote:path] [flags] --output-file string Output hashsums to a file rather than the terminal \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -5770,7 +5789,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -5778,10 +5797,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -5790,8 +5806,7 @@ commands, flags and backends. Generate public link to file/folder. 
.SS Synopsis
.PP
-rclone link will create, retrieve or remove a public link to the given
-file or folder.
+Create, retrieve or remove a public link to the given file or folder.
.IP
.nf
\f[C]
@@ -5834,7 +5849,7 @@ rclone link remote:path [flags]
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone (https://rclone.org/commands/rclone/) - Show help for rclone
commands, flags and backends.
@@ -5844,28 +5859,44 @@ List all the remotes in the config file and defined in environment
variables.
.SS Synopsis
.PP
-rclone listremotes lists all the available remotes from the config file.
+Lists all the available remotes from the config file, or the remotes
+matching an optional filter.
.PP
-When used with the \f[C]--long\f[R] flag it lists the types and the
-descriptions too.
+Prints the result in human-readable format by default: a simple list
+of remote names, or, with flag \f[C]--long\f[R], a tabular format
+including the remote names, types and descriptions.
+Using flag \f[C]--json\f[R] produces machine-readable output instead,
+which always includes all attributes - including the source (file or
+environment).
+.PP
+The result can be filtered by a filter argument, which applies to all
+attributes, and/or by filter flags specific to each attribute.
+The values must be specified according to regular rclone filtering
+pattern syntax.
.IP
.nf
\f[C]
-rclone listremotes [flags]
+rclone listremotes [] [flags]
\f[R]
.fi
.SS Options
.IP
.nf
\f[C]
- -h, --help help for listremotes
- --long Show the type and the description as well as names
+ --description string Filter remotes by description
+ -h, --help help for listremotes
+ --json Format output as JSON
+ --long Show type and description in addition to name
+ --name string Filter remotes by name
+ --order-by string Instructions on how to order the result, e.g. \[aq]type,name=descending\[aq]
+ --source string Filter remotes by source, e.g. \[aq]file\[aq] or \[aq]environment\[aq]
+ --type string Filter remotes by type
\f[R]
.fi
.PP
See the global flags page (https://rclone.org/flags/) for global options
not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone (https://rclone.org/commands/rclone/) - Show help for rclone
commands, flags and backends.
@@ -6076,9 +6107,13 @@ rclone lsf remote:path [flags]
 -t, --time-format string Specify a custom time format, or \[aq]max\[aq] for max precision supported by remote (default: 2006-01-02 15:04:05)
\f[R]
.fi
+.PP
+Options shared with other commands are described next.
+See the global flags page (https://rclone.org/flags/) for global options
+not listed here.
.SS Filter Options
.PP
-Flags for filtering directory listings.
+Flags for filtering directory listings
.IP
.nf
\f[C]
@@ -6108,7 +6143,7 @@ Flags for filtering directory listings.
.fi
.SS Listing Options
.PP
-Flags for listing directories.
+Flags for listing directories
.IP
.nf
\f[C]
@@ -6116,10 +6151,7 @@ Flags for listing directories.
 --fast-list Use recursive list if available; uses more memory but fewer transactions
\f[R]
.fi
-.PP
-See the global flags page (https://rclone.org/flags/) for global options
-not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone (https://rclone.org/commands/rclone/) - Show help for rclone
commands, flags and backends.
@@ -6130,7 +6162,7 @@ List directories and objects in the path in JSON format.
.PP
List directories and objects in the path in JSON format.
.PP
-The output is an array of Items, where each Item looks like this
+The output is an array of Items, where each Item looks like this:
.IP
.nf
\f[C]
@@ -6156,41 +6188,58 @@ The output is an array of Items, where each Item looks like this
\f[R]
.fi
.PP
+The exact set of properties included depends on the backend:
+.IP \[bu] 2
+The property IsBucket will only be included for bucket-based remotes,
+and only for directories that are buckets.
+It will always be omitted when the value is not true.
+.IP \[bu] 2
+Properties Encrypted and EncryptedPath will only be included for
+encrypted remotes, and (as mentioned below) only if the
+\f[C]--encrypted\f[R] option is set.
+.PP
+Different options may also affect which properties are included:
+.IP \[bu] 2
If \f[C]--hash\f[R] is not specified, the Hashes property will be
omitted.
The types of hash can be specified with the \f[C]--hash-type\f[R]
parameter (which may be repeated).
If \f[C]--hash-type\f[R] is set then it implies \f[C]--hash\f[R].
-.PP
+.IP \[bu] 2
If \f[C]--no-modtime\f[R] is specified then ModTime will be blank.
This can speed things up on remotes where reading the ModTime takes an
extra request (e.g. s3, swift).
-.PP
+.IP \[bu] 2
If \f[C]--no-mimetype\f[R] is specified then MimeType will be blank.
This can speed things up on remotes where reading the MimeType takes an
extra request (e.g. s3, swift).
+.IP \[bu] 2
+If \f[C]--encrypted\f[R] is not specified the Encrypted and
+EncryptedPath properties will be omitted - even for encrypted remotes.
+.IP \[bu] 2
+If \f[C]--metadata\f[R] is set then an additional Metadata property will
+be returned.
+This will have metadata (https://rclone.org/docs/#metadata) in rclone
+standard format as a JSON object.
.PP
-If \f[C]--encrypted\f[R] is not specified the Encrypted will be omitted.
+The default is to list directories and files/objects, but this can be
+changed with the following options:
+.IP \[bu] 2
+If \f[C]--dirs-only\f[R] is specified then directories will be returned
+only, no files/objects.
+.IP \[bu] 2
+If \f[C]--files-only\f[R] is specified then files will be returned only,
+no directories.
.PP
-If \f[C]--dirs-only\f[R] is not specified files in addition to
-directories are returned
-.PP
-If \f[C]--files-only\f[R] is not specified directories in addition to
-the files will be returned.
-.PP
-If \f[C]--metadata\f[R] is set then an additional Metadata key will be
-returned.
-This will have metadata in rclone standard format as a JSON object.
-.PP
-if \f[C]--stat\f[R] is set then a single JSON blob will be returned
-about the item pointed to.
-This will return an error if the item isn\[aq]t found.
-However on bucket based backends (like s3, gcs, b2, azureblob etc) if
-the item isn\[aq]t found it will return an empty directory as it
-isn\[aq]t possible to tell empty directories from missing directories
-there.
+If \f[C]--stat\f[R] is set then the output is not an array of items,
+but instead a single JSON blob will be returned about the item pointed
+to.
+This will return an error if the item isn\[aq]t found; however, on
+bucket-based backends (like s3, gcs, b2, azureblob etc) if the item
+isn\[aq]t found it will return an empty directory, as it isn\[aq]t
+possible to tell empty directories from missing directories there.
.PP
The Path field will only show folders below the remote path being
listed.
@@ -6200,10 +6249,6 @@ not \[dq]remote:path/subfolder/file.txt\[dq].
When used without \f[C]--recursive\f[R] the Path will always be the same
as Name.
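+.PP
+As an illustration, the array output can be post-processed with
+standard JSON tooling (\f[C]jq\f[R] here is an external tool, not part
+of rclone) to extract just the paths of files:
+.IP
+.nf
+\f[C]
+rclone lsjson --files-only --recursive remote:path | jq -r \[aq].[].Path\[aq]
+\f[R]
+.fi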
.PP
-If the directory is a bucket in a bucket-based backend, then
-\[dq]IsBucket\[dq] will be set to true.
-This key won\[aq]t be present unless it is \[dq]true\[dq].
-.PP
The time is in RFC3339 format with up to nanosecond precision.
The number of decimal digits in the seconds will depend on the precision
that the remote can hold the times, so if times are accurate to the
@@ -6214,7 +6259,8 @@ accurate to the nearest second (Dropbox, Box, WebDav, etc.)
no digits
will be shown (\[dq]2017-05-31T16:15:57+01:00\[dq]).
.PP
The whole output can be processed as a JSON blob, or alternatively it
-can be processed line by line as each item is written one to a line.
+can be processed line by line as each item is written on individual
+lines (except with \f[C]--stat\f[R]).
.PP
Any of the filtering options can be applied to this command.
.PP
@@ -6268,9 +6314,13 @@ rclone lsjson remote:path [flags]
 --stat Just return the info for the pointed to file
\f[R]
.fi
+.PP
+Options shared with other commands are described next.
+See the global flags page (https://rclone.org/flags/) for global options
+not listed here.
.SS Filter Options
.PP
-Flags for filtering directory listings.
+Flags for filtering directory listings
.IP
.nf
\f[C]
@@ -6300,7 +6350,7 @@ Flags for filtering directory listings.
.fi
.SS Listing Options
.PP
-Flags for listing directories.
+Flags for listing directories
.IP
.nf
\f[C]
@@ -6308,10 +6358,7 @@ Flags for listing directories.
 --fast-list Use recursive list if available; uses more memory but fewer transactions
\f[R]
.fi
-.PP
-See the global flags page (https://rclone.org/flags/) for global options
-not listed here.
-.SH SEE ALSO
+.SS See Also
.IP \[bu] 2
rclone (https://rclone.org/commands/rclone/) - Show help for rclone
commands, flags and backends.
@@ -7188,9 +7235,14 @@ These flags control the chunking:
\f[C]
--vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M)
--vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off)
+--vfs-read-chunk-streams int The number of parallel streams to read at once
\f[R]
.fi
.PP
+The chunking behaves differently depending on the
+\f[C]--vfs-read-chunk-streams\f[R] parameter.
+.SS \f[C]--vfs-read-chunk-streams\f[R] == 0
+.PP
Rclone will start reading a chunk of size
\f[C]--vfs-read-chunk-size\f[R], and then double the size for each read.
When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater
@@ -7208,6 +7260,31 @@ on.
.PP
Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq]
disables chunked reading.
+.PP
+The chunks will not be buffered in memory.
+.SS \f[C]--vfs-read-chunk-streams\f[R] > 0
+.PP
+Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size
+\f[C]--vfs-read-chunk-size\f[R] concurrently.
+The size for each read will stay constant.
+.PP
+This improves performance massively on high latency links or very
+high bandwidth links to high performance object stores.
+.PP
+Some experimentation will be needed to find the optimum values of
+\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R]
+as these will depend on the backend in use and the latency to the
+backend.
+.PP
+For high performance object stores (e.g. AWS S3) a reasonable place to
+start might be \f[C]--vfs-read-chunk-streams 16\f[R] and
+\f[C]--vfs-read-chunk-size 4M\f[R].
+In testing with AWS S3 the performance scaled roughly as the
+\f[C]--vfs-read-chunk-streams\f[R] setting.
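+.PP
+A minimal sketch of a mount using these settings (the remote, the
+mountpoint and the values are placeholders to tune from, not a
+recommendation):
+.IP
+.nf
+\f[C]
+rclone mount remote:path /path/to/mountpoint --vfs-read-chunk-size 4M --vfs-read-chunk-streams 16
+\f[R]
+.fi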
+.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput. .SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -7369,9 +7446,9 @@ rclone mount remote:path /path/to/mountpoint [flags] --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for mount @@ -7387,7 +7464,7 @@ rclone mount remote:path /path/to/mountpoint [flags] --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -7400,6 +7477,7 @@ rclone mount remote:path /path/to/mountpoint [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -7409,9 +7487,13 @@ rclone mount remote:path /path/to/mountpoint [flags] --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -7439,10 +7521,7 @@ Flags for filtering directory listings. 
--min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -7504,15 +7583,19 @@ rclone moveto source:path dest:path [flags] -h, --help help for moveto \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Copy Options .PP -Flags for anything which can Copy a file. +Flags for anything which can copy a file .IP .nf \f[C] --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -7546,7 +7629,7 @@ Flags for anything which can Copy a file. .fi .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -7557,7 +7640,7 @@ Important flags useful for most commands. .fi .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -7587,7 +7670,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -7595,10 +7678,7 @@ Flags for listing directories. --fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -7668,6 +7748,7 @@ e means this is an empty directory, i.e. contains no files (but This an homage to the ncdu tool (https://dev.yorhel.nl/ncdu) but for rclone remotes. It is missing lots of features at the moment but is useful as it stands. +Unlike ncdu it does not show excluded files. .PP Note that it might take some time to delete big files/directories. The UI won\[aq]t respond in the meantime since the deletion is done @@ -7690,9 +7771,13 @@ rclone ncdu remote:path [flags] -h, --help help for ncdu \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -7722,7 +7807,7 @@ Flags for filtering directory listings. .fi .SS Listing Options .PP -Flags for listing directories. +Flags for listing directories .IP .nf \f[C] @@ -7730,10 +7815,7 @@ Flags for listing directories. 
--fast-list Use recursive list if available; uses more memory but fewer transactions \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -8611,9 +8693,14 @@ These flags control the chunking: \f[C] --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) +--vfs-read-chunk-streams int The number of parallel streams to read at once \f[R] .fi .PP +The chunking behaves differently depending on the +\f[C]--vfs-read-chunk-streams\f[R] parameter. +.SS \f[C]--vfs-read-chunk-streams\f[R] == 0 +.PP Rclone will start reading a chunk of size \f[C]--vfs-read-chunk-size\f[R], and then double the size for each read. When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater @@ -8631,6 +8718,31 @@ on. .PP Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq] disables chunked reading. +.PP +The chunks will not be buffered in memory. +.SS \f[C]--vfs-read-chunk-streams\f[R] > 0 +.PP +Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size +\f[C]--vfs-read-chunk-size\f[R] concurrently. +The size for each read will stay constant. +.PP +This improves performance massively on high latency links or +very high bandwidth links to high performance object stores. +.PP +Some experimentation will be needed to find the optimum values of +\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R] +as these will depend on the backend in use and the latency to the +backend. +.PP +For high performance object stores (eg AWS S3) a reasonable place to +start might be \f[C]--vfs-read-chunk-streams 16\f[R] and +\f[C]--vfs-read-chunk-size 4M\f[R]. +In testing with AWS S3 the performance scaled roughly as the +\f[C]--vfs-read-chunk-streams\f[R] setting. +.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput.
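+.PP +For example, a mount using the suggested starting values might look like this (an illustrative invocation; the remote and mountpoint are placeholders and the optimum values depend on the backend and link): +.IP +.nf +\f[C] +rclone nfsmount remote: /path/to/mountpoint --vfs-read-chunk-streams 16 --vfs-read-chunk-size 4M +\f[R] +.fi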
.SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -8793,16 +8905,18 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for nfsmount --max-read-ahead SizeSuffix The number of bytes that can be prefetched for sequential reads (not supported on Windows) (default 128Ki) --mount-case-insensitive Tristate Tell the OS the mount is case insensitive (true) or sensitive (false) regardless of the backend (auto) (default unset) --network-mode Mount as remote network drive, instead of fixed disk drive (supported on Windows only) + --nfs-cache-dir string The directory the NFS handle cache will use if set --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000) + --nfs-cache-type memory|disk|symlink Type of NFS handle cache to use (default memory) --no-checksum Don\[aq]t compare checksums on up/download --no-modtime Don\[aq]t read/write the modification time (can speed things up) --no-seek Don\[aq]t allow seeking in files @@ -8811,9 +8925,9 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] -o, --option stringArray Option for libfuse/WinFsp (repeat if required) --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access - --sudo Use sudo to run the mount command as root. + --sudo Use sudo to run the mount/umount commands as root. 
--uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -8826,6 +8940,7 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -8835,9 +8950,13 @@ rclone nfsmount remote:path /path/to/mountpoint [flags] --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -8865,10 +8984,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -8921,7 +9037,7 @@ rclone obscure password [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -8944,6 +9060,18 @@ Note that \f[C]--rc-addr\f[R], \f[C]--rc-user\f[R], \f[C]--rc-pass\f[R] will be read also for \f[C]--url\f[R], \f[C]--user\f[R], \f[C]--pass\f[R]. .PP +The \f[C]--unix-socket\f[R] flag can be used to connect over a unix +socket like this +.IP +.nf +\f[C] +# start server on /tmp/my.socket +rclone rcd --rc-addr unix:///tmp/my.socket +# Connect to it +rclone rc --unix-socket /tmp/my.socket core/stats +\f[R] +.fi +.PP Arguments should be passed in as parameter=value. .PP The result will be returned as a JSON object by default. 
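+.PP +For example, the built-in \f[C]rc/echo\f[R] command returns whatever parameters it is passed, which makes it a convenient way to check how arguments are parsed: +.IP +.nf +\f[C] +rclone rc rc/echo param1=one param2=two +{ +    \[dq]param1\[dq]: \[dq]one\[dq], +    \[dq]param2\[dq]: \[dq]two\[dq] +} +\f[R] +.fi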
@@ -9015,21 +9143,22 @@ rclone rc commands parameter [flags] .IP .nf \f[C] - -a, --arg stringArray Argument placed in the \[dq]arg\[dq] array - -h, --help help for rc - --json string Input JSON - use instead of key=value args - --loopback If set connect to this rclone instance not via HTTP - --no-output If set, don\[aq]t output the JSON result - -o, --opt stringArray Option in the form name=value or name placed in the \[dq]opt\[dq] array - --pass string Password to use to connect to rclone remote control - --url string URL to connect to rclone remote control (default \[dq]http://localhost:5572/\[dq]) - --user string Username to use to rclone remote control + -a, --arg stringArray Argument placed in the \[dq]arg\[dq] array + -h, --help help for rc + --json string Input JSON - use instead of key=value args + --loopback If set connect to this rclone instance not via HTTP + --no-output If set, don\[aq]t output the JSON result + -o, --opt stringArray Option in the form name=value or name placed in the \[dq]opt\[dq] array + --pass string Password to use to connect to rclone remote control + --unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly + --url string URL to connect to rclone remote control (default \[dq]http://localhost:5572/\[dq]) + --user string Username to use to rclone remote control \f[R] .fi .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -9038,8 +9167,7 @@ Copies standard input to file on remote. .SS Synopsis .PP -rclone rcat reads from standard input (stdin) and copies it to a single -remote file. +Reads from standard input (stdin) and copies it to a single remote file. .IP .nf \f[C] @@ -9092,9 +9220,13 @@ rclone rcat remote:path [flags] --size int File size hint to preallocate (default -1) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -9103,10 +9235,7 @@ Important Options .PP -q, --quiet Print as little stuff as possible -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -9144,6 +9273,8 @@ be done with file system permissions. .PP \f[C]--rc-addr\f[R] may be repeated to listen on multiple IPs/ports/sockets. +Socket activation, described further below, can also be used to +accomplish the same. .PP \f[C]--rc-server-read-timeout\f[R] and \f[C]--rc-server-write-timeout\f[R] can be used to control the timeouts @@ -9181,7 +9312,29 @@ authority certificate. acceptable. Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). -.SS Template +.SS Socket activation +.PP +Instead of the listening addresses specified above, rclone will listen +to all FDs passed by the service manager, if any (and ignore any +arguments passed by \f[C]--rc-addr\f[R]). +.PP +This allows rclone to be a socket-activated service.
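+.PP +For illustration, a minimal pair of systemd unit files for a socket-activated \f[C]rclone rcd\f[R] might look like the sketch below (the unit names, binary path and port are assumptions to adapt, not shipped defaults): +.IP +.nf +\f[C] +# rclone-rcd.socket (hypothetical unit name) +[Socket] +ListenStream=5572 +[Install] +WantedBy=sockets.target +# rclone-rcd.service (hypothetical unit name) +[Service] +ExecStart=/usr/bin/rclone rcd +\f[R] +.fi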
+It can be configured with .socket and .service unit files as described +in +https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html +.PP +Socket activation can be tested ad-hoc with the +\f[C]systemd-socket-activate\f[R] command +.IP +.nf +\f[C] + systemd-socket-activate -l 8000 -- rclone serve +\f[R] +.fi +.PP +This will socket-activate rclone on the first connection to port 8000 +over TCP. +.SS Template .PP \f[C]--rc-template\f[R] allows a user to specify a custom markup template for HTTP and WebDAV serve functions. @@ -9363,19 +9516,23 @@ rclone rcd * [flags] -h, --help help for rcd \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS RC Options .PP -Flags to control the Remote Control API. +Flags to control the Remote Control API .IP .nf \f[C] --rc Enable the remote control server - --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572]) + --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [\[dq]localhost:5572\[dq]]) --rc-allow-origin string Origin which cross-domain request (CORS) can be executed from --rc-baseurl string Prefix for URLs - leave blank for root --rc-cert string TLS PEM key (concatenation of certificate and CA certificate) --rc-client-ca string Client certificate authority to verify clients with - --rc-enable-metrics Enable prometheus metrics on /metrics + --rc-enable-metrics Enable the Prometheus metrics path at the remote control server --rc-files string Path to local files to serve on the HTTP server --rc-htpasswd string A htpasswd file - if not provided no authentication is done --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s) @@ -9400,10 +9557,7 @@ Flags to control the Remote Control API. --rc-web-gui-update Check and update to latest version of web gui \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -9445,9 +9599,13 @@ rclone rmdirs remote:path [flags] --leave-root Do not remove root directory if empty \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Important Options .PP -Important flags useful for most commands. +Important flags useful for most commands .IP .nf \f[C] @@ -9456,10 +9614,7 @@ Important flags useful for most commands. -v, --verbose count Print lots more stuff (repeat for more) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -9554,7 +9709,7 @@ rclone selfupdate [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -9589,7 +9744,7 @@ rclone serve [opts] [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends.
@@ -9637,6 +9792,11 @@ based on media formats or file extensions. Additionally, there is no media transcoding support. This means that some players might show files that they are not able to play back correctly. +.PP +Rclone will add external subtitle files (.srt) to videos if they have +the same filename as the video file itself (except the extension), +either in the same directory as the video, or in a \[dq]Subs\[dq] +subdirectory. .SS Server options .PP Use \f[C]--addr\f[R] to specify which IP address and port the server @@ -9925,9 +10085,14 @@ These flags control the chunking: \f[C] --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) +--vfs-read-chunk-streams int The number of parallel streams to read at once \f[R] .fi .PP +The chunking behaves differently depending on the +\f[C]--vfs-read-chunk-streams\f[R] parameter. +.SS \f[C]--vfs-read-chunk-streams\f[R] == 0 +.PP Rclone will start reading a chunk of size \f[C]--vfs-read-chunk-size\f[R], and then double the size for each read. When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater @@ -9945,6 +10110,31 @@ on. .PP Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq] disables chunked reading. +.PP +The chunks will not be buffered in memory. +.SS \f[C]--vfs-read-chunk-streams\f[R] > 0 +.PP +Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size +\f[C]--vfs-read-chunk-size\f[R] concurrently. +The size for each read will stay constant. +.PP +This improves performance massively on high latency links or +very high bandwidth links to high performance object stores. +.PP +Some experimentation will be needed to find the optimum values of +\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R] +as these will depend on the backend in use and the latency to the +backend. +.PP +For high performance object stores (eg AWS S3) a reasonable place to +start might be \f[C]--vfs-read-chunk-streams 16\f[R] and +\f[C]--vfs-read-chunk-size 4M\f[R]. +In testing with AWS S3 the performance scaled roughly as the +\f[C]--vfs-read-chunk-streams\f[R] setting. +.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput.
.SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -10097,8 +10287,8 @@ rclone serve dlna remote:path [flags] --addr string The ip:port or :port to bind the DLNA http server to (default \[dq]:7879\[dq]) --announce-interval Duration The interval between SSDP announcements (default 12m0s) --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for dlna --interface stringArray The interface to use for SSDP (repeat as necessary) @@ -10110,7 +10300,7 @@ rclone serve dlna remote:path [flags] --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -10123,6 +10313,7 @@ rclone serve dlna remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -10130,9 +10321,13 @@ rclone serve dlna remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -10160,10 +10355,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. 
@@ -10497,9 +10689,14 @@ These flags control the chunking: \f[C] --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) +--vfs-read-chunk-streams int The number of parallel streams to read at once \f[R] .fi .PP +The chunking behaves differently depending on the +\f[C]--vfs-read-chunk-streams\f[R] parameter. +.SS \f[C]--vfs-read-chunk-streams\f[R] == 0 +.PP Rclone will start reading a chunk of size \f[C]--vfs-read-chunk-size\f[R], and then double the size for each read. When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater @@ -10517,6 +10714,31 @@ on. .PP Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq] disables chunked reading. +.PP +The chunks will not be buffered in memory. +.SS \f[C]--vfs-read-chunk-streams\f[R] > 0 +.PP +Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size +\f[C]--vfs-read-chunk-size\f[R] concurrently. +The size for each read will stay constant. +.PP +This improves performance massively on high latency links or +very high bandwidth links to high performance object stores. +.PP +Some experimentation will be needed to find the optimum values of +\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R] +as these will depend on the backend in use and the latency to the +backend. +.PP +For high performance object stores (eg AWS S3) a reasonable place to +start might be \f[C]--vfs-read-chunk-streams 16\f[R] and +\f[C]--vfs-read-chunk-size 4M\f[R]. +In testing with AWS S3 the performance scaled roughly as the +\f[C]--vfs-read-chunk-streams\f[R] setting. +.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput.
.SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -10679,9 +10901,9 @@ rclone serve docker [flags] --default-permissions Makes kernel enforce access control based on the file mode (not supported on Windows) --devname string Set the device name - default is remote:path --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --direct-io Use Direct IO, disables caching of data - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --forget-state Skip restoring previous state --fuse-flag stringArray Flags or arguments to be passed direct to libfuse/WinFsp (repeat if required) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) @@ -10701,7 +10923,7 @@ rclone serve docker [flags] --socket-addr string Address or absolute path (default: /run/docker/plugins/rclone.sock) --socket-gid int GID for unix socket (default: current process GID) (default 1000) --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -10714,6 +10936,7 @@ rclone serve docker [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -10723,9 +10946,13 @@ rclone serve docker [flags] --write-back-cache Makes kernel buffer writes before sending them to rclone (without this, writethrough caching is used) (not supported on Windows) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -10753,10 +10980,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. 
@@ -11060,9 +11284,14 @@ These flags control the chunking: \f[C] --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) +--vfs-read-chunk-streams int The number of parallel streams to read at once \f[R] .fi .PP +The chunking behaves differently depending on the +\f[C]--vfs-read-chunk-streams\f[R] parameter. +.SS \f[C]--vfs-read-chunk-streams\f[R] == 0 +.PP Rclone will start reading a chunk of size \f[C]--vfs-read-chunk-size\f[R], and then double the size for each read. When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater @@ -11080,6 +11309,31 @@ on. .PP Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq] disables chunked reading. +.PP +The chunks will not be buffered in memory. +.SS \f[C]--vfs-read-chunk-streams\f[R] > 0 +.PP +Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size +\f[C]--vfs-read-chunk-size\f[R] concurrently. +The size for each read will stay constant. +.PP +This improves performance massively on high latency links or +very high bandwidth links to high performance object stores. +.PP +Some experimentation will be needed to find the optimum values of +\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R] +as these will depend on the backend in use and the latency to the +backend. +.PP +For high performance object stores (eg AWS S3) a reasonable place to +start might be \f[C]--vfs-read-chunk-streams 16\f[R] and +\f[C]--vfs-read-chunk-size 4M\f[R]. +In testing with AWS S3 the performance scaled roughly as the +\f[C]--vfs-read-chunk-streams\f[R] setting. +.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput.
.SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -11326,8 +11580,8 @@ rclone serve ftp remote:path [flags] --auth-proxy string A program to use to create the backend from the auth --cert string TLS PEM key (concatenation of certificate and CA certificate) --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for ftp --key string TLS PEM Private key @@ -11340,7 +11594,7 @@ rclone serve ftp remote:path [flags] --public-ip string Public IP address to advertise for passive connections --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication (default \[dq]anonymous\[dq]) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -11354,6 +11608,7 @@ rclone serve ftp remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -11361,9 +11616,13 @@ rclone serve ftp remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -11391,10 +11650,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. @@ -11434,6 +11690,8 @@ be done with file system permissions. .PP \f[C]--addr\f[R] may be repeated to listen on multiple IPs/ports/sockets. +Socket activation, described further below, can also be used to +accomplish the same. .PP \f[C]--server-read-timeout\f[R] and \f[C]--server-write-timeout\f[R] can be used to control the timeouts on the server. 
@@ -11468,7 +11726,29 @@ authority certificate. \f[C]--min-tls-version\f[R] is minimum TLS version that is acceptable. Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). -.SS Template +.SS Socket activation +.PP +Instead of the listening addresses specified above, rclone will listen +to all FDs passed by the service manager, if any (and ignore any +arguments passed by \f[C]--addr\f[R]). +.PP +This allows rclone to be a socket-activated service. +It can be configured with .socket and .service unit files as described +in +https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html +.PP +Socket activation can be tested ad-hoc with the +\f[C]systemd-socket-activate\f[R] command +.IP +.nf +\f[C] + systemd-socket-activate -l 8000 -- rclone serve +\f[R] +.fi +.PP +This will socket-activate rclone on the first connection to port 8000 +over TCP. +.SS Template .PP \f[C]--template\f[R] allows a user to specify a custom markup template for HTTP and WebDAV serve functions. @@ -11912,9 +12192,14 @@ These flags control the chunking: \f[C] --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) +--vfs-read-chunk-streams int The number of parallel streams to read at once \f[R] .fi .PP +The chunking behaves differently depending on the +\f[C]--vfs-read-chunk-streams\f[R] parameter. +.SS \f[C]--vfs-read-chunk-streams\f[R] == 0 +.PP Rclone will start reading a chunk of size \f[C]--vfs-read-chunk-size\f[R], and then double the size for each read. When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater @@ -11932,6 +12217,31 @@ on. .PP Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq] disables chunked reading. +.PP +The chunks will not be buffered in memory. +.SS \f[C]--vfs-read-chunk-streams\f[R] > 0 +.PP +Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size +\f[C]--vfs-read-chunk-size\f[R] concurrently. +The size for each read will stay constant. +.PP +This improves performance massively on high latency links or +very high bandwidth links to high performance object stores. +.PP +Some experimentation will be needed to find the optimum values of +\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R] +as these will depend on the backend in use and the latency to the +backend. +.PP +For high performance object stores (eg AWS S3) a reasonable place to +start might be \f[C]--vfs-read-chunk-streams 16\f[R] and +\f[C]--vfs-read-chunk-size 4M\f[R]. +In testing with AWS S3 the performance scaled roughly as the +\f[C]--vfs-read-chunk-streams\f[R] setting. +.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput.
.SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -12174,15 +12484,15 @@ rclone serve http remote:path [flags] .IP .nf \f[C] - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for http --htpasswd string A htpasswd file - if not provided no authentication is done @@ -12201,7 +12511,7 @@ rclone serve http remote:path [flags] --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -12215,6 +12525,7 @@ rclone serve http remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -12222,9 +12533,13 @@ rclone serve http remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -12252,10 +12567,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. 
-.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. @@ -12266,31 +12578,61 @@ Serve the remote as an NFS mount .PP Create an NFS server that serves the given remote over the network. .PP -The primary purpose for this command is to enable mount +This implements an NFSv3 server to serve any rclone remote via NFS. +.PP +The primary purpose for this command is to enable the mount command (https://rclone.org/commands/rclone_mount/) on recent macOS versions where installing FUSE is very cumbersome. .PP -Since this is running on NFSv3, no authentication method is available. -Any client will be able to access the data. -To limit access, you can use serve NFS on loopback address and rely on -secure tunnels (such as SSH). -For this reason, by default, a random TCP port is chosen and loopback -interface is used for the listening address; meaning that it is only -available to the local machine. +This server does not implement any authentication, so any client will be +able to access the data. +To limit access, you can use \f[C]serve nfs\f[R] on the loopback address, +rely on secure tunnels (such as SSH), or use firewalling. +.PP +For this reason, by default, a random TCP port is chosen and the +loopback interface is used for the listening address, meaning +that it is only available to the local machine. If you want other machines to access the NFS mount over local network, -you need to specify the listening address and port using +you need to specify the listening address and port using the \f[C]--addr\f[R] flag. .PP -Modifying files through NFS protocol requires VFS caching. +Modifying files through the NFS protocol requires VFS caching. Usually you will need to specify \f[C]--vfs-cache-mode\f[R] in order to -be able to write to the mountpoint (full is recommended). +be able to write to the mountpoint (\f[C]full\f[R] is recommended). If you don\[aq]t specify VFS cache mode, the mount will be read-only. -Note also that \f[C]--nfs-cache-handle-limit\f[R] controls the maximum -number of cached file handles stored by the caching handler. +.PP +\f[C]--nfs-cache-type\f[R] controls the type of the NFS handle cache. +By default this is \f[C]memory\f[R] where new handles will be randomly +allocated when needed. +These are stored in memory. +If the server is restarted the handle cache will be lost and connected +NFS clients will get stale handle errors. +.PP +\f[C]--nfs-cache-type disk\f[R] uses an on-disk NFS handle cache. +Rclone hashes the path of the object and stores it in a file named after +the hash. +These hashes are stored on disk in the directory controlled by +\f[C]--cache-dir\f[R], or the exact directory may be specified with +\f[C]--nfs-cache-dir\f[R]. +Using this means that the NFS server can be restarted at will without +affecting the connected clients. +.PP +\f[C]--nfs-cache-type symlink\f[R] is similar to +\f[C]--nfs-cache-type disk\f[R] in that it uses an on-disk cache, but +the cache entries are held as symlinks. +Rclone will use the handle of the underlying file as the NFS handle, +which improves performance. +This sort of cache can\[aq]t be backed up and restored as the underlying +handles will change. +This is Linux only. +.PP +\f[C]--nfs-cache-handle-limit\f[R] controls the maximum number of cached +NFS handles stored by the caching handler. This should not be set too low or you may experience errors when trying to access files.
The default is \f[C]1000000\f[R], but consider lowering this limit if the server\[aq]s system resource usage causes problems. +This is only used by the \f[C]memory\f[R] type cache. .PP To serve NFS over the network use following command: .IP .nf \f[C] rclone serve nfs remote: --addr 0.0.0.0:$PORT --vfs-cache-mode=full \f[R] .fi .PP -We specify a specific port that we can use in the mount command: -.PP +This specifies a port that can be used in the mount command. To mount the server under Linux/macOS, use the following command: .IP .nf \f[C] -mount -oport=$PORT,mountport=$PORT $HOSTNAME: path/to/mountpoint +mount -t nfs -o port=$PORT,mountport=$PORT,tcp $HOSTNAME:/ path/to/mountpoint \f[R] .fi .PP -Where \f[C]$PORT\f[R] is the same port number we used in the serve nfs -command. +Where \f[C]$PORT\f[R] is the same port number used in the +\f[C]serve nfs\f[R] command and \f[C]$HOSTNAME\f[R] is the network +address of the machine that \f[C]serve nfs\f[R] was run on. .PP -This feature is only available on Unix platforms. +This command is only available on Unix platforms. .SS VFS - Virtual File System .PP This command uses the VFS layer. @@ -12590,9 +12932,14 @@ These flags control the chunking: \f[C] --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) +--vfs-read-chunk-streams int The number of parallel streams to read at once \f[R] .fi .PP +The chunking behaves differently depending on the +\f[C]--vfs-read-chunk-streams\f[R] parameter. +.SS \f[C]--vfs-read-chunk-streams\f[R] == 0 +.PP Rclone will start reading a chunk of size \f[C]--vfs-read-chunk-size\f[R], and then double the size for each read. When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater @@ -12610,6 +12957,31 @@ on. .PP Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq] disables chunked reading. +.PP +The chunks will not be buffered in memory. +.SS \f[C]--vfs-read-chunk-streams\f[R] > 0 +.PP +Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size +\f[C]--vfs-read-chunk-size\f[R] concurrently. +The size for each read will stay constant. +.PP +This improves performance massively on high latency links or +very high bandwidth links to high performance object stores. +.PP +Some experimentation will be needed to find the optimum values of +\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R] +as these will depend on the backend in use and the latency to the +backend. +.PP +For high performance object stores (eg AWS S3) a reasonable place to +start might be \f[C]--vfs-read-chunk-streams 16\f[R] and +\f[C]--vfs-read-chunk-size 4M\f[R]. +In testing with AWS S3 the performance scaled roughly as the +\f[C]--vfs-read-chunk-streams\f[R] setting. +.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput.
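+.PP +Returning to the NFS handle cache options described earlier: a server intended to survive restarts without handing out stale handles might be run like this (an illustrative command; the address and cache directory are placeholders): +.IP +.nf +\f[C] +rclone serve nfs remote: --addr 0.0.0.0:2049 --vfs-cache-mode full --nfs-cache-type disk --nfs-cache-dir /var/cache/rclone-nfs +\f[R] +.fi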
.SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -12761,18 +13133,20 @@ rclone serve nfs remote:path [flags] \f[C] --addr string IPaddress:Port or :Port to bind server to --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for nfs + --nfs-cache-dir string The directory the NFS handle cache will use if set --nfs-cache-handle-limit int max file handles cached simultaneously (min 5) (default 1000000) + --nfs-cache-type memory|disk|symlink Type of NFS handle cache to use (default memory) --no-checksum Don\[aq]t compare checksums on up/download --no-modtime Don\[aq]t read/write the modification time (can speed things up) --no-seek Don\[aq]t allow seeking in files --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -12785,6 +13159,7 @@ rclone serve nfs remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -12792,9 +13167,13 @@ rclone serve nfs remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -12822,10 +13201,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. 
@@ -12947,6 +13323,8 @@ be done with file system permissions. .PP \f[C]--addr\f[R] may be repeated to listen on multiple IPs/ports/sockets. +Socket activation, described further below, can also be used to +accomplish the same. .PP \f[C]--server-read-timeout\f[R] and \f[C]--server-write-timeout\f[R] can be used to control the timeouts on the server. @@ -12981,7 +13359,29 @@ authority certificate. \f[C]--min-tls-version\f[R] is minimum TLS version that is acceptable. Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). -.SS Authentication +.SS Socket activation +.PP +Instead of the listening addresses specified above, rclone will listen +to all FDs passed by the service manager, if any (and ignore any +arguments passed by \f[C]--addr\f[R]). +.PP +This allows rclone to be a socket-activated service. +It can be configured with .socket and .service unit files as described +in +https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html +.PP +Socket activation can be tested ad-hoc with the +\f[C]systemd-socket-activate\f[R] command +.IP +.nf +\f[C] + systemd-socket-activate -l 8000 -- rclone serve +\f[R] +.fi +.PP +This will socket-activate rclone on the first connection to port 8000 +over TCP. +.SS Authentication .PP By default this will serve files without needing a login. .PP @@ -13025,7 +13425,7 @@ rclone serve restic remote:path [flags] .IP .nf \f[C] - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --append-only Disallow deletion of repository data --baseurl string Prefix for URLs - leave blank for root @@ -13050,7 +13450,7 @@ rclone serve restic remote:path [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. @@ -13227,6 +13627,40 @@ Object .RE .PP Other operations will return error \f[C]Unimplemented\f[R]. +.SS Authentication +.PP +By default this will serve files without needing a login. +.PP +You can either use an htpasswd file which can take lots of users, or set +a single username and password with the \f[C]--user\f[R] and +\f[C]--pass\f[R] flags. +.PP +If no static users are configured by either of the above methods, and +client certificates are required by the \f[C]--client-ca\f[R] flag +passed to the server, the client certificate common name will be +considered as the username. +.PP +Use \f[C]--htpasswd /path/to/htpasswd\f[R] to provide an htpasswd file. +This is in standard Apache format and supports MD5, SHA1 and BCrypt for +basic authentication. +BCrypt is recommended. +.PP +To create an htpasswd file: +.IP +.nf +\f[C] +touch htpasswd +htpasswd -B htpasswd user +htpasswd -B htpasswd anotherUser +\f[R] +.fi +.PP +The password file can be updated while rclone is running. +.PP +Use \f[C]--realm\f[R] to set the authentication realm. +.PP +Use \f[C]--salt\f[R] to change the password hashing salt from the +default. .SS Server options .PP Use \f[C]--addr\f[R] to specify which IP address and port the server @@ -13246,6 +13680,8 @@ be done with file system permissions. .PP \f[C]--addr\f[R] may be repeated to listen on multiple IPs/ports/sockets.
+Socket activation, described further below, can also be used to +accomplish the same. .PP \f[C]--server-read-timeout\f[R] and \f[C]--server-write-timeout\f[R] can be used to control the timeouts on the server. @@ -13280,7 +13716,29 @@ authority certificate. \f[C]--min-tls-version\f[R] is minimum TLS version that is acceptable. Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). -.SS VFS - Virtual File System +.SS Socket activation +.PP +Instead of the listening addresses specified above, rclone will listen +to all FDs passed by the service manager, if any (and ignore any +arguments passed by \f[C]--addr\f[R]). +.PP +This allows rclone to be a socket-activated service. +It can be configured with .socket and .service unit files as described +in +https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html +.PP +Socket activation can be tested ad-hoc with the +\f[C]systemd-socket-activate\f[R] command +.IP +.nf +\f[C] + systemd-socket-activate -l 8000 -- rclone serve +\f[R] +.fi +.PP +This will socket-activate rclone on the first connection to port 8000 +over TCP. +.SS VFS - Virtual File System .PP This command uses the VFS layer. This adapts the cloud storage objects that rclone uses into something @@ -13556,9 +14014,14 @@ These flags control the chunking: \f[C] --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) +--vfs-read-chunk-streams int The number of parallel streams to read at once \f[R] .fi .PP +The chunking behaves differently depending on the +\f[C]--vfs-read-chunk-streams\f[R] parameter. +.SS \f[C]--vfs-read-chunk-streams\f[R] == 0 +.PP Rclone will start reading a chunk of size \f[C]--vfs-read-chunk-size\f[R], and then double the size for each read. When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater @@ -13576,6 +14039,31 @@ on. .PP Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq] disables chunked reading. +.PP +The chunks will not be buffered in memory. +.SS \f[C]--vfs-read-chunk-streams\f[R] > 0 +.PP +Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size +\f[C]--vfs-read-chunk-size\f[R] concurrently. +The size for each read will stay constant. +.PP +This improves performance massively on high latency links or +very high bandwidth links to high performance object stores. +.PP +Some experimentation will be needed to find the optimum values of +\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R] +as these will depend on the backend in use and the latency to the +backend. +.PP +For high performance object stores (eg AWS S3) a reasonable place to +start might be \f[C]--vfs-read-chunk-streams 16\f[R] and +\f[C]--vfs-read-chunk-size 4M\f[R]. +In testing with AWS S3 the performance scaled roughly as the +\f[C]--vfs-read-chunk-streams\f[R] setting. +.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput.
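+.PP +As a worked example of the \f[C]--auth-key\f[R] option listed below, the server can be started with a fixed v4 key pair like this (illustrative credentials; substitute your own): +.IP +.nf +\f[C] +rclone serve s3 remote:path --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY +\f[R] +.fi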
.SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -13725,19 +14213,21 @@ rclone serve s3 remote:path [flags] .IP .nf \f[C] - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --auth-key stringArray Set key pair for v4 authorization: access_key_id,secret_access_key + --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --etag-hash string Which hash to use for the ETag, or auto or blank for off (default \[dq]MD5\[dq]) - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --force-path-style If true use path style access if false use virtual hosted style (default true) (default true) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for s3 + --htpasswd string A htpasswd file - if not provided no authentication is done --key string TLS PEM Private key --max-header-bytes int Maximum size of request header (default 4096) --min-tls-version string Minimum TLS version that is acceptable (default \[dq]tls1.0\[dq]) @@ -13745,12 +14235,16 @@ rclone serve s3 remote:path [flags] --no-cleanup Not to cleanup empty folder after object is deleted --no-modtime Don\[aq]t read/write the modification time (can speed things up) --no-seek Don\[aq]t allow seeking in files + --pass string Password for authentication --poll-interval Duration Time to wait between polling for changes, must be smaller than dir-cache-time and only on supported remotes (set 0 to disable) (default 1m0s) --read-only Only allow read-only access + --realm string Realm for authentication + --salt string Password hashing salt (default \[dq]dlPL2MqE\[dq]) --server-read-timeout Duration Timeout for server reading data (default 1h0m0s) --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) + --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) --vfs-cache-max-size SizeSuffix Max total size of objects in the cache (default off) @@ -13763,6 +14257,7 @@ rclone serve s3 remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after 
each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -13770,9 +14265,13 @@ rclone serve s3 remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -13800,10 +14299,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. @@ -13848,6 +14344,24 @@ in the \[dq]serve-sftp\[dq] directory. By default the server binds to localhost:2022 - if you want it to be reachable externally then supply \f[C]--addr :2022\f[R] for example. .PP +This also supports being run with socket activation, in which case it +will listen on the first passed FD. +It can be configured with .socket and .service unit files as described +in +https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html +.PP +Socket activation can be tested ad-hoc with the +\f[C]systemd-socket-activate\f[R] command: .IP .nf \f[C] +systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/ +\f[R] .fi .PP +This will socket-activate rclone on the first connection to port 2222 +over TCP. .PP Note that the default of \f[C]--vfs-cache-mode off\f[R] is fine for the rclone sftp backend, but it may not be with other SFTP clients. .PP @@ -14149,9 +14663,14 @@ These flags control the chunking: \f[C] --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) +--vfs-read-chunk-streams int The number of parallel streams to read at once \f[R] .fi .PP +The chunking behaves differently depending on the +\f[C]--vfs-read-chunk-streams\f[R] parameter. +.SS \f[C]--vfs-read-chunk-streams\f[R] == 0 +.PP Rclone will start reading a chunk of size \f[C]--vfs-read-chunk-size\f[R], and then double the size for each read. When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater @@ -14169,6 +14688,31 @@ on. .PP Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq] disables chunked reading. +.PP +The chunks will not be buffered in memory. +.SS \f[C]--vfs-read-chunk-streams\f[R] > 0 +.PP +Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size +\f[C]--vfs-read-chunk-size\f[R] concurrently. +The size for each read will stay constant. +.PP +This improves performance massively on high latency links or +very high bandwidth links to high performance object stores. +.PP +Some experimentation will be needed to find the optimum values of +\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R] +as these will depend on the backend in use and the latency to the +backend.
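+.PP
+For instance, a hypothetical starting point for such an experiment
+(the flag values here are illustrative assumptions, not tuned
+recommendations) might be:
+.IP
+.nf
+\f[C]
+# illustrative values - adjust for your backend and link latency
+rclone serve sftp remote:path --vfs-read-chunk-size 4M --vfs-read-chunk-streams 8
+\f[R]
+.fi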
+.PP +For high performance object stores (eg AWS S3) a reasonable place to +start might be \f[C]--vfs-read-chunk-streams 16\f[R] and +\f[C]--vfs-read-chunk-size 4M\f[R]. +In testing with AWS S3 the performance scaled roughly as the +\f[C]--vfs-read-chunk-streams\f[R] setting. +.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput. .SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -14415,8 +14959,8 @@ rclone serve sftp remote:path [flags] --auth-proxy string A program to use to create the backend from the auth --authorized-keys string Authorized keys file (default \[dq]\[ti]/.ssh/authorized_keys\[dq]) --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) - --file-perms FileMode File permissions (default 0666) + --dir-perms FileMode Directory permissions (default 777) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for sftp --key stringArray SSH private host key file (Can be multi-valued, leave blank to auto generate) @@ -14429,7 +14973,7 @@ rclone serve sftp remote:path [flags] --read-only Only allow read-only access --stdio Run an sftp server on stdin/stdout --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -14443,6 +14987,7 @@ rclone serve sftp remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -14450,9 +14995,13 @@ rclone serve sftp remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -14480,10 +15029,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. 
-.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. @@ -14537,6 +15083,26 @@ SSL connections only 2 - Basic authentication enabled for SSL and for non-SSL connections .PP https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint +.SS Serving over a unix socket +.PP +You can serve webdav on a unix socket like this: +.IP +.nf +\f[C] +rclone serve webdav --addr unix:///tmp/my.socket remote:path +\f[R] +.fi +.PP +and connect to it like this using rclone and the webdav backend: +.IP +.nf +\f[C] +rclone --webdav-unix-socket /tmp/my.socket --webdav-url http://localhost lsf :webdav: +\f[R] +.fi +.PP +Note that there is no authentication on the http protocol - this is expected +to be done by the permissions on the socket. .SS Server options .PP Use \f[C]--addr\f[R] to specify which IP address and port the server @@ -14556,6 +15122,8 @@ be done with file system permissions. .PP \f[C]--addr\f[R] may be repeated to listen on multiple IPs/ports/sockets. +Socket activation, described further below, can also be used to +accomplish the same. .PP \f[C]--server-read-timeout\f[R] and \f[C]--server-write-timeout\f[R] can be used to control the timeouts on the server. @@ -14590,7 +15158,29 @@ authority certificate. \f[C]--min-tls-version\f[R] is minimum TLS version that is acceptable. Valid values are \[dq]tls1.0\[dq], \[dq]tls1.1\[dq], \[dq]tls1.2\[dq] and \[dq]tls1.3\[dq] (default \[dq]tls1.0\[dq]). -.SS Template +.SS Socket activation +.PP +Instead of the listening addresses specified above, rclone will listen +to all FDs passed by the service manager, if any (and ignore any +arguments passed by \f[C]--addr\f[R]). +.PP +This allows rclone to be a socket-activated service. +It can be configured with .socket and .service unit files as described +in +https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html +.PP +Socket activation can be tested ad-hoc with the +\f[C]systemd-socket-activate\f[R] command: +.IP +.nf +\f[C] + systemd-socket-activate -l 8000 -- rclone serve +\f[R] +.fi +.PP +This will socket-activate rclone on the first connection to port 8000 +over TCP. +.SS Template .PP \f[C]--template\f[R] allows a user to specify a custom markup template for HTTP and WebDAV serve functions. @@ -15034,9 +15624,14 @@ These flags control the chunking: \f[C] --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128M) --vfs-read-chunk-size-limit SizeSuffix Max chunk doubling size (default off) +--vfs-read-chunk-streams int The number of parallel streams to read at once \f[R] .fi .PP +The chunking behaves differently depending on the +\f[C]--vfs-read-chunk-streams\f[R] parameter. +.SS \f[C]--vfs-read-chunk-streams\f[R] == 0 +.PP Rclone will start reading a chunk of size \f[C]--vfs-read-chunk-size\f[R], and then double the size for each read. When \f[C]--vfs-read-chunk-size-limit\f[R] is specified, and greater @@ -15054,6 +15649,31 @@ on. .PP Setting \f[C]--vfs-read-chunk-size\f[R] to \f[C]0\f[R] or \[dq]off\[dq] disables chunked reading. +.PP +The chunks will not be buffered in memory. +.SS \f[C]--vfs-read-chunk-streams\f[R] > 0 +.PP +Rclone reads \f[C]--vfs-read-chunk-streams\f[R] chunks of size +\f[C]--vfs-read-chunk-size\f[R] concurrently. +The size for each read will stay constant. +.PP +This improves performance massively on high latency links or +very high bandwidth links to high performance object stores.
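+.PP
+As an illustrative sketch only (assuming an already configured remote
+and untuned flag values), parallel chunk reading might be enabled on a
+WebDAV serve like this:
+.IP
+.nf
+\f[C]
+# assumed values to experiment from, not defaults
+rclone serve webdav remote:path --vfs-read-chunk-size 4M --vfs-read-chunk-streams 8
+\f[R]
+.fi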
+.PP +Some experimentation will be needed to find the optimum values of +\f[C]--vfs-read-chunk-size\f[R] and \f[C]--vfs-read-chunk-streams\f[R] +as these will depend on the backend in use and the latency to the +backend. +.PP +For high performance object stores (eg AWS S3) a reasonable place to +start might be \f[C]--vfs-read-chunk-streams 16\f[R] and +\f[C]--vfs-read-chunk-size 4M\f[R]. +In testing with AWS S3 the performance scaled roughly as the +\f[C]--vfs-read-chunk-streams\f[R] setting. +.PP +Similar settings should work for high latency links, but depending on +the latency they may need more \f[C]--vfs-read-chunk-streams\f[R] in +order to get the throughput. .SS VFS Performance .PP These flags may be used to enable/disable features of the VFS for @@ -15296,17 +15916,17 @@ rclone serve webdav remote:path [flags] .IP .nf \f[C] - --addr stringArray IPaddress:Port or :Port to bind server to (default [127.0.0.1:8080]) + --addr stringArray IPaddress:Port, :Port or [unix://]/path/to/socket to bind server to (default [127.0.0.1:8080]) --allow-origin string Origin which cross-domain request (CORS) can be executed from --auth-proxy string A program to use to create the backend from the auth --baseurl string Prefix for URLs - leave blank for root --cert string TLS PEM key (concatenation of certificate and CA certificate) --client-ca string Client certificate authority to verify clients with --dir-cache-time Duration Time to cache directory entries for (default 5m0s) - --dir-perms FileMode Directory permissions (default 0777) + --dir-perms FileMode Directory permissions (default 777) --disable-dir-list Disable HTML directory list on GET request for a directory --etag-hash string Which hash to use for the ETag, or auto or blank for off - --file-perms FileMode File permissions (default 0666) + --file-perms FileMode File permissions (default 666) --gid uint32 Override the gid field set by the filesystem (not supported on Windows) (default 1000) -h, --help help for webdav --htpasswd string A htpasswd file - if not provided no authentication is done @@ -15325,7 +15945,7 @@ rclone serve webdav remote:path [flags] --server-write-timeout Duration Timeout for server writing data (default 1h0m0s) --template string User-specified template --uid uint32 Override the uid field set by the filesystem (not supported on Windows) (default 1000) - --umask int Override the permission bits set by the filesystem (not supported on Windows) (default 2) + --umask FileMode Override the permission bits set by the filesystem (not supported on Windows) (default 002) --user string User name for authentication --vfs-block-norm-dupes If duplicate filenames exist in the same directory (after normalization), log an error and hide the duplicates (may have a performance cost) --vfs-cache-max-age Duration Max time since last access of objects in the cache (default 1h0m0s) @@ -15339,6 +15959,7 @@ rclone serve webdav remote:path [flags] --vfs-read-ahead SizeSuffix Extra read ahead over --buffer-size when using cache-mode full --vfs-read-chunk-size SizeSuffix Read the source objects in chunks (default 128Mi) --vfs-read-chunk-size-limit SizeSuffix If greater than --vfs-read-chunk-size, double the chunk size after each chunk read, until the limit is reached (\[aq]off\[aq] is unlimited) (default off) + --vfs-read-chunk-streams int The number of parallel streams to read at once --vfs-read-wait Duration Time to wait for in-sequence read before seeking (default 20ms) --vfs-refresh Refreshes the directory cache recursively in the background 
on start --vfs-used-is-size rclone size Use the rclone size algorithm for Used size @@ -15346,9 +15967,13 @@ rclone serve webdav remote:path [flags] --vfs-write-wait Duration Time to wait for in-sequence write before giving error (default 1s) \f[R] .fi +.PP +Options shared with other commands are described next. +See the global flags page (https://rclone.org/flags/) for global options +not listed here. .SS Filter Options .PP -Flags for filtering directory listings. +Flags for filtering directory listings .IP .nf \f[C] @@ -15376,10 +16001,7 @@ Flags for filtering directory listings. --min-size SizeSuffix Only transfer files bigger than this in KiB or suffix B|K|M|G|T|P (default off) \f[R] .fi -.PP -See the global flags page (https://rclone.org/flags/) for global options -not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. @@ -15388,7 +16010,7 @@ remote over a protocol. Changes storage class/tier of objects in remote. .SS Synopsis .PP -rclone settier changes storage tier or class at remote if supported. +Changes storage tier or class at remote if supported. Few cloud storage services provides different storage classes on objects, for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive, Google Cloud Storage, Regional Storage, Nearline, Coldline @@ -15440,7 +16062,7 @@ rclone settier tier remote:path [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -15473,7 +16095,7 @@ things so reading their documentation first is recommended. .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -15518,7 +16140,7 @@ rclone test changenotify remote: [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone test (https://rclone.org/commands/rclone_test/) - Run a test command @@ -15548,7 +16170,7 @@ rclone test histogram [remote:path] [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone test (https://rclone.org/commands/rclone_test/) - Run a test command @@ -15557,8 +16179,8 @@ command Discovers file name or other limitations for paths. .SS Synopsis .PP -rclone info discovers what filenames and upload methods are possible to -write to the paths passed in and how long they can be. +Discovers what filenames and upload methods are possible to write to the +paths passed in and how long they can be. It can take some time. It will write test files into the remote:path passed in. It outputs a bit of go code for each one. @@ -15590,7 +16212,7 @@ rclone test info [remote:path]+ [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone test (https://rclone.org/commands/rclone_test/) - Run a test command @@ -15619,7 +16241,7 @@ rclone test makefile []+ [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. 
-.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone serve (https://rclone.org/commands/rclone_serve/) - Serve a remote over a protocol. @@ -15388,7 +16010,7 @@ remote over a protocol. Changes storage class/tier of objects in remote. .SS Synopsis .PP -rclone settier changes storage tier or class at remote if supported. +Changes storage tier or class at remote if supported. A few cloud storage services provide different storage classes on objects, for example AWS S3 and Glacier, Azure Blob storage - Hot, Cool and Archive, Google Cloud Storage, Regional Storage, Nearline, Coldline and Archive. @@ -15440,7 +16062,7 @@ rclone settier tier remote:path [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -15473,7 +16095,7 @@ things so reading their documentation first is recommended. .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone (https://rclone.org/commands/rclone/) - Show help for rclone commands, flags and backends. @@ -15518,7 +16140,7 @@ rclone test changenotify remote: [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone test (https://rclone.org/commands/rclone_test/) - Run a test command @@ -15548,7 +16170,7 @@ rclone test histogram [remote:path] [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone test (https://rclone.org/commands/rclone_test/) - Run a test command @@ -15557,8 +16179,8 @@ command Discovers file name or other limitations for paths. .SS Synopsis .PP -rclone info discovers what filenames and upload methods are possible to -write to the paths passed in and how long they can be. +Discovers what filenames and upload methods are possible to write to the +paths passed in and how long they can be. It can take some time. It will write test files into the remote:path passed in. It outputs a bit of go code for each one. @@ -15590,7 +16212,7 @@ rclone test info [remote:path]+ [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here. -.SH SEE ALSO +.SS See Also .IP \[bu] 2 rclone test (https://rclone.org/commands/rclone_test/) - Run a test command @@ -15619,7 +16241,7 @@ rclone test makefile []+ [flags] .PP See the global flags page (https://rclone.org/flags/) for global options not listed here.
+.PP An example of a typical timetable to avoid link saturation during daytime working hours could be: .PP +Using spaces as separators: \f[C]--bwlimit \[dq]08:00,512k 12:00,10M 13:00,512k 18:00,30M 23:00,off\[dq]\f[R] .PP -In this example, the transfer bandwidth will be set to 512 KiB/s at 8am -every day. +Using semicolons as separators: +\f[C]--bwlimit \[dq]08:00,512k;12:00,10M;13:00,512k;18:00,30M;23:00,off\[dq]\f[R] +.PP +In these examples, the transfer bandwidth will be set to 512 KiB/s at +8am every day. At noon, it will rise to 10 MiB/s, and drop back to 512 KiB/sec at 1pm. At 6pm, the bandwidth limit will be set to 30 MiB/s, and at 11pm it will be completely disabled (full speed). @@ -16744,8 +17377,12 @@ Anything between 11pm and 8am will remain unlimited. .PP An example of timetable with \f[C]WEEKDAY\f[R] could be: .PP +Using spaces as separators: \f[C]--bwlimit \[dq]Mon-00:00,512 Fri-23:59,10M Sat-10:00,1M Sun-20:00,off\[dq]\f[R] .PP +Using semicolons as separators: +\f[C]--bwlimit \[dq]Mon-00:00,512;Fri-23:59,10M;Sat-10:00,1M;Sun-20:00,off\[dq]\f[R] +.PP It means that, the transfer bandwidth will be set to 512 KiB/s on Monday. It will rise to 10 MiB/s before the end of Friday. @@ -17449,11 +18086,14 @@ local ftp .IP \[bu] 2 sftp +.IP \[bu] 2 +pcloud .PP Without \f[C]--inplace\f[R] (the default) rclone will first upload to a temporary file with an extension like this, where \f[C]XXXXXX\f[R] -represents a random string and \f[C].partial\f[R] is --partial-suffix -value (\f[C].partial\f[R] by default). +represents a hash of the source file\[aq]s fingerprint and +\f[C].partial\f[R] is --partial-suffix value (\f[C].partial\f[R] by +default). .IP .nf \f[C] @@ -18104,6 +18744,7 @@ This flag supplies a program which should supply the config password when run. This is an alternative to rclone prompting for the password or setting the \f[C]RCLONE_CONFIG_PASS\f[R] variable. +It is also used when setting the config password for the first time. .PP The argument to this should be a command with a space separated list of arguments. @@ -18123,6 +18764,11 @@ Eg \f[R] .fi .PP +Note that when changing the configuration password the environment +variable \f[C]RCLONE_PASSWORD_CHANGE=1\f[R] will be set. +This can be used to distinguish initial decryption of the config file +from the new password. +.PP See the Configuration Encryption for more info. .PP See a Windows PowerShell example on the @@ -18774,6 +19420,20 @@ encryption from your configuration. .PP There is no way to recover the configuration if you lose your password. .PP +You can also use +.IP \[bu] 2 +rclone config encryption +set (https://rclone.org/commands/rclone_config_encryption_set/) to set +the config encryption directly +.IP \[bu] 2 +rclone config encryption +remove (https://rclone.org/commands/rclone_config_encryption_remove/) to +remove it +.IP \[bu] 2 +rclone config encryption +check (https://rclone.org/commands/rclone_config_encryption_check/) to +check that it is encrypted properly. +.PP rclone uses nacl secretbox (https://godoc.org/golang.org/x/crypto/nacl/secretbox) which in turn uses XSalsa20 and Poly1305 to encrypt and authenticate your @@ -18855,11 +19515,59 @@ by overriding the location, e.g. with one of the documented special values for memory-only configuration. Since only backend options can be stored in configuration files, this is normally unnecessary for commands that do not operate on backends, e.g. -\f[C]genautocomplete\f[R]. +\f[C]completion\f[R]. 
However, it will be relevant for commands that do operate on backends in general, but are used without referencing a stored remote, e.g. listing local filesystem paths, or connection strings: \f[C]rclone --config=\[dq]\[dq] ls .\f[R] +.SS Configuration Encryption Cheatsheet +.PP +You can quickly apply configuration encryption without the password +being stored or transferred in plain text. +Detailed instructions for popular OSes: +.SS Mac +.IP \[bu] 2 +Generate and store a password +.PP +\f[C]security add-generic-password -a rclone -s config -w $(openssl rand -base64 40)\f[R] +.IP \[bu] 2 +Add the retrieval instruction to your .zprofile / .profile +.PP +\f[C]export RCLONE_PASSWORD_COMMAND=\[dq]/usr/bin/security find-generic-password -a rclone -s config -w\[dq]\f[R] +.SS Linux +.IP \[bu] 2 +Prerequisite +.PP +Linux doesn\[aq]t come with a default password manager. +Let\[aq]s install the \[dq]pass\[dq] utility using a package manager, +e.g. +\f[C]apt install pass\f[R], \f[C]yum install pass\f[R], +etc. (https://www.passwordstore.org/#download); then initialize a +password store: +.PP +\f[C]pass init rclone\f[R] +.IP \[bu] 2 +Generate and store a password +.PP +\f[C]echo $(openssl rand -base64 40) | pass insert -m rclone/config\f[R] +.IP \[bu] 2 +Add the retrieval instruction +.PP +\f[C]export RCLONE_PASSWORD_COMMAND=\[dq]/usr/bin/pass rclone/config\[dq]\f[R] +.SS Windows +.IP \[bu] 2 +Generate and store a password +.PP +\f[C]New-Object -TypeName PSCredential -ArgumentList \[dq]rclone\[dq], (ConvertTo-SecureString -String ([System.Web.Security.Membership]::GeneratePassword(40, 10)) -AsPlainText -Force) | Export-Clixml -Path \[dq]rclone-credential.xml\[dq]\f[R] +.IP \[bu] 2 +Add the password retrieval instruction +.PP +\f[C][Environment]::SetEnvironmentVariable(\[dq]RCLONE_PASSWORD_COMMAND\[dq], \[dq][System.Runtime.InteropServices.Marshal]::PtrToStringAuto([System.Runtime.InteropServices.Marshal]::SecureStringToBSTR((Import-Clixml -Path \[dq]rclone-credential.xml\[dq]).Password))\[dq])\f[R] +.SS Encrypt the config file (all systems) +.IP \[bu] 2 +Execute \f[C]rclone config\f[R] -> \f[C]s\f[R] +.IP \[bu] 2 +Add/update the password from previous steps .SS Developer options .PP These options are useful when developing or debugging rclone. @@ -19031,6 +19739,25 @@ and the \f[C]--syslog-facility\f[R] control which facility it uses. Rclone prefixes all log messages with their level in capitals, e.g. INFO which makes it easy to grep the log file for different kinds of information. +.SS Metrics +.PP +Rclone can publish metrics in the OpenMetrics/Prometheus format. +.PP +To enable the metrics endpoint, use the \f[C]--metrics-addr\f[R] flag. +Metrics can also be published on the \f[C]--rc-addr\f[R] port if the +\f[C]--rc\f[R] and \f[C]--rc-enable-metrics\f[R] flags are supplied +or if using \f[C]rclone rcd --rc-enable-metrics\f[R]. +.PP +Rclone provides extensive configuration options for the metrics HTTP +endpoint. +These settings are grouped under the Metrics section and have the prefix +\f[C]--metrics-*\f[R]. +.PP +When metrics are enabled with \f[C]--rc-enable-metrics\f[R], they will +be published on the same port as the rc API. +In this case, the \f[C]--metrics-*\f[R] flags will be ignored, and the +HTTP endpoint configuration will be managed by the \f[C]--rc-*\f[R] +parameters. .SS Exit Code .PP If any errors occur during the command execution, rclone will exit with @@ -19259,32 +19986,28 @@ There are two ways of doing it, described below.
.SS Configuring using rclone authorize .PP On the headless box run \f[C]rclone\f[R] config but answer \f[C]N\f[R] -to the \f[C]Use web browser to automatically authenticate?\f[R] -question. +to the \f[C]Use auto config?\f[R] question. .IP .nf \f[C] -\&... -Remote config -Use web browser to automatically authenticate rclone with remote? - * Say Y if the machine running rclone has a web browser you can use - * Say N if running rclone on a (remote) machine without web browser access -If not sure try Y. If Y failed, try N. +Use auto config? + * Say Y if not sure + * Say N if you are working on a remote or headless machine + y) Yes (default) n) No y/n> n + +Option config_token. For this to work, you will need rclone available on a machine that has a web browser available. - For more help and alternate methods see: https://rclone.org/remote_setup/ - Execute the following on the machine with the web browser (same rclone version recommended): - - rclone authorize \[dq]dropbox\[dq] - -Then paste the result below: -result> + rclone authorize \[dq]onedrive\[dq] +Then paste the result. +Enter a value. +config_token> \f[R] .fi .PP @@ -19292,7 +20015,7 @@ Then on your main desktop machine .IP .nf \f[C] -rclone authorize \[dq]dropbox\[dq] +rclone authorize \[dq]onedrive\[dq] If your browser doesn\[aq]t open automatically go to the following link: http://127.0.0.1:53682/auth Log in and authorize rclone for access Waiting for code... @@ -19307,7 +20030,7 @@ Then back to the headless box, paste in the code .IP .nf \f[C] -result> SECRET_TOKEN +config_token> SECRET_TOKEN -------------------- [acd12] client_id = @@ -19360,18 +20083,15 @@ ssh -L localhost:53682:localhost:53682 username\[at]remote_server \f[R] .fi .PP -Then on the headless box run \f[C]rclone\f[R] config and answer -\f[C]Y\f[R] to the -\f[C]Use web browser to automatically authenticate?\f[R] question. +Then on the headless box run \f[C]rclone config\f[R] and answer +\f[C]Y\f[R] to the \f[C]Use auto config?\f[R] question. .IP .nf \f[C] -\&... -Remote config -Use web browser to automatically authenticate rclone with remote? - * Say Y if the machine running rclone has a web browser you can use - * Say N if running rclone on a (remote) machine without web browser access -If not sure try Y. If Y failed, try N. +Use auto config? + * Say Y if not sure + * Say N if you are working on a remote or headless machine + y) Yes (default) n) No y/n> y @@ -20415,8 +21135,9 @@ The fix then is to quote values containing spaces. .SS \f[C]--min-size\f[R] - Don\[aq]t transfer any file smaller than this .PP Controls the minimum size file within the scope of an rclone command. -Default units are \f[C]KiB\f[R] but abbreviations \f[C]K\f[R], -\f[C]M\f[R], \f[C]G\f[R], \f[C]T\f[R] or \f[C]P\f[R] are valid. +Default units are \f[C]KiB\f[R] but abbreviations \f[C]B\f[R], +\f[C]K\f[R], \f[C]M\f[R], \f[C]G\f[R], \f[C]T\f[R] or \f[C]P\f[R] are +valid. .PP E.g. \f[C]rclone ls remote: --min-size 50k\f[R] lists files on @@ -20427,8 +21148,9 @@ info. .SS \f[C]--max-size\f[R] - Don\[aq]t transfer any file larger than this .PP Controls the maximum size file within the scope of an rclone command. -Default units are \f[C]KiB\f[R] but abbreviations \f[C]K\f[R], -\f[C]M\f[R], \f[C]G\f[R], \f[C]T\f[R] or \f[C]P\f[R] are valid. +Default units are \f[C]KiB\f[R] but abbreviations \f[C]B\f[R], +\f[C]K\f[R], \f[C]M\f[R], \f[C]G\f[R], \f[C]T\f[R] or \f[C]P\f[R] are +valid. .PP E.g. \f[C]rclone ls remote: --max-size 1G\f[R] lists files on @@ -20794,6 +21516,9 @@ Default Off. 
.SS --rc-enable-metrics .PP Enable OpenMetrics/Prometheus compatible endpoint at \f[C]/metrics\f[R]. +If more control over the metrics is desired (for example running it on a +different port or with different auth) then endpoint can be enabled with +the \f[C]--metrics-*\f[R] flags instead. .PP Default Off. .SS --rc-web-gui @@ -21146,6 +21871,226 @@ be set as a string, eg \[dq]HARD\[dq] for \f[C]CutoffMode\f[R] or .IP \[bu] 2 \f[C]BandwidthSpec\f[R] - this will be set and returned as a string, eg \[dq]1M\[dq]. +.SS Option blocks +.PP +The calls options/info (for the main config) and config/providers (for +the backend config) may be used to get information on the rclone +configuration options. +This can be used to build user interfaces for displaying and setting any +rclone option. +.PP +These consist of arrays of \f[C]Option\f[R] blocks. +These have the following format. +Each block describes a single option. +.PP +.TS +tab(@); +lw(13.6n) lw(11.7n) lw(19.4n) lw(25.3n). +T{ +Field +T}@T{ +Type +T}@T{ +Optional +T}@T{ +Description +T} +_ +T{ +Name +T}@T{ +string +T}@T{ +N +T}@T{ +name of the option in snake_case +T} +T{ +FieldName +T}@T{ +string +T}@T{ +N +T}@T{ +name of the field used in the rc - if blank use Name +T} +T{ +Help +T}@T{ +string +T}@T{ +N +T}@T{ +help, started with a single sentence on a single line +T} +T{ +Groups +T}@T{ +string +T}@T{ +Y +T}@T{ +groups this option belongs to - comma separated string for options +classification +T} +T{ +Provider +T}@T{ +string +T}@T{ +Y +T}@T{ +set to filter on provider +T} +T{ +Default +T}@T{ +any +T}@T{ +N +T}@T{ +default value, if set (and not to nil or \[dq]\[dq]) then Required does +nothing +T} +T{ +Value +T}@T{ +any +T}@T{ +N +T}@T{ +value to be set by flags +T} +T{ +Examples +T}@T{ +Examples +T}@T{ +Y +T}@T{ +predefined values that can be selected from list (multiple-choice +option) +T} +T{ +ShortOpt +T}@T{ +string +T}@T{ +Y +T}@T{ +the short command line option for this +T} +T{ +Hide +T}@T{ +Visibility +T}@T{ +N +T}@T{ +if non zero, this option is hidden from the configurator or the command +line +T} +T{ +Required +T}@T{ +bool +T}@T{ +N +T}@T{ +this option is required, meaning value cannot be empty unless there is a +default +T} +T{ +IsPassword +T}@T{ +bool +T}@T{ +N +T}@T{ +set if the option is a password +T} +T{ +NoPrefix +T}@T{ +bool +T}@T{ +N +T}@T{ +set if the option for this should not use the backend prefix +T} +T{ +Advanced +T}@T{ +bool +T}@T{ +N +T}@T{ +set if this is an advanced config option +T} +T{ +Exclusive +T}@T{ +bool +T}@T{ +N +T}@T{ +set if the answer can only be one of the examples (empty string allowed +unless Required or Default is set) +T} +T{ +Sensitive +T}@T{ +bool +T}@T{ +N +T}@T{ +set if this option should be redacted when using +\f[C]rclone config redacted\f[R] +T} +.TE +.PP +An example of this might be the \f[C]--log-level\f[R] flag. +Note that the \f[C]Name\f[R] of the option becomes the command line flag +with \f[C]_\f[R] replaced with \f[C]-\f[R]. +.IP +.nf +\f[C] +{ + \[dq]Advanced\[dq]: false, + \[dq]Default\[dq]: 5, + \[dq]DefaultStr\[dq]: \[dq]NOTICE\[dq], + \[dq]Examples\[dq]: [ + { + \[dq]Help\[dq]: \[dq]\[dq], + \[dq]Value\[dq]: \[dq]EMERGENCY\[dq] + }, + { + \[dq]Help\[dq]: \[dq]\[dq], + \[dq]Value\[dq]: \[dq]ALERT\[dq] + }, + ... 
+ ], + \[dq]Exclusive\[dq]: true, + \[dq]FieldName\[dq]: \[dq]LogLevel\[dq], + \[dq]Groups\[dq]: \[dq]Logging\[dq], + \[dq]Help\[dq]: \[dq]Log level DEBUG|INFO|NOTICE|ERROR\[dq], + \[dq]Hide\[dq]: 0, + \[dq]IsPassword\[dq]: false, + \[dq]Name\[dq]: \[dq]log_level\[dq], + \[dq]NoPrefix\[dq]: true, + \[dq]Required\[dq]: true, + \[dq]Sensitive\[dq]: false, + \[dq]Type\[dq]: \[dq]LogLevel\[dq], + \[dq]Value\[dq]: null, + \[dq]ValueStr\[dq]: \[dq]NOTICE\[dq] +}, +\f[R] +.fi +.PP +Note that the \f[C]Help\f[R] may be multiple lines separated by +\f[C]\[rs]n\f[R]. +The first line will always be a short sentence and this is the sentence +shown when running \f[C]rclone help flags\f[R]. .SS Specifying remotes to work on .PP Remotes are specified with the \f[C]fs=\f[R], \f[C]srcFs=\f[R], @@ -21180,7 +22125,7 @@ For example this JSON is equivalent to \f[C]remote:/tmp\f[R] \f[C] { \[dq]_name\[dq]: \[dq]remote\[dq], - \[dq]_path\[dq]: \[dq]/tmp\[dq] + \[dq]_root\[dq]: \[dq]/tmp\[dq] } \f[R] .fi @@ -21193,7 +22138,7 @@ And this is equivalent to { \[dq]type\[dq]: \[dq]sftp\[dq], \[dq]host\[dq]: \[dq]example.com\[dq], - \[dq]_path\[dq]: \[dq]/tmp\[dq] + \[dq]_root\[dq]: \[dq]/tmp\[dq] } \f[R] .fi @@ -21204,7 +22149,7 @@ And this is equivalent to \f[C]/tmp/dir\f[R] \f[C] { type = \[dq]local\[dq], - _ path = \[dq]/tmp/dir\[dq] + _root = \[dq]/tmp/dir\[dq] } \f[R] .fi @@ -21438,6 +22383,10 @@ See the config providers (https://rclone.org/commands/rclone_config_providers/) command for more information on the above. .PP +Note that the Options blocks are in the same format as returned by +\[dq]options/info\[dq]. +They are described in the option blocks section. +.PP \f[B]Authentication is required for this call.\f[R] .SS config/setpath: Set the path of the config file .PP @@ -22678,6 +23627,14 @@ Returns: - options - a list of the options block names Returns an object where keys are option block names and values are an object with the current option values in. .PP +Parameters: +.IP \[bu] 2 +blocks: optional string of comma separated blocks to include +.RS 2 +.IP \[bu] 2 +all are included if this is missing or \[dq]\[dq] +.RE +.PP Note that these are the global options which are unaffected by use of the _config and _filter parameters. If you wish to read the parameters set in _config then use @@ -22685,6 +23642,22 @@ options/config and for _filter use options/filter. .PP This shows the internal names of the option within rclone which should map to the external options very easily with a few exceptions. +.SS options/info: Get info about all the global options +.PP +Returns an object where keys are option block names and values are an +array of objects with info about each options. +.PP +Parameters: +.IP \[bu] 2 +blocks: optional string of comma separated blocks to include +.RS 2 +.IP \[bu] 2 +all are included if this is missing or \[dq]\[dq] +.RE +.PP +These objects are in the same format as returned by +\[dq]config/providers\[dq]. +They are described in the option blocks section. .SS options/local: Get the currently active config for this call .PP Returns an object with the keys \[dq]config\[dq] and \[dq]filter\[dq]. @@ -23041,6 +24014,82 @@ If this parameter is not supplied and if there is only one VFS in use then that VFS will be used. If there is more than one VFS in use then the \[dq]fs\[dq] parameter must be supplied. +.SS vfs/queue: Queue info for a VFS. +.PP +This returns info about the upload queue for the selected VFS. +.PP +This is only useful if \f[C]--vfs-cache-mode\f[R] > off. 
+If you call it when the \f[C]--vfs-cache-mode\f[R] is off, it will +return an empty result. +.IP +.nf +\f[C] +{ + \[dq]queued\[dq]: // an array of files queued for upload + [ + { + \[dq]name\[dq]: \[dq]file\[dq], // string: name (full path) of the file, + \[dq]id\[dq]: 123, // integer: id of this item in the queue, + \[dq]size\[dq]: 79, // integer: size of the file in bytes + \[dq]expiry\[dq]: 1.5, // float: time until file is eligible for transfer, lowest goes first + \[dq]tries\[dq]: 1, // integer: number of times we have tried to upload + \[dq]delay\[dq]: 5.0, // float: seconds between upload attempts + \[dq]uploading\[dq]: false, // boolean: true if item is being uploaded + }, + ], +} +\f[R] +.fi +.PP +The \f[C]expiry\f[R] time is the time until the file is eligible for +being uploaded in floating point seconds. +This may go negative. +As rclone only transfers \f[C]--transfers\f[R] files at once, only the +lowest \f[C]--transfers\f[R] expiry times will have \f[C]uploading\f[R] +as \f[C]true\f[R]. +So there may be files with negative expiry times for which +\f[C]uploading\f[R] is \f[C]false\f[R]. +.PP +This command takes an \[dq]fs\[dq] parameter. +If this parameter is not supplied and if there is only one VFS in use +then that VFS will be used. +If there is more than one VFS in use then the \[dq]fs\[dq] parameter +must be supplied. +.SS vfs/queue-set-expiry: Set the expiry time for an item queued for upload. +.PP +Use this to adjust the \f[C]expiry\f[R] time for an item in the upload +queue. +You will need to read the \f[C]id\f[R] of the item using +\f[C]vfs/queue\f[R] before using this call. +.PP +You can then set \f[C]expiry\f[R] to a floating point number of seconds +from now when the item is eligible for upload. +If you want the item to be uploaded as soon as possible then set it to a +large negative number (eg -1000000000). +If you want the upload of the item to be delayed for a long time then +set it to a large positive number. +.PP +Setting the \f[C]expiry\f[R] of an item which has already started +uploading will have no effect - the item will carry on being uploaded. +.PP +This will return an error if called with \f[C]--vfs-cache-mode\f[R] off +or if the \f[C]id\f[R] passed is not found. +.PP +This takes the following parameters: +.IP \[bu] 2 +\f[C]fs\f[R] - select the VFS in use (optional) +.IP \[bu] 2 +\f[C]id\f[R] - a numeric ID as returned from \f[C]vfs/queue\f[R] +.IP \[bu] 2 +\f[C]expiry\f[R] - a new expiry time as floating point seconds +.PP +This returns an empty result on success, or an error. +.PP +This command takes an \[dq]fs\[dq] parameter. +If this parameter is not supplied and if there is only one VFS in use then that VFS will be used. +If there is more than one VFS in use then the \[dq]fs\[dq] parameter +must be supplied. .SS vfs/refresh: Refresh the directory cache. .PP This reads the directories for the specified paths and freshens the @@ -23531,6 +24580,21 @@ T}@T{ - T} T{ +Files.com T}@T{ +MD5, CRC32 T}@T{ +DR/W T}@T{ +Yes T}@T{ +No T}@T{ +R T}@T{ +- T} T{ FTP T}@T{ - @@ -23546,6 +24610,21 @@ - T} T{ +Gofile T}@T{ +MD5 T}@T{ +DR/W T}@T{ +No T}@T{ +Yes T}@T{ +R T}@T{ +- T} T{ Google Cloud Storage T}@T{ MD5 @@ -23861,6 +24940,21 @@ T}@T{ - T} T{ +Pixeldrain T}@T{ +SHA256 T}@T{ +R/W T}@T{ +No T}@T{ +No T}@T{ +R T}@T{ +RW T} T{ premiumize.me T}@T{ - @@ -24696,8 +25790,8 @@ translated to regular (halfwidth) \f[C]*\f[R], \f[C]?\f[R] and .PP The \f[C]--backend-encoding\f[R] flags allow you to change that.
You can disable the encoding completely with -\f[C]--backend-encoding None\f[R] or set \f[C]encoding = None\f[R] in -the config file. +\f[C]--backend-encoding Raw\f[R] or set \f[C]encoding = Raw\f[R] in the +config file. .PP Encoding takes a comma separated list of encodings. You can see the list of all possible values by passing an invalid value @@ -24788,6 +25882,13 @@ T}@T{ \f[C]\[uFF02]\f[R] T} T{ +Exclamation +T}@T{ +\f[C]!\f[R] +T}@T{ +\f[C]\[uFF01]\f[R] +T} +T{ Hash T}@T{ \f[C]#\f[R] @@ -24839,10 +25940,11 @@ T}@T{ \f[C]\[uFF1C]\f[R], \f[C]\[uFF1E]\f[R] T} T{ -None +None \[S1] T}@T{ -No characters are encoded +NUL 0x00 T}@T{ +\[u2400] T} T{ Percent @@ -24916,6 +26018,11 @@ T}@T{ \f[C]\[uFF3B]\f[R], \f[C]\[uFF3D]\f[R] T} .TE +.PP +\[S1] Encoding from NUL 0x00 to \[u2400] is always implicit except when +using Raw. +It was previously incorrectly documented as disabling encoding, and to +maintain backward compatibility, its behavior has not been changed. .SS Encoding example: FTP .PP To take a specific example, the FTP backend\[aq]s default encoding is @@ -24980,7 +26087,7 @@ the default value but without \f[C]Colon,Question,Asterisk\f[R]: .fi .PP Alternatively, you can disable the conversion of any characters with -\f[C]--local-encoding None\f[R]. +\f[C]--local-encoding Raw\f[R]. .PP Instead of using command-line argument \f[C]--local-encoding\f[R], you may also set it as environment @@ -25302,6 +26409,31 @@ T}@T{ Yes T} T{ +Files.com +T}@T{ +Yes +T}@T{ +Yes +T}@T{ +Yes +T}@T{ +Yes +T}@T{ +No +T}@T{ +No +T}@T{ +Yes +T}@T{ +No +T}@T{ +Yes +T}@T{ +No +T}@T{ +Yes +T} +T{ FTP T}@T{ No @@ -25327,6 +26459,31 @@ T}@T{ Yes T} T{ +Gofile +T}@T{ +Yes +T}@T{ +Yes +T}@T{ +Yes +T}@T{ +Yes +T}@T{ +No +T}@T{ +No +T}@T{ +Yes +T}@T{ +No +T}@T{ +Yes +T}@T{ +Yes +T}@T{ +Yes +T} +T{ Google Cloud Storage T}@T{ Yes @@ -25852,6 +27009,31 @@ T}@T{ Yes T} T{ +Pixeldrain +T}@T{ +Yes +T}@T{ +No +T}@T{ +Yes +T}@T{ +Yes +T}@T{ +No +T}@T{ +No +T}@T{ +Yes +T}@T{ +No +T}@T{ +Yes +T}@T{ +Yes +T}@T{ +Yes +T} +T{ premiumize.me T}@T{ Yes @@ -26382,13 +27564,13 @@ This describes the global flags available to every rclone command split into groups. .SS Copy .PP -Flags for anything which can Copy a file. +Flags for anything which can copy a file. .IP .nf \f[C] --check-first Do all the checks before starting transfers - -c, --checksum Check for changes with size & checksum (if available, or fallback to size only). - --compare-dest stringArray Include additional comma separated server-side paths during comparison + -c, --checksum Check for changes with size & checksum (if available, or fallback to size only) + --compare-dest stringArray Include additional server-side paths during comparison --copy-dest stringArray Implies --compare-dest but also copies files from paths into destination --cutoff-mode HARD|SOFT|CAUTIOUS Mode to stop transfers when reaching the max transfer limit HARD|SOFT|CAUTIOUS (default HARD) --ignore-case-sync Ignore case when synchronizing @@ -26422,7 +27604,7 @@ Flags for anything which can Copy a file. .fi .SS Sync .PP -Flags just used for \f[C]rclone sync\f[R]. +Flags used for sync commands. .IP .nf \f[C] @@ -26453,7 +27635,7 @@ Important flags useful for most commands. .fi .SS Check .PP -Flags used for \f[C]rclone check\f[R]. +Flags used for check commands. .IP .nf \f[C] @@ -26462,7 +27644,7 @@ Flags used for \f[C]rclone check\f[R]. .fi .SS Networking .PP -General networking and HTTP stuff. +Flags for general networking and HTTP stuff. 
.IP .nf \f[C] @@ -26473,7 +27655,7 @@ General networking and HTTP stuff. --client-cert string Client SSL certificate (PEM) for mutual TLS auth --client-key string Client SSL private key (PEM) for mutual TLS auth --contimeout Duration Connect timeout (default 1m0s) - --disable-http-keep-alives Disable HTTP keep-alives and use each connection once. + --disable-http-keep-alives Disable HTTP keep-alives and use each connection once --disable-http2 Disable HTTP/2 in the global transport --dscp string Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21 --expect-continue-timeout Duration Timeout when using expect / 100-continue in HTTP (default 1s) @@ -26486,7 +27668,7 @@ General networking and HTTP stuff. --tpslimit float Limit HTTP transactions per second to this --tpslimit-burst int Max burst of transactions for --tpslimit (default 1) --use-cookies Enable session cookiejar - --user-agent string Set the user-agent to a specified string (default \[dq]rclone/v1.67.0\[dq]) + --user-agent string Set the user-agent to a specified string (default \[dq]rclone/v1.68.0\[dq]) \f[R] .fi .SS Performance @@ -26502,7 +27684,7 @@ Flags helpful for increasing performance. .fi .SS Config .PP -General configuration of rclone. +Flags for general configuration of rclone. .IP .nf \f[C] @@ -26586,7 +27768,7 @@ Flags for listing directories. .fi .SS Logging .PP -Logging and statistics. +Flags for logging and statistics. .IP .nf \f[C] @@ -26606,7 +27788,7 @@ Logging and statistics. --stats-one-line-date-format string Enable --stats-one-line-date and use custom formatted date: Enclose date string in double quotes (\[dq]), see https://golang.org/pkg/time/#Time.Format --stats-unit string Show data rate in stats as either \[aq]bits\[aq] or \[aq]bytes\[aq] per second (default \[dq]bytes\[dq]) --syslog Use Syslog for logging - --syslog-facility string Facility for syslog, e.g. KERN,USER,... (default \[dq]DAEMON\[dq]) + --syslog-facility string Facility for syslog, e.g. KERN,USER (default \[dq]DAEMON\[dq]) --use-json-log Use json log format -v, --verbose count Print lots more stuff (repeat for more) \f[R] @@ -26635,12 +27817,12 @@ Flags to control the Remote Control API. .nf \f[C] --rc Enable the remote control server - --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [localhost:5572]) + --rc-addr stringArray IPaddress:Port or :Port to bind server to (default [\[dq]localhost:5572\[dq]]) --rc-allow-origin string Origin which cross-domain request (CORS) can be executed from --rc-baseurl string Prefix for URLs - leave blank for root --rc-cert string TLS PEM key (concatenation of certificate and CA certificate) --rc-client-ca string Client certificate authority to verify clients with - --rc-enable-metrics Enable prometheus metrics on /metrics + --rc-enable-metrics Enable the Prometheus metrics path at the remote control server --rc-files string Path to local files to serve on the HTTP server --rc-htpasswd string A htpasswd file - if not provided no authentication is done --rc-job-expire-duration Duration Expire finished async jobs older than this value (default 1m0s) @@ -26665,10 +27847,34 @@ Flags to control the Remote Control API. --rc-web-gui-update Check and update to latest version of web gui \f[R] .fi +.SS Metrics +.PP +Flags to control the Metrics HTTP endpoint.. 
+.IP +.nf +\f[C] + --metrics-addr stringArray IPaddress:Port or :Port to bind metrics server to (default [\[dq]\[dq]]) + --metrics-allow-origin string Origin which cross-domain request (CORS) can be executed from + --metrics-baseurl string Prefix for URLs - leave blank for root + --metrics-cert string TLS PEM key (concatenation of certificate and CA certificate) + --metrics-client-ca string Client certificate authority to verify clients with + --metrics-htpasswd string A htpasswd file - if not provided no authentication is done + --metrics-key string TLS PEM Private key + --metrics-max-header-bytes int Maximum size of request header (default 4096) + --metrics-min-tls-version string Minimum TLS version that is acceptable (default \[dq]tls1.0\[dq]) + --metrics-pass string Password for authentication + --metrics-realm string Realm for authentication + --metrics-salt string Password hashing salt (default \[dq]dlPL2MqE\[dq]) + --metrics-server-read-timeout Duration Timeout for server reading data (default 1h0m0s) + --metrics-server-write-timeout Duration Timeout for server writing data (default 1h0m0s) + --metrics-template string User-specified template + --metrics-user string User name for authentication + --rc-enable-metrics Enable the Prometheus metrics path at the remote control server +\f[R] +.fi .SS Backend .PP -Backend only flags. -These can be set in the config file also. +Backend-only flags (these can be set in the config file also). .IP .nf \f[C] @@ -26893,6 +28099,12 @@ These can be set in the config file also. --filefabric-token-expiry string Token expiry time --filefabric-url string URL of the Enterprise File Fabric to connect to --filefabric-version string Version read from the file fabric + --filescom-api-key string The API key used to authenticate with Files.com + --filescom-description string Description of the remote + --filescom-encoding Encoding The encoding for the backend (default Slash,BackSlash,Del,Ctl,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot) + --filescom-password string The password used to authenticate with Files.com (obscured) + --filescom-site string Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com) + --filescom-username string The username used to authenticate with Files.com --ftp-ask-password Allow asking for FTP password when needed --ftp-close-timeout Duration Maximum time to wait for a response to close (default 1m0s) --ftp-concurrency int Maximum number of FTP simultaneous connections, 0 for unlimited @@ -26936,6 +28148,12 @@ These can be set in the config file also. --gcs-token string OAuth Access Token as a JSON blob --gcs-token-url string Token server url --gcs-user-project string User project + --gofile-access-token string API Access token + --gofile-account-id string Account ID + --gofile-description string Description of the remote + --gofile-encoding Encoding The encoding for the backend (default Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftPeriod,RightPeriod,InvalidUtf8,Dot,Exclamation) + --gofile-list-chunk int Number of items to list in each call (default 1000) + --gofile-root-folder-id string ID of the root folder --gphotos-auth-url string Auth server URL --gphotos-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s) --gphotos-batch-mode string Upload file batching sync|async|off (default \[dq]sync\[dq]) @@ -27027,6 +28245,7 @@ These can be set in the config file also. 
--local-description string Description of the remote --local-encoding Encoding The encoding for the backend (default Slash,Dot) --local-no-check-updated Don\[aq]t check to see if the files change during upload + --local-no-clone Disable reflink cloning for server-side copies --local-no-preallocate Disable preallocation of disk space for transferred files --local-no-set-modtime Disable setting modtime --local-no-sparse Disable sparse files for multi-thread downloads @@ -27143,6 +28362,10 @@ These can be set in the config file also. --pikpak-upload-concurrency int Concurrency for multipart uploads (default 5) --pikpak-use-trash Send files to the trash instead of deleting permanently (default true) --pikpak-user string Pikpak username + --pixeldrain-api-key string API key for your pixeldrain account + --pixeldrain-api-url string The API endpoint to connect to. In the vast majority of cases it\[aq]s fine to leave (default \[dq]https://pixeldrain.com/api\[dq]) + --pixeldrain-description string Description of the remote + --pixeldrain-root-folder-id string Root of the filesystem to use (default \[dq]me\[dq]) --premiumizeme-auth-url string Auth server URL --premiumizeme-client-id string OAuth Client Id --premiumizeme-client-secret string OAuth Client Secret @@ -27217,6 +28440,7 @@ These can be set in the config file also. --s3-provider string Choose your S3 provider --s3-region string Region to connect to --s3-requester-pays Enables requester pays option when interacting with S3 bucket + --s3-sdk-log-mode Bits Set to debug the SDK (default Off) --s3-secret-access-key string AWS Secret Access Key (password) --s3-server-side-encryption string The server-side encryption algorithm used when storing this object in S3 --s3-session-token string An AWS session token @@ -27227,7 +28451,6 @@ These can be set in the config file also. --s3-sse-customer-key-md5 string If using SSE-C you may provide the secret encryption key MD5 checksum (optional) --s3-sse-kms-key-id string If using KMS ID you must provide the ARN of Key --s3-storage-class string The storage class to use when storing new objects in S3 - --s3-sts-endpoint string Endpoint for STS --s3-upload-concurrency int Concurrency for multipart uploads and copies (default 4) --s3-upload-cutoff SizeSuffix Cutoff for switching to chunked upload (default 200Mi) --s3-use-accelerate-endpoint If true use the AWS S3 accelerated endpoint @@ -27237,6 +28460,7 @@ These can be set in the config file also. --s3-use-multipart-etag Tristate Whether to use ETag in multipart uploads for verification (default unset) --s3-use-multipart-uploads Tristate Set if rclone should use multipart uploads (default unset) --s3-use-presigned-request Whether to use a presigned request or PutObject for single part uploads + --s3-use-unsigned-payload Tristate Whether to use an unsigned payload in PutObject (default unset) --s3-v2-auth If true use v2 authentication --s3-version-at Time Show file versions as they were at the specified time (default off) --s3-version-deleted Show deleted file markers when using versions @@ -27345,10 +28569,12 @@ These can be set in the config file also. 
--swift-encoding Encoding The encoding for the backend (default Slash,InvalidUtf8) --swift-endpoint-type string Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE) (default \[dq]public\[dq]) --swift-env-auth Get swift credentials from environment variables in standard OpenStack form + --swift-fetch-until-empty-page When paginating, always fetch unless we received an empty page --swift-key string API key or password (OS_PASSWORD) --swift-leave-parts-on-error If true avoid calling abort upload on a failure --swift-no-chunk Don\[aq]t chunk files during streaming upload --swift-no-large-objects Disable support for static and dynamic large objects + --swift-partial-page-fetch-threshold int When paginating, fetch if the current page is within this percentage of the limit --swift-region string Region name - optional (OS_REGION_NAME) --swift-storage-policy string The storage policy to use when creating a new container --swift-storage-url string Storage URL - optional (OS_STORAGE_URL) @@ -27386,6 +28612,7 @@ These can be set in the config file also. --webdav-owncloud-exclude-shares Exclude ownCloud shares --webdav-pacer-min-sleep Duration Minimum time to sleep between API calls (default 10ms) --webdav-pass string Password (obscured) + --webdav-unix-socket string Path to a unix domain socket to dial to, instead of opening a TCP connection directly --webdav-url string URL of http host to connect to --webdav-user string User name --webdav-vendor string Name of the WebDAV site/service/software you are using @@ -27395,6 +28622,7 @@ These can be set in the config file also. --yandex-description string Description of the remote --yandex-encoding Encoding The encoding for the backend (default Slash,Del,Ctl,InvalidUtf8,Dot) --yandex-hard-delete Delete files permanently rather than putting them into the trash + --yandex-spoof-ua Set the user agent to match an official version of the yandex disk client. May help with upload performance (default true) --yandex-token string OAuth Access Token as a JSON blob --yandex-token-url string Token server url --zoho-auth-url string Auth server URL @@ -30522,6 +31750,13 @@ Also note a number of academic publications by Benjamin Pierce (http://www.cis.upenn.edu/%7Ebcpierce/papers/index.shtml#File%20Synchronization) about \f[I]Unison\f[R] and synchronization in general. .SS Changelog +.SS \f[C]v1.68\f[R] +.IP \[bu] 2 +Fixed an issue affecting backends that round modtimes to a lower +precision. +.SS \f[C]v1.67\f[R] +.IP \[bu] 2 +Added integration tests against all backends. .SS \f[C]v1.66\f[R] .IP \[bu] 2 Copies and deletes are now handled in one operation instead of two @@ -30803,7 +32038,7 @@ You can verify the signatures and hashes in one command line like this: .IP .nf \f[C] -$ gpg --decrypt SHA256SUMS | sha256sum -c --ignore-missing +$ h=$(gpg --decrypt SHA256SUMS) && echo \[dq]$h\[dq] | sha256sum - -c --ignore-missing gpg: Signature made Mon 17 Jul 2023 15:03:17 BST gpg: using DSA key FBF737ECE9F8AB18604BD2AC93935E02FF3B54FA gpg: Good signature from \[dq]Nick Craig-Wood \[dq] [ultimate] @@ -30864,11 +32099,11 @@ y) Yes n) No y/n> Remote config --------------------- -[remote] -type = fichier -api_key = example_key --------------------- +Configuration complete. +Options: +- type: fichier +- api_key: example_key +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -31191,10 +32426,11 @@ Remote or path to alias. 
Can be \[dq]myremote:path/to/dir\[dq], \[dq]myremote:bucket\[dq], \[dq]myremote:\[dq] or \[dq]/local/path\[dq]. remote> /mnt/storage/backup Remote config --------------------- -[remote] -remote = /mnt/storage/backup --------------------- +Configuration complete. +Options: +- type: alias +- remote: /mnt/storage/backup +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -31576,20 +32812,20 @@ Choose a number from below, or type in your own value \[rs] \[dq]GLACIER_IR\[dq] storage_class> 1 Remote config --------------------- -[remote] -type = s3 -provider = AWS -env_auth = false -access_key_id = XXX -secret_access_key = YYY -region = us-east-1 -endpoint = -location_constraint = -acl = private -server_side_encryption = -storage_class = --------------------- +Configuration complete. +Options: +- type: s3 +- provider: AWS +- env_auth: false +- access_key_id: XXX +- secret_access_key: YYY +- region: us-east-1 +- endpoint: +- location_constraint: +- acl: private +- server_side_encryption: +- storage_class: +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -32008,12 +33244,15 @@ Profile files are standard files used by AWS CLI tools .IP \[bu] 2 By default it will use the profile in your home directory (e.g. \f[C]\[ti]/.aws/credentials\f[R] on unix based systems) file and the -\[dq]default\[dq] profile, to change set these environment variables: +\[dq]default\[dq] profile, to change set these environment variables or +config keys: .RS 2 .IP \[bu] 2 -\f[C]AWS_SHARED_CREDENTIALS_FILE\f[R] to control which file. +\f[C]AWS_SHARED_CREDENTIALS_FILE\f[R] to control which file or the +\f[C]shared_credentials_file\f[R] config key. .IP \[bu] 2 -\f[C]AWS_PROFILE\f[R] to control which profile to use. +\f[C]AWS_PROFILE\f[R] to control which profile to use or the +\f[C]profile\f[R] config key. .RE .RE .IP \[bu] 2 @@ -32023,11 +33262,20 @@ Or, run \f[C]rclone\f[R] on an EC2 instance with an IAM role (AWS only). .IP \[bu] 2 Or, run \f[C]rclone\f[R] in an EKS pod with an IAM role that is associated with a service account (AWS only). +.IP \[bu] 2 +Or, use process +credentials (https://docs.aws.amazon.com/sdkref/latest/guide/feature-process-credentials.html) +to read config from an external program. .RE .PP +With \f[C]env_auth = true\f[R] rclone (which uses the SDK for Go v2) +should support all authentication +methods (https://docs.aws.amazon.com/sdkref/latest/guide/standardized-credentials.html) +that the \f[C]aws\f[R] CLI tool does and the other AWS SDKs. +.PP If none of these option actually end up providing \f[C]rclone\f[R] with -AWS credentials then S3 interaction will be non-authenticated (see -below). +AWS credentials then S3 interaction will be non-authenticated (see the +anonymous access section for more info). .SS S3 Permissions .PP When using the \f[C]sync\f[R] subcommand of \f[C]rclone\f[R] the @@ -33533,6 +34781,9 @@ Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to false - rclone will do this automatically based on the provider setting. .PP +Note that if your bucket isn\[aq]t a valid DNS name, i.e. +has \[aq].\[aq] or \[aq]_\[aq] in, you\[aq]ll need to set this to true. 
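+.PP
+As a sketch, this can also be set on the command line when needed (the
+remote and bucket names here are illustrative):
+.IP
+.nf
+\f[C]
+rclone lsd --s3-force-path-style=true remote:my.bucket.with.dots
+\f[R]
+.fi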
+.PP Properties: .IP \[bu] 2 Config: force_path_style @@ -33871,6 +35122,26 @@ Env Var: RCLONE_S3_USE_MULTIPART_ETAG Type: Tristate .IP \[bu] 2 Default: unset +.SS --s3-use-unsigned-payload +.PP +Whether to use an unsigned payload in PutObject +.PP +Rclone has to avoid the AWS SDK seeking the body when calling PutObject. +The AWS provider can add checksums in the trailer to avoid seeking but +other providers can\[aq]t. +.PP +This should be true, false or left unset to use the default for the +provider. +.PP +Properties: +.IP \[bu] 2 +Config: use_unsigned_payload +.IP \[bu] 2 +Env Var: RCLONE_S3_USE_UNSIGNED_PAYLOAD +.IP \[bu] 2 +Type: Tristate +.IP \[bu] 2 +Default: unset .SS --s3-use-presigned-request .PP Whether to use a presigned request or PutObject for single part uploads @@ -34051,7 +35322,7 @@ Type: bool Default: false .SS --s3-sts-endpoint .PP -Endpoint for STS. +Endpoint for STS (deprecated). .PP Leave blank if using AWS to use the default endpoint for the region. .PP @@ -34121,6 +35392,42 @@ Env Var: RCLONE_S3_USE_MULTIPART_UPLOADS Type: Tristate .IP \[bu] 2 Default: unset +.SS --s3-sdk-log-mode +.PP +Set to debug the SDK +.PP +This can be set to a comma separated list of the following functions: +.IP \[bu] 2 +\f[C]Signing\f[R] +.IP \[bu] 2 +\f[C]Retries\f[R] +.IP \[bu] 2 +\f[C]Request\f[R] +.IP \[bu] 2 +\f[C]RequestWithBody\f[R] +.IP \[bu] 2 +\f[C]Response\f[R] +.IP \[bu] 2 +\f[C]ResponseWithBody\f[R] +.IP \[bu] 2 +\f[C]DeprecatedUsage\f[R] +.IP \[bu] 2 +\f[C]RequestEventMessage\f[R] +.IP \[bu] 2 +\f[C]ResponseEventMessage\f[R] +.PP +Use \f[C]Off\f[R] to disable and \f[C]All\f[R] to set all log levels. +You will need to use \f[C]-vv\f[R] to see the debug level logs. +.PP +Properties: +.IP \[bu] 2 +Config: sdk_log_mode +.IP \[bu] 2 +Env Var: RCLONE_S3_SDK_LOG_MODE +.IP \[bu] 2 +Type: Bits +.IP \[bu] 2 +Default: Off .SS --s3-description .PP Description of the remote. @@ -34269,7 +35576,7 @@ These can be run on a running backend using the rc command backend/command (https://rclone.org/rc/#backend-command). .SS restore .PP -Restore objects from GLACIER to normal storage +Restore objects from GLACIER or INTELLIGENT-TIERING archive tier .IP .nf \f[C] @@ -34278,7 +35585,8 @@ rclone backend restore remote: [options] [+] .fi .PP This command can be used to restore one or more objects from GLACIER to -normal storage. +normal storage or from INTELLIGENT-TIERING Archive Access / Deep Archive +Access tier to the Frequent Access tier. .PP Usage Examples: .IP @@ -34287,6 +35595,7 @@ Usage Examples: rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS +rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY \f[R] .fi .PP @@ -34329,13 +35638,14 @@ Options: .IP \[bu] 2 \[dq]description\[dq]: The optional description for the job. 
.IP \[bu] 2 -\[dq]lifetime\[dq]: Lifetime of the active copy in days +\[dq]lifetime\[dq]: Lifetime of the active copy in days, ignored for +INTELLIGENT-TIERING storage .IP \[bu] 2 \[dq]priority\[dq]: Priority of restore: Standard|Expedited|Bulk .SS restore-status .PP -Show the restore status for objects being restored from GLACIER to -normal storage +Show the restore status for objects being restored from GLACIER or +INTELLIGENT-TIERING storage .IP .nf \f[C] @@ -34344,7 +35654,8 @@ rclone backend restore-status remote: [options] [+] .fi .PP This command can be used to show the status for objects being restored -from GLACIER to normal storage. +from GLACIER to normal storage or from INTELLIGENT-TIERING Archive +Access / Deep Archive Access tier to the Frequent Access tier. .PP Usage Examples: .IP @@ -34380,6 +35691,15 @@ It returns a list of status dictionaries. \[dq]RestoreExpiryDate\[dq]: \[dq]2023-09-06T12:29:19+01:00\[dq] }, \[dq]StorageClass\[dq]: \[dq]DEEP_ARCHIVE\[dq] + }, + { + \[dq]Remote\[dq]: \[dq]test.gz\[dq], + \[dq]VersionID\[dq]: null, + \[dq]RestoreStatus\[dq]: { + \[dq]IsRestoreInProgress\[dq]: true, + \[dq]RestoreExpiryDate\[dq]: \[dq]null\[dq] + }, + \[dq]StorageClass\[dq]: \[dq]INTELLIGENT_TIERING\[dq] } ] \f[R] @@ -34553,15 +35873,6 @@ Your config should end up looking like this: [anons3] type = s3 provider = AWS -env_auth = false -access_key_id = -secret_access_key = -region = us-east-1 -endpoint = -location_constraint = -acl = private -server_side_encryption = -storage_class = \f[R] .fi .PP @@ -34574,6 +35885,14 @@ rclone lsd anons3:1000genomes .fi .PP You will be able to list and copy data but not upload it. +.PP +You can also do this entirely on the command line +.IP +.nf +\f[C] +rclone lsd :s3,provider=AWS:1000genomes +\f[R] +.fi .SS Providers .SS AWS S3 .PP @@ -34782,6 +36101,12 @@ Now run \f[C]rclone lsf r2:\f[R] to see your buckets and For R2 tokens with the \[dq]Object Read & Write\[dq] permission, you may also need to add \f[C]no_check_bucket = true\f[R] for object uploads to work correctly. +.PP +Note that Cloudflare decompresses files uploaded with +\f[C]Content-Encoding: gzip\f[R] by default which is a deviation from +what AWS does. +If this is causing a problem then upload the files with +\f[C]--header-upload \[dq]Cache-Control: no-transform\[dq]\f[R] .SS Dreamhost .PP Dreamhost DreamObjects (https://www.dreamhost.com/cloud/storage/) is an @@ -37762,6 +39087,29 @@ nodes across the network. .PP For more detailed comparison please check the documentation of the storj backend. +.SS Memory usage {memory} +.PP +The most common cause of rclone using lots of memory is a single +directory with millions of files in. +Despite s3 not really having the concepts of directories, rclone does +the sync on a directory by directory basis to be compatible with normal +filing systems. +.PP +Rclone loads each directory into memory as rclone objects. +Each rclone object takes 0.5k-1k of memory, so approximately 1GB per +1,000,000 files, and the sync for that directory does not begin until it +is entirely loaded in memory. +So the sync can take a long time to start for large directories. +.PP +To sync a directory with 100,000,000 files in you would need +approximately 100 GB of memory. +At some point the amount of memory becomes difficult to provide so there +is a workaround for +this (https://github.com/rclone/rclone/wiki/Big-syncs-with-millions-of-files) +which involves a bit of scripting. 
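+.PP
+A minimal sketch of that scripted approach for a big copy (unlike sync
+it does not delete extraneous destination files; the source, the
+destination and the chunk size are illustrative):
+.IP
+.nf
+\f[C]
+# List all file paths once into a text file
+rclone lsf --files-only -R s3:bucket > files.txt
+# Split the listing into chunks of 100,000 names each
+split -l 100000 files.txt filechunk
+# Copy chunk by chunk so only one chunk of objects is loaded at a time
+for f in filechunk*; do
+    rclone copy --files-from-raw \[dq]$f\[dq] --no-traverse s3:bucket /dest
+done
+\f[R]
+.fi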
+.PP +At some point rclone will gain a sync mode which is effectively this +workaround but built in to rclone. .SS Limitations .PP \f[C]rclone about\f[R] is not supported by the S3 backend. @@ -37959,12 +39307,13 @@ key> 0123456789abcdef0123456789abcdef0123456789 Endpoint for the service - leave blank normally. endpoint> Remote config --------------------- -[remote] -account = 123456789abc -key = 0123456789abcdef0123456789abcdef0123456789 -endpoint = --------------------- +Configuration complete. +Options: +- type: b2 +- account: 123456789abc +- key: 0123456789abcdef0123456789abcdef0123456789 +- endpoint: +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -38117,13 +39466,21 @@ There can be at most \f[C]--transfers\f[R] of these in use at any moment, so this sets the upper limit on the memory used. .SS Versions .PP -When rclone uploads a new version of a file it creates a new version of +The default setting of B2 is to keep old versions of files. +This means when rclone uploads a new version of a file it creates a new +version of it (https://www.backblaze.com/docs/cloud-storage-file-versions). Likewise when you delete a file, the old version will be marked hidden and still be available. -Conversely, you may opt in to a \[dq]hard delete\[dq] of files with the -\f[C]--b2-hard-delete\f[R] flag which would permanently remove the file -instead of hiding it. +.PP +Whether B2 keeps old versions of files or not can be adjusted on a per +bucket basis using the \[dq]Lifecycle settings\[dq] on the B2 control +panel or when creating the bucket using the --b2-lifecycle flag or after +creation using the rclone backend lifecycle command. +.PP +You may opt in to a \[dq]hard delete\[dq] of files with the +\f[C]--b2-hard-delete\f[R] flag which permanently removes files on +deletion instead of hiding them. .PP Old versions of files, where available, are visible using the \f[C]--b2-versions\f[R] flag. @@ -38913,12 +40270,13 @@ If your browser doesn\[aq]t open automatically go to the following link: http:// Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -client_id = -client_secret = -token = {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]XXX\[dq]} --------------------- +Configuration complete. +Options: +- type: box +- client_id: +- client_secret: +- token: {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]XXX\[dq]} +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -39022,11 +40380,11 @@ e/n/d/r/c/s/q> e Choose a number from below, or type in an existing value 1 > remote remote> remote --------------------- -[remote] -type = box -token = {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2017-07-08T23:40:08.059167677+01:00\[dq]} --------------------- +Configuration complete. +Options: +- type: box +- token: {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2017-07-08T23:40:08.059167677+01:00\[dq]} +Keep this \[dq]remote\[dq] remote? Edit remote Value \[dq]client_id\[dq] = \[dq]\[dq] Edit? 
(y/n)> @@ -39054,11 +40412,11 @@ If your browser doesn\[aq]t open automatically go to the following link: http:// Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = box -token = {\[dq]access_token\[dq]:\[dq]YYY\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]refresh_token\[dq]:\[dq]YYY\[dq],\[dq]expiry\[dq]:\[dq]2017-07-23T12:22:29.259137901+01:00\[dq]} --------------------- +Configuration complete. +Options: +- type: box +- token: {\[dq]access_token\[dq]:\[dq]YYY\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]refresh_token\[dq]:\[dq]YYY\[dq],\[dq]expiry\[dq]:\[dq]2017-07-23T12:22:29.259137901+01:00\[dq]} +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -41014,12 +42372,12 @@ If your browser doesn\[aq]t open automatically go to the following link: http:// Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = sharefile -endpoint = https://XXX.sharefile.com -token = {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2019-09-30T19:41:45.878561877+01:00\[dq]} --------------------- +Configuration complete. +Options: +- type: sharefile +- endpoint: https://XXX.sharefile.com +- token: {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2019-09-30T19:41:45.878561877+01:00\[dq]} +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -42692,11 +44050,11 @@ Embedded spaces can be added using quotes \[dq]dir=remote:path with space\[dq] \[dq]dir2=remote2:path with space\[dq] Enter a fs.SpaceSepList value. upstreams> images=s3:imagesbucket files=drive:important/files --------------------- -[remote] -type = combine -upstreams = images=s3:imagesbucket files=drive:important/files --------------------- +Configuration complete. +Options: +- type: combine +- upstreams: images=s3:imagesbucket files=drive:important/files +Keep this \[dq]remote\[dq] remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -42844,12 +44202,13 @@ Remote config Please visit: https://www.dropbox.com/1/oauth2/authorize?client_id=XXXXXXXXXXXXXXX&response_type=code Enter the code: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXXXXXXXX --------------------- -[remote] -app_key = -app_secret = -token = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX --------------------- +Configuration complete. +Options: +- type: dropbox +- app_key: +- app_secret: +- token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXX_XXXX_XXXXXXXXXXXXXXXXXXXXXXXXXXXXX +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -43308,7 +44667,7 @@ Max number of files in upload batch. This sets the batch size of files to upload. It has to be less than 1000. .PP -By default this is 0 which means rclone which calculate the batch size +By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode. .IP \[bu] 2 batch_mode: async - default batch_size is 100 @@ -43540,12 +44899,12 @@ y) Yes n) No (default) y/n> n Remote config --------------------- -[remote] -type = filefabric -url = https://yourfabric.smestorage.com/ -permanent_token = xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx --------------------- +Configuration complete. 
+Options: +- type: filefabric +- url: https://yourfabric.smestorage.com/ +- permanent_token: xxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxx +Keep this \[dq]remote\[dq] remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -43792,6 +45151,219 @@ Env Var: RCLONE_FILEFABRIC_DESCRIPTION Type: string .IP \[bu] 2 Required: false +.SH Files.com +.PP +Files.com (https://www.files.com/) is a cloud storage service that +provides a secure and easy way to store and share files. +.PP +The initial setup for filescom involves authenticating with your +Files.com account. +You can do this by providing your site subdomain, username, and +password. +Alternatively, you can authenticate using an API Key from +Files.com (https://www.files.com/docs/sdk-and-apis/api-keys/). +\f[C]rclone config\f[R] walks you through it. +.SS Configuration +.PP +Here is an example of how to make a remote called \f[C]remote\f[R]. +First run: +.IP +.nf +\f[C] +rclone config +\f[R] +.fi +.PP +This will guide you through an interactive setup process: +.IP +.nf +\f[C] +No remotes found, make a new one? +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n + +Enter name for new remote. +name> remote + +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +[snip] +XX / Files.com + \[rs] \[dq]filescom\[dq] +[snip] +Storage> filescom + +Option site. +Your site subdomain (e.g. mysite) or custom domain (e.g. myfiles.customdomain.com) +Enter a value. Press Enter to leave empty. +site> mysite + +Option username. +The username used to authenticate with Files.com. +Enter a value. Press Enter to leave empty. +username> user + +Option password. +The password used to authenticate with Files.com. +Choose an alternative below. Press Enter for the default (n). +y) Yes, type in my own password +g) Generate random password +n) No, leave this optional password blank (default) +y/g/n> y +Enter the password: +password: +Confirm the password: +password: + +Edit advanced config? +y) Yes +n) No (default) +y/n> n + +Configuration complete. +Options: +- type: filescom +- site: mysite +- username: user +- password: *** ENCRYPTED *** +Keep this \[dq]remote\[dq] remote? +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +\f[R] +.fi +.PP +Once configured you can use rclone. +.PP +See all files in the top level: +.IP +.nf +\f[C] +rclone lsf remote: +\f[R] +.fi +.PP +Make a new directory in the root: +.IP +.nf +\f[C] +rclone mkdir remote:dir +\f[R] +.fi +.PP +Recursively List the contents: +.IP +.nf +\f[C] +rclone ls remote: +\f[R] +.fi +.PP +Sync \f[C]/home/local/directory\f[R] to the remote directory, deleting +any excess files in the directory. +.IP +.nf +\f[C] +rclone sync --interactive /home/local/directory remote:dir +\f[R] +.fi +.SS Standard options +.PP +Here are the Standard options specific to filescom (Files.com). +.SS --filescom-site +.PP +Your site subdomain (e.g. +mysite) or custom domain (e.g. +myfiles.customdomain.com). +.PP +Properties: +.IP \[bu] 2 +Config: site +.IP \[bu] 2 +Env Var: RCLONE_FILESCOM_SITE +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --filescom-username +.PP +The username used to authenticate with Files.com. +.PP +Properties: +.IP \[bu] 2 +Config: username +.IP \[bu] 2 +Env Var: RCLONE_FILESCOM_USERNAME +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --filescom-password +.PP +The password used to authenticate with Files.com. 
+.PP +\f[B]NB\f[R] Input to this must be obscured - see rclone +obscure (https://rclone.org/commands/rclone_obscure/). +.PP +Properties: +.IP \[bu] 2 +Config: password +.IP \[bu] 2 +Env Var: RCLONE_FILESCOM_PASSWORD +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS Advanced options +.PP +Here are the Advanced options specific to filescom (Files.com). +.SS --filescom-api-key +.PP +The API key used to authenticate with Files.com. +.PP +Properties: +.IP \[bu] 2 +Config: api_key +.IP \[bu] 2 +Env Var: RCLONE_FILESCOM_API_KEY +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --filescom-encoding +.PP +The encoding for the backend. +.PP +See the encoding section in the +overview (https://rclone.org/overview/#encoding) for more info. +.PP +Properties: +.IP \[bu] 2 +Config: encoding +.IP \[bu] 2 +Env Var: RCLONE_FILESCOM_ENCODING +.IP \[bu] 2 +Type: Encoding +.IP \[bu] 2 +Default: +Slash,BackSlash,Del,Ctl,RightSpace,RightCrLfHtVt,InvalidUtf8,Dot +.SS --filescom-description +.PP +Description of the remote. +.PP +Properties: +.IP \[bu] 2 +Config: description +.IP \[bu] 2 +Env Var: RCLONE_FILESCOM_DESCRIPTION +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false .SH FTP .PP FTP is the File Transfer Protocol. @@ -43867,12 +45439,12 @@ Use FTP over TLS (Explicit) Enter a boolean value (true or false). Press Enter for the default (\[dq]false\[dq]). explicit_tls> Remote config --------------------- -[remote] -type = ftp -host = ftp.example.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: ftp +- host: ftp.example.com +- pass: *** ENCRYPTED *** +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -44447,6 +46019,373 @@ A value of \f[C]1000000000\f[R] means that file time precision of 1 second is available. A value of \f[C]3153600000000000000\f[R] (or another large number) means \[dq]unsupported\[dq]. +.SH Gofile +.PP +Gofile (https://gofile.io) is a content storage and distribution +platform. +Its aim is to provide as much service as possible for free or at a very +low price. +.PP +The initial setup for Gofile involves logging in to the web interface +and going to the \[dq]My Profile\[dq] section. +Copy the \[dq]Account API token\[dq] for use in the config file. +.PP +Note that if you wish to connect rclone to Gofile you will need a +premium account. +.SS Configuration +.PP +Here is an example of how to make a remote called \f[C]remote\f[R]. +First run: +.IP +.nf +\f[C] + rclone config +\f[R] +.fi +.PP +This will guide you through an interactive setup process: +.IP +.nf +\f[C] +No remotes found, make a new one? +n) New remote +s) Set configuration password +q) Quit config +n/s/q> n + +Enter name for new remote. +name> remote + +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +XX / Gofile + \[rs] (gofile) +Storage> gofile + +Option access_token. +API Access token +You can get this from the web control panel. +Enter a value. Press Enter to leave empty. +access_token> YOURACCESSTOKEN + +Edit advanced config? +y) Yes +n) No (default) +y/n> n + +Configuration complete. +Options: +- type: gofile +- access_token: YOURACCESSTOKEN +Keep this \[dq]remote\[dq] remote? 
+y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> y +\f[R] +.fi +.PP +Once configured you can then use \f[C]rclone\f[R] like this, +.PP +List directories and files in the top level of your Gofile +.IP +.nf +\f[C] +rclone lsf remote: +\f[R] +.fi +.PP +To copy a local directory to an Gofile directory called backup +.IP +.nf +\f[C] +rclone copy /home/source remote:backup +\f[R] +.fi +.SS Modification times and hashes +.PP +Gofile supports modification times with a resolution of 1 second. +.PP +Gofile supports MD5 hashes, so you can use the \f[C]--checksum\f[R] +flag. +.SS Restricted filename characters +.PP +In addition to the default restricted characters +set (https://rclone.org/overview/#restricted-characters) the following +characters are also replaced: +.PP +.TS +tab(@); +l c c. +T{ +Character +T}@T{ +Value +T}@T{ +Replacement +T} +_ +T{ +! +T}@T{ +0x21 +T}@T{ +\[uFF01] +T} +T{ +\[dq] +T}@T{ +0x22 +T}@T{ +\[uFF02] +T} +T{ +* +T}@T{ +0x2A +T}@T{ +\[uFF0A] +T} +T{ +: +T}@T{ +0x3A +T}@T{ +\[uFF1A] +T} +T{ +< +T}@T{ +0x3C +T}@T{ +\[uFF1C] +T} +T{ +> +T}@T{ +0x3E +T}@T{ +\[uFF1E] +T} +T{ +? +T}@T{ +0x3F +T}@T{ +\[uFF1F] +T} +T{ +\[rs] +T}@T{ +0x5C +T}@T{ +\[uFF3C] +T} +T{ +| +T}@T{ +0x7C +T}@T{ +\[uFF5C] +T} +.TE +.PP +File names can also not start or end with the following characters. +These only get replaced if they are the first or last character in the +name: +.PP +.TS +tab(@); +l c c. +T{ +Character +T}@T{ +Value +T}@T{ +Replacement +T} +_ +T{ +\&. +T}@T{ +0x2E +T}@T{ +\[uFF0E] +T} +.TE +.PP +Invalid UTF-8 bytes will also be +replaced (https://rclone.org/overview/#invalid-utf8), as they can\[aq]t +be used in JSON strings. +.SS Public Links +.PP +Gofile supports \f[C]rclone link\f[R] to make public links to files or +directories. +If you specify a directory it will download as a \f[C]zip\f[R] file. +You can use the \f[C]--expire\f[R] flag to specify the time the link +should be valid. +Note that \f[C]rclone link --unlink\f[R] removes all the public links +for a file. +.SS Root folder ID +.PP +You can set the \f[C]root_folder_id\f[R] for rclone. +This is the directory (identified by its \f[C]Folder ID\f[R]) that +rclone considers to be the root of your Gofile drive. +.PP +Normally you will leave this blank and rclone will determine the correct +root to use itself and fill in the value in the config file. +.PP +However you can set this to restrict rclone to a specific folder +hierarchy. +.PP +In order to do this you will have to find the \f[C]Folder ID\f[R] of the +directory you wish rclone to display. +.PP +You can do this with rclone +.IP +.nf +\f[C] +$ rclone lsf -Fip --dirs-only remote: +d6341f53-ee65-4f29-9f59-d11e8070b2a0;Files/ +f4f5c9b8-6ece-478b-b03e-4538edfe5a1c;Photos/ +d50e356c-29ca-4b27-a3a7-494d91026e04;Videos/ +\f[R] +.fi +.PP +The ID to use is the part before the \f[C];\f[R] so you could set +.IP +.nf +\f[C] +root_folder_id = d6341f53-ee65-4f29-9f59-d11e8070b2a0 +\f[R] +.fi +.PP +To restrict rclone to the \f[C]Files\f[R] directory. +.SS Standard options +.PP +Here are the Standard options specific to gofile (Gofile). +.SS --gofile-access-token +.PP +API Access token +.PP +You can get this from the web control panel. +.PP +Properties: +.IP \[bu] 2 +Config: access_token +.IP \[bu] 2 +Env Var: RCLONE_GOFILE_ACCESS_TOKEN +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS Advanced options +.PP +Here are the Advanced options specific to gofile (Gofile). 
+.SS --gofile-root-folder-id +.PP +ID of the root folder +.PP +Leave this blank normally, rclone will fill it in automatically. +.PP +If you want rclone to be restricted to a particular folder you can fill +it in - see the docs for more info. +.PP +Properties: +.IP \[bu] 2 +Config: root_folder_id +.IP \[bu] 2 +Env Var: RCLONE_GOFILE_ROOT_FOLDER_ID +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --gofile-account-id +.PP +Account ID +.PP +Leave this blank normally, rclone will fill it in automatically. +.PP +Properties: +.IP \[bu] 2 +Config: account_id +.IP \[bu] 2 +Env Var: RCLONE_GOFILE_ACCOUNT_ID +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --gofile-list-chunk +.PP +Number of items to list in each call +.PP +Properties: +.IP \[bu] 2 +Config: list_chunk +.IP \[bu] 2 +Env Var: RCLONE_GOFILE_LIST_CHUNK +.IP \[bu] 2 +Type: int +.IP \[bu] 2 +Default: 1000 +.SS --gofile-encoding +.PP +The encoding for the backend. +.PP +See the encoding section in the +overview (https://rclone.org/overview/#encoding) for more info. +.PP +Properties: +.IP \[bu] 2 +Config: encoding +.IP \[bu] 2 +Env Var: RCLONE_GOFILE_ENCODING +.IP \[bu] 2 +Type: Encoding +.IP \[bu] 2 +Default: +Slash,LtGt,DoubleQuote,Colon,Question,Asterisk,Pipe,BackSlash,Del,Ctl,LeftPeriod,RightPeriod,InvalidUtf8,Dot,Exclamation +.SS --gofile-description +.PP +Description of the remote. +.PP +Properties: +.IP \[bu] 2 +Config: description +.IP \[bu] 2 +Env Var: RCLONE_GOFILE_DESCRIPTION +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS Limitations +.PP +Gofile only supports filenames up to 255 characters in length, where a +character is a unicode character. +.PP +Directories should not be cached for more than 24h otherwise files in +the directory may not be downloadable. +In practice this means when using a VFS based rclone command such as +\f[C]rclone mount\f[R] you should make sure \f[C]--dir-cache-time\f[R] +is less than \f[C]24h\f[R]. +.PP +Note that Gofile is currently limited to a total of 100,000 items. +If you attempt to upload more than that you will get +\f[C]error-limit-100000\f[R]. +This limit may be lifted in the future. +.SS Duplicated files +.PP +Gofile is capable of having files with duplicated file names. +For instance two files called \f[C]hello.txt\f[R] in the same directory. +.PP +Rclone cannot sync that to a normal file system but it can be fixed with +the \f[C]rclone dedupe\f[R] command. +.PP +Duplicated files cause problems with the syncing and you will see +messages in the log about duplicates. +.PP +Use \f[C]rclone dedupe\f[R] to fix duplicated files. .SH Google Cloud Storage .PP Paths are specified as \f[C]remote:bucket\f[R] (or \f[C]remote:\f[R] for @@ -44577,16 +46516,16 @@ If your browser doesn\[aq]t open automatically go to the following link: http:// Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = google cloud storage -client_id = -client_secret = -token = {\[dq]AccessToken\[dq]:\[dq]xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]RefreshToken\[dq]:\[dq]x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx\[dq],\[dq]Expiry\[dq]:\[dq]2014-07-17T20:49:14.929208288+01:00\[dq],\[dq]Extra\[dq]:null} -project_number = 12345678 -object_acl = private -bucket_acl = private --------------------- +Configuration complete. 
+Options: +- type: google cloud storage +- client_id: +- client_secret: +- token: {\[dq]AccessToken\[dq]:\[dq]xxxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]RefreshToken\[dq]:\[dq]x/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_xxxxxxxxx\[dq],\[dq]Expiry\[dq]:\[dq]2014-07-17T20:49:14.929208288+01:00\[dq],\[dq]Extra\[dq]:null} +- project_number: 12345678 +- object_acl: private +- bucket_acl: private +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -45614,15 +47553,16 @@ Configure this as a Shared Drive (Team Drive)? y) Yes n) No y/n> n --------------------- -[remote] -client_id = -client_secret = -scope = drive -root_folder_id = -service_account_file = -token = {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2014-03-16T13:57:58.955387075Z\[dq]} --------------------- +Configuration complete. +Options: +type: drive +- client_id: +- client_secret: +- scope: drive +- root_folder_id: +- service_account_file: +- token: {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2014-03-16T13:57:58.955387075Z\[dq]} +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -45760,12 +47700,11 @@ If you\[aq]d rather stuff the contents of the credentials file into the rclone config file, you can set \f[C]service_account_credentials\f[R] with the actual contents of the file instead, or set the equivalent environment variable. -.SS Use case - Google Apps/G-suite account and individual Drive +.SS Use case - Google Workspace account and individual Drive .PP -Let\[aq]s say that you are the administrator of a Google Apps (old) or -G-suite account. -The goal is to store data on an individual\[aq]s Drive account, who IS a -member of the domain. +Let\[aq]s say that you are the administrator of a Google Workspace. +The goal is to read or write data on an individual\[aq]s Drive account, +who IS a member of the domain. We\[aq]ll call the domain \f[B]example.com\f[R], and the user \f[B]foo\[at]example.com\f[R]. .PP @@ -45775,7 +47714,8 @@ There\[aq]s a few steps we need to go through to accomplish this: To create a service account and obtain its credentials, go to the Google Developer Console (https://console.developers.google.com). .IP \[bu] 2 -You must have a project - create one if you don\[aq]t. +You must have a project - create one if you don\[aq]t and make sure you +are on the selected project. .IP \[bu] 2 Then go to \[dq]IAM & admin\[dq] -> \[dq]Service Accounts\[dq]. .IP \[bu] 2 @@ -45786,29 +47726,45 @@ with something that identifies your client. Select \[dq]Create And Continue\[dq]. Step 2 and 3 are optional. .IP \[bu] 2 -These credentials are what rclone will use for authentication. +Click on the newly created service account +.IP \[bu] 2 +Click \[dq]Keys\[dq] and then \[dq]Add Key\[dq] and then \[dq]Create new +key\[dq] +.IP \[bu] 2 +Choose type \[dq]JSON\[dq] and click create +.IP \[bu] 2 +This will download a small JSON file that rclone will use for +authentication. +.PP If you ever need to remove access, press the \[dq]Delete service account key\[dq] button. .SS 2. 
Allowing API access to example.com Google Drive .IP \[bu] 2 -Go to example.com\[aq]s admin console +Go to example.com\[aq]s Workspace Admin +Console (https://admin.google.com) .IP \[bu] 2 Go into \[dq]Security\[dq] (or use the search bar) .IP \[bu] 2 -Select \[dq]Show more\[dq] and then \[dq]Advanced settings\[dq] +Select \[dq]Access and data control\[dq] and then \[dq]API controls\[dq] .IP \[bu] 2 -Select \[dq]Manage API client access\[dq] in the -\[dq]Authentication\[dq] section +Click \[dq]Manage domain-wide delegation\[dq] .IP \[bu] 2 -In the \[dq]Client Name\[dq] field enter the service account\[aq]s +Click \[dq]Add new\[dq] +.IP \[bu] 2 +In the \[dq]Client ID\[dq] field enter the service account\[aq]s \[dq]Client ID\[dq] - this can be found in the Developer Console under \[dq]IAM & Admin\[dq] -> \[dq]Service Accounts\[dq], then \[dq]View Client ID\[dq] for the newly created service account. It is a \[ti]21 character numerical string. .IP \[bu] 2 -In the next field, \[dq]One or More API Scopes\[dq], enter -\f[C]https://www.googleapis.com/auth/drive\f[R] to grant access to -Google Drive specifically. +In the next field, \[dq]OAuth Scopes\[dq], enter +\f[C]https://www.googleapis.com/auth/drive\f[R] to grant read/write +access to Google Drive specifically. +You can also use +\f[C]https://www.googleapis.com/auth/drive.readonly\f[R] for read only +access. +.IP \[bu] 2 +Click \[dq]Authorise\[dq] .SS 3. Configure rclone, assuming a new install .IP .nf @@ -45817,12 +47773,12 @@ rclone config n/s/q> n # New name>gdrive # Gdrive is an example name -Storage> # Select the number shown for Google Drive +Storage> # Type drive client_id> # Can be left blank client_secret> # Can be left blank -scope> # Select your scope, 1 for example +scope> # Select the scope use used in step 2 root_folder_id> # Can be left blank -service_account_file> /home/foo/myJSONfile.json # This is where the JSON file goes! +service_account_file> /home/foo/myJSONfile.json # Path to the JSON file you downloaded in step 1. y/n> # Auto config, n \f[R] .fi @@ -45848,7 +47804,7 @@ Note: in case you configured a specific root folder on gdrive and rclone is unable to access the contents of that folder when using \f[C]--drive-impersonate\f[R], do this instead: - in the gdrive web interface, share your root folder with the user/email of the new Service -Account you created/selected at step #1 - use rclone without specifying +Account you created/selected at step 1 - use rclone without specifying the \f[C]--drive-impersonate\f[R] option, like this: \f[C]rclone -v lsf gdrive:backup\f[R] .SS Shared drives (team drives) @@ -45878,13 +47834,14 @@ Choose a number from below, or type in your own value 3 / Rclone Test 3 \[rs] \[dq]zzzzzzzzzzzzzzzzzzzz\[dq] Enter a Shared Drive ID> 1 --------------------- -[remote] -client_id = -client_secret = -token = {\[dq]AccessToken\[dq]:\[dq]xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]RefreshToken\[dq]:\[dq]1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]Expiry\[dq]:\[dq]2014-03-16T13:57:58.955387075Z\[dq],\[dq]Extra\[dq]:null} -team_drive = xxxxxxxxxxxxxxxxxxxx --------------------- +Configuration complete. 
+Options: +- type: drive +- client_id: +- client_secret: +- token: {\[dq]AccessToken\[dq]:\[dq]xxxx.x.xxxxx_xxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]RefreshToken\[dq]:\[dq]1/xxxxxxxxxxxxxxxx_xxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]Expiry\[dq]:\[dq]2014-03-16T13:57:58.955387075Z\[dq],\[dq]Extra\[dq]:null} +- team_drive: xxxxxxxxxxxxxxxxxxxx +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -48284,11 +50241,11 @@ Got code *** are stored in full resolution at original quality. These uploads *** will count towards storage in your Google Account. --------------------- -[remote] -type = google photos -token = {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2019-06-28T17:38:04.644930156+01:00\[dq]} --------------------- +Configuration complete. +Options: +- type: google photos +- token: {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2019-06-28T17:38:04.644930156+01:00\[dq]} +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -48678,7 +50635,7 @@ Max number of files in upload batch. This sets the batch size of files to upload. It has to be less than 50. .PP -By default this is 0 which means rclone which calculate the batch size +By default this is 0 which means rclone will calculate the batch size depending on the setting of batch_mode. .IP \[bu] 2 batch_mode: async - default batch_size is 50 @@ -49239,6 +51196,9 @@ download object, calculate all \f[I]supported\f[R] hashes on the fly and store in cache; return requested hash. .SS Other operations .IP \[bu] 2 +any time a hash is requested, follow the logic from 1-4 from +\f[C]hashsum\f[R] above +.IP \[bu] 2 whenever a file is uploaded or downloaded \f[B]in full\f[R], capture the stream to calculate all supported hashes on the fly and update database .IP \[bu] 2 @@ -49321,12 +51281,12 @@ y) Yes n) No (default) y/n> n Remote config --------------------- -[remote] -type = hdfs -namenode = namenode.hadoop:8020 -username = root --------------------- +Configuration complete. +Options: +- type: hdfs +- namenode: namenode.hadoop:8020 +- username: root +Keep this \[dq]remote\[dq] remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -49639,11 +51599,11 @@ If your browser doesn\[aq]t open automatically go to the following link: http:// Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = hidrive -token = {\[dq]access_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxx\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]expiry\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxx\[dq]} --------------------- +Configuration complete. +Options: +- type: hidrive +- token: {\[dq]access_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxx\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]expiry\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxx\[dq]} +Keep this \[dq]remote\[dq] remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -50231,10 +52191,11 @@ Choose a number from below, or type in your own value \[rs] \[dq]https://example.com\[dq] url> https://beta.rclone.org Remote config --------------------- -[remote] -url = https://beta.rclone.org --------------------- +Configuration complete. 
+Options: +- type: http +- url: https://beta.rclone.org +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -51057,12 +53018,12 @@ Edit advanced config? y) Yes n) No (default) y/n> n --------------------- -[remote] -type = internetarchive -access_key_id = XXXX -secret_access_key = XXXX --------------------- +Configuration complete. +Options: +- type: internetarchive +- access_key_id: XXXX +- secret_access_key: XXXX +Keep this \[dq]remote\[dq] remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -51580,18 +53541,18 @@ Press Enter for the default (Archive). 2 > Shared 3 > Sync config_mountpoint> 1 --------------------- -[remote] -type = jottacloud -configVersion = 1 -client_id = jottacli -client_secret = -tokenURL = https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token -token = {........} -username = 2940e57271a93d987d6f8a21 -device = Jotta -mountpoint = Archive --------------------- +Configuration complete. +Options: +- type: jottacloud +- configVersion: 1 +- client_id: jottacli +- client_secret: +- tokenURL: https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token +- token: {........} +- username: 2940e57271a93d987d6f8a21 +- device: Jotta +- mountpoint: Archive +Keep this \[dq]remote\[dq] remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -52718,13 +54679,13 @@ y) Yes n) No y/n> n Remote config --------------------- -[remote] -type = mailru -user = username\[at]mail.ru -pass = *** ENCRYPTED *** -speedup_enable = true --------------------- +Configuration complete. +Options: +- type: mailru +- user: username\[at]mail.ru +- pass: *** ENCRYPTED *** +- speedup_enable: true +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -53304,12 +55265,12 @@ password: Confirm the password: password: Remote config --------------------- -[remote] -type = mega -user = you\[at]example.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: mega +- user: you\[at]example.com +- pass: *** ENCRYPTED *** +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -53653,10 +55614,10 @@ Storage> memory Remote config --------------------- -[remote] -type = memory --------------------- +Configuration complete. +Options: +- type: memory +Keep this \[dq]remote\[dq] remote? y) Yes this is OK (default) e) Edit this remote d) Delete this remote @@ -54166,12 +56127,13 @@ key> base64encodedkey== Endpoint for the service - leave blank normally. endpoint> Remote config --------------------- -[remote] -account = account_name -key = base64encodedkey== -endpoint = --------------------- +Configuration complete. +Options: +- type: azureblob +- account: account_name +- key: base64encodedkey== +- endpoint: +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -54564,6 +56526,17 @@ be explicitly specified using exactly one of the If none of \f[C]msi_object_id\f[R], \f[C]msi_client_id\f[R], or \f[C]msi_mi_res_id\f[R] is set, this is is equivalent to using \f[C]env_auth\f[R]. +.SS Anonymous +.PP +If you want to access resources with public anonymous access then set +\f[C]account\f[R] only. 
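+.PP
+For example, a minimal config file entry for anonymous access might
+look like this (the account name is illustrative):
+.IP
+.nf
+\f[C]
+[anonblob]
+type = azureblob
+account = storageaccountname
+\f[R]
+.fi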
+You can do this without making an rclone config: +.IP +.nf +\f[C] +rclone lsf :azureblob,account=ACCOUNT:CONTAINER +\f[R] +.fi .SS Standard options .PP Here are the Standard options specific to azureblob (Microsoft Azure @@ -56326,13 +58299,13 @@ Is that okay? y) Yes n) No y/n> y --------------------- -[remote] -type = onedrive -token = {\[dq]access_token\[dq]:\[dq]youraccesstoken\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]yourrefreshtoken\[dq],\[dq]expiry\[dq]:\[dq]2018-08-26T22:39:52.486512262+08:00\[dq]} -drive_id = b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk -drive_type = business --------------------- +Configuration complete. +Options: +- type: onedrive +- token: {\[dq]access_token\[dq]:\[dq]youraccesstoken\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]yourrefreshtoken\[dq],\[dq]expiry\[dq]:\[dq]2018-08-26T22:39:52.486512262+08:00\[dq]} +- drive_id: b!Eqwertyuiopasdfghjklzxcvbnm-7mnbvcxzlkjhgfdsapoiuytrewqk +- drive_type: business +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -58066,11 +60039,12 @@ Enter the password: password: Confirm the password: password: --------------------- -[remote] -username = -password = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: opendrive +- username: +- password: *** ENCRYPTED *** +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -59535,15 +61509,16 @@ Number of connection retry. Leave blank will use the default value \[dq]3\[dq]. connection_retries> Remote config --------------------- -[remote] -env_auth = false -access_key_id = access_key -secret_access_key = secret_key -endpoint = -zone = pek3a -connection_retries = --------------------- +Configuration complete. +Options: +- type: qingstor +- env_auth: false +- access_key_id: access_key +- secret_access_key: secret_key +- endpoint: +- zone: pek3a +- connection_retries: +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -59940,11 +61915,12 @@ api_key> your_api_key Host name of Quatrix account. host> example.quatrix.it --------------------- -[remote] -api_key = your_api_key -host = example.quatrix.it --------------------- +Configuration complete. +Options: +- type: quatrix +- api_key: your_api_key +- host: example.quatrix.it +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -60006,12 +61982,12 @@ e/n/d/r/c/s/q> e Choose a number from below, or type in an existing value 1 > remote remote> remote --------------------- -[remote] -type = quatrix -host = some_host.quatrix.it -api_key = your_api_key --------------------- +Configuration complete. +Options: +- type: quatrix +- host: some_host.quatrix.it +- api_key: your_api_key +Keep this \[dq]remote\[dq] remote? Edit remote Option api_key. API key for accessing Quatrix account @@ -60021,12 +61997,12 @@ Option host. Host name of Quatrix account Enter a string value. Press Enter for the default (some_host.quatrix.it). --------------------- -[remote] -type = quatrix -host = some_host.quatrix.it -api_key = your_api_key --------------------- +Configuration complete. +Options: +- type: quatrix +- host: some_host.quatrix.it +- api_key: your_api_key +Keep this \[dq]remote\[dq] remote? 
y) Yes this is OK e) Edit this remote d) Delete this remote @@ -61146,6 +63122,53 @@ Env Var: RCLONE_SWIFT_LEAVE_PARTS_ON_ERROR Type: bool .IP \[bu] 2 Default: false +.SS --swift-fetch-until-empty-page +.PP +When paginating, always fetch unless we received an empty page. +.PP +Consider using this option if rclone listings show fewer objects than +expected, or if repeated syncs copy unchanged objects. +.PP +It is safe to enable this, but rclone may make more API calls than +necessary. +.PP +This is one of a pair of workarounds to handle implementations of the +Swift API that do not implement pagination as expected. +See also \[dq]partial_page_fetch_threshold\[dq]. +.PP +Properties: +.IP \[bu] 2 +Config: fetch_until_empty_page +.IP \[bu] 2 +Env Var: RCLONE_SWIFT_FETCH_UNTIL_EMPTY_PAGE +.IP \[bu] 2 +Type: bool +.IP \[bu] 2 +Default: false +.SS --swift-partial-page-fetch-threshold +.PP +When paginating, fetch if the current page is within this percentage of +the limit. +.PP +Consider using this option if rclone listings show fewer objects than +expected, or if repeated syncs copy unchanged objects. +.PP +It is safe to enable this, but rclone may make more API calls than +necessary. +.PP +This is one of a pair of workarounds to handle implementations of the +Swift API that do not implement pagination as expected. +See also \[dq]fetch_until_empty_page\[dq]. +.PP +Properties: +.IP \[bu] 2 +Config: partial_page_fetch_threshold +.IP \[bu] 2 +Env Var: RCLONE_SWIFT_PARTIAL_PAGE_FETCH_THRESHOLD +.IP \[bu] 2 +Type: int +.IP \[bu] 2 +Default: 0 .SS --swift-chunk-size .PP Above this size files will be chunked. @@ -61392,12 +63415,13 @@ If your browser doesn\[aq]t open automatically go to the following link: http:// Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -client_id = -client_secret = -token = {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]expiry\[dq]:\[dq]0001-01-01T00:00:00Z\[dq]} --------------------- +Configuration complete. +Options: +- type: pcloud +- client_id: +- client_secret: +- token: {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]bearer\[dq],\[dq]expiry\[dq]:\[dq]0001-01-01T00:00:00Z\[dq]} +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -62125,6 +64149,237 @@ user-uploaded files. Deleted files will still be visible with \f[C]--pikpak-trashed-only\f[R] even after the trash emptied. This goes away after few days. +.SH Pixeldrain +.PP +This is the backend for Pixeldrain\[aq]s premium filesystem feature. +This is not the same as pixeldrain\[aq]s free file sharing product. +The filesystem requires either a Pro subscription or the Prepaid plan. +More information on subscriptions (https://pixeldrain.com/#pro). +.PP +An overview of the filesystem\[aq]s features and limitations is +available in the filesystem guide (https://pixeldrain.com/filesystem) on +pixeldrain. +.SS Usage with account +.PP +To use the personal filesystem you will need a pixeldrain +account (https://pixeldrain.com/register) and either the Prepaid plan or +one of the Patreon-based subscriptions. +After registering and subscribing, your personal filesystem will be +available at this link: https://pixeldrain.com/d/me. +.PP +Go to the API keys page (https://pixeldrain.com/user/api_keys) on your +account and generate a new API key for rclone. +Then run \f[C]rclone config\f[R] and use the API key to create a new +backend. 
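+.PP
+As with other backends, the API key can also be supplied on the fly as
+a connection string instead of a saved remote (the key shown is
+illustrative):
+.IP
+.nf
+\f[C]
+rclone lsf :pixeldrain,api_key=XXX:
+\f[R]
+.fi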
+.PP +Example: +.IP +.nf +\f[C] +No remotes found, make a new one? +n) New remote +d) Delete remote +c) Copy remote +s) Set configuration password +q) Quit config +n/d/c/s/q> n + +Enter name for new remote. +name> pixeldrainfs + +Option Storage. +Type of storage to configure. +Choose a number from below, or type in your own value. +\&... +XX / Pixeldrain Filesystem + \[rs] (pixeldrain) +\&... +Storage> pixeldrain + +Option api_key. +API key for your pixeldrain account. +Found on https://pixeldrain.com/user/api_keys. +Enter a value. Press Enter to leave empty. +api_key> b1bb1e81-9b7b-406b-986a-c9b20be76e15 + +Option directory_id. +Root of the filesystem to use. Set to \[aq]me\[aq] to use your personal filesystem. +Set to a shared directory ID to use a shared directory. +Enter a string value. Press Enter for the default (me). +directory_id> + +Edit advanced config? +y) Yes +n) No (default) +y/n> + +Configuration complete. +Options: +- type: pixeldrain +- api_key: b1bb1e81-9b7b-406b-986a-c9b20be76e15 +Keep this \[dq]pixeldrainfs\[dq] remote? +y) Yes this is OK (default) +e) Edit this remote +d) Delete this remote +y/e/d> + +Current remotes: + +Name Type +==== ==== +pixeldrainfs pixeldrain + +e) Edit existing remote +n) New remote +d) Delete remote +r) Rename remote +c) Copy remote +s) Set configuration password +q) Quit config +e/n/d/r/c/s/q> q +\f[R] +.fi +.SS Usage without account +.PP +It is possible to gain read-only access to publicly shared directories +through rclone. +For this you only need a directory ID. +The directory ID can be found in the URL of a shared directory, the URL +will look like this \f[C]https://pixeldrain.com/d/abcd1234\f[R] where +\f[C]abcd1234\f[R] is the directory ID. +Directory IDs in your own filesystem can also be listed with the +\f[C]lsf\f[R] command: +.PP +\f[C]rclone lsf Pixeldrain: --dirs-only -Fpi\f[R] +.PP +This will print directories in your \f[C]Pixeldrain\f[R] home directory +and their public IDs. +.PP +Enter this directory ID in the rclone config and you will be able to +access the directory. +.SS Standard options +.PP +Here are the Standard options specific to pixeldrain (Pixeldrain +Filesystem). +.SS --pixeldrain-api-key +.PP +API key for your pixeldrain account. +Found on https://pixeldrain.com/user/api_keys. +.PP +Properties: +.IP \[bu] 2 +Config: api_key +.IP \[bu] 2 +Env Var: RCLONE_PIXELDRAIN_API_KEY +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS --pixeldrain-root-folder-id +.PP +Root of the filesystem to use. +.PP +Set to \[aq]me\[aq] to use your personal filesystem. +Set to a shared directory ID to use a shared directory. +.PP +Properties: +.IP \[bu] 2 +Config: root_folder_id +.IP \[bu] 2 +Env Var: RCLONE_PIXELDRAIN_ROOT_FOLDER_ID +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Default: \[dq]me\[dq] +.SS Advanced options +.PP +Here are the Advanced options specific to pixeldrain (Pixeldrain +Filesystem). +.SS --pixeldrain-api-url +.PP +The API endpoint to connect to. +In the vast majority of cases it\[aq]s fine to leave this at default. +It is only intended to be changed for testing purposes. +.PP +Properties: +.IP \[bu] 2 +Config: api_url +.IP \[bu] 2 +Env Var: RCLONE_PIXELDRAIN_API_URL +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Default: \[dq]https://pixeldrain.com/api\[dq] +.SS --pixeldrain-description +.PP +Description of the remote. 
+.PP +Properties: +.IP \[bu] 2 +Config: description +.IP \[bu] 2 +Env Var: RCLONE_PIXELDRAIN_DESCRIPTION +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false +.SS Metadata +.PP +Pixeldrain supports file modes and creation times. +.PP +Here are the possible system metadata items for the pixeldrain backend. +.PP +.TS +tab(@); +lw(11.1n) lw(11.1n) lw(11.1n) lw(16.6n) lw(20.3n). +T{ +Name +T}@T{ +Help +T}@T{ +Type +T}@T{ +Example +T}@T{ +Read Only +T} +_ +T{ +btime +T}@T{ +Time of file birth (creation) +T}@T{ +RFC 3339 +T}@T{ +2006-01-02T15:04:05.999999999Z07:00 +T}@T{ +N +T} +T{ +mode +T}@T{ +File mode +T}@T{ +octal, unix style +T}@T{ +755 +T}@T{ +N +T} +T{ +mtime +T}@T{ +Time of last modification +T}@T{ +RFC 3339 +T}@T{ +2006-01-02T15:04:05.999999999Z07:00 +T}@T{ +N +T} +.TE +.PP +See the metadata (https://rclone.org/docs/#metadata) docs for more info. .SH premiumize.me .PP Paths are specified as \f[C]remote:path\f[R] @@ -62178,11 +64433,11 @@ If your browser doesn\[aq]t open automatically go to the following link: http:// Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -type = premiumizeme -token = {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2029-08-07T18:44:15.548915378+01:00\[dq]} --------------------- +Configuration complete. +Options: +- type: premiumizeme +- token: {\[dq]access_token\[dq]:\[dq]XXX\[dq],\[dq]token_type\[dq]:\[dq]Bearer\[dq],\[dq]refresh_token\[dq]:\[dq]XXX\[dq],\[dq]expiry\[dq]:\[dq]2029-08-07T18:44:15.548915378+01:00\[dq]} +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -62468,12 +64723,12 @@ Option 2fa. Enter a value. Press Enter to leave empty. 2fa> 123456 Remote config --------------------- -[remote] -type = protondrive -user = you\[at]protonmail.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: protondrive +- user: you\[at]protonmail.com +- pass: *** ENCRYPTED *** +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -63146,12 +65401,12 @@ Option 2fa. Enter a value. Press Enter to leave empty. 2fa> 123456 Remote config --------------------- -[remote] -type = protondrive -user = you\[at]protonmail.com -pass = *** ENCRYPTED *** --------------------- +Configuration complete. +Options: +- type: protondrive +- user: you\[at]protonmail.com +- pass: *** ENCRYPTED *** +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -64079,14 +66334,15 @@ y/g/n> n Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent. key_file> Remote config --------------------- -[remote] -host = example.com -user = sftpuser -port = -pass = -key_file = --------------------- +Configuration complete. +Options: +- type: sftp +- host: example.com +- user: sftpuser +- port: +- pass: +- key_file: +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -64559,7 +66815,24 @@ Required: false .PP Raw PEM-encoded private key. .PP -If specified, will override key_file parameter. 
+Note that this should be on a single line with line endings replaced
+with \[aq]\[rs]n\[aq], eg
+.IP
+.nf
+\f[C]
+key_pem = -----BEGIN RSA PRIVATE KEY-----\[rs]nMaMbaIXtE\[rs]n0gAMbMbaSsd\[rs]nMbaass\[rs]n-----END RSA PRIVATE KEY-----
+\f[R]
+.fi
+.PP
+This will generate the single line correctly:
+.IP
+.nf
+\f[C]
+awk \[aq]{printf \[dq]%s\[rs]\[rs]n\[dq], $0}\[aq] < \[ti]/.ssh/id_rsa
+\f[R]
+.fi
+.PP
+If specified, it will override the key_file parameter.
 .PP
 Properties:
 .IP \[bu] 2
@@ -65152,13 +67425,13 @@ Maximum number of SFTP simultaneous connections, 0 for unlimited.
 Note that setting this is very likely to cause deadlocks so it should
 be used with care.
 .PP
-If you are doing a sync or copy then make sure concurrency is one more
+If you are doing a sync or copy then make sure connections is one more
 than the sum of \f[C]--transfers\f[R] and \f[C]--checkers\f[R].
 .PP
 If you use \f[C]--check-first\f[R] then it just needs to be one more
 than the maximum of \f[C]--checkers\f[R] and \f[C]--transfers\f[R].
 .PP
-So for \f[C]concurrency 3\f[R] you\[aq]d use
+So for \f[C]connections 3\f[R] you\[aq]d use
 \f[C]--checkers 2 --transfers 2 --check-first\f[R] or
 \f[C]--checkers 1 --transfers 1\f[R].
 .PP
@@ -65927,11 +68200,11 @@ Access Grant.
 Enter a string value. Press Enter for the default (\[dq]\[dq]).
 access_grant> your-access-grant-received-by-someone-else
 Remote config
---------------------
-[remote]
-type = storj
-access_grant = your-access-grant-received-by-someone-else
---------------------
+Configuration complete.
+Options:
+- type: storj
+- access_grant: your-access-grant-received-by-someone-else
+Keep this \[dq]remote\[dq] remote?
 y) Yes this is OK (default)
 e) Edit this remote
 d) Delete this remote
@@ -65983,14 +68256,14 @@ Encryption Passphrase. To access existing objects enter passphrase used for uplo
 Enter a string value. Press Enter for the default (\[dq]\[dq]).
 passphrase> your-human-readable-encryption-passphrase
 Remote config
---------------------
-[remote]
-type = storj
-satellite_address = 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S\[at]us1.storj.io:7777
-api_key = your-api-key-for-your-storj-project
-passphrase = your-human-readable-encryption-passphrase
-access_grant = the-access-grant-generated-from-the-api-key-and-passphrase
---------------------
+Configuration complete.
+Options:
+- type: storj
+- satellite_address: 12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S\[at]us1.storj.io:7777
+- api_key: your-api-key-for-your-storj-project
+- passphrase: your-human-readable-encryption-passphrase
+- access_grant: the-access-grant-generated-from-the-api-key-and-passphrase
+Keep this \[dq]remote\[dq] remote?
 y) Yes this is OK (default)
 e) Edit this remote
 d) Delete this remote
@@ -66416,11 +68689,11 @@ Remote config
 Username (email address)> nick\[at]craig-wood.com
 Your Sugarsync password is only required during setup and will not be stored.
 password:
---------------------
-[remote]
-type = sugarsync
-refresh_token = https://api.sugarsync.com/app-authorization/XXXXXXXXXXXXXXXXXX
---------------------
+Configuration complete.
+Options:
+- type: sugarsync
+- refresh_token: https://api.sugarsync.com/app-authorization/XXXXXXXXXXXXXXXXXX
+Keep this \[dq]remote\[dq] remote?
 y) Yes this is OK (default)
 e) Edit this remote
 d) Delete this remote
@@ -66681,11 +68954,6 @@ of an rclone union remote.
See List of backends that do not support rclone about (https://rclone.org/overview/#optional-features) and rclone about (https://rclone.org/commands/rclone_about/) -.SH Tardigrade -.PP -The Tardigrade backend has been renamed to be the Storj -backend (https://rclone.org/storj/). -Old configuration files will continue to work. .SH Uloz.to .PP Paths are specified as \f[C]remote:path\f[R] @@ -67295,11 +69563,11 @@ Cache time of usage and free space (in seconds). This option is only useful when Enter a signed integer. Press Enter for the default (\[dq]120\[dq]). cache_time> Remote config --------------------- -[remote] -type = union -upstreams = remote1:dir1 remote2:dir2 remote3:dir3 --------------------- +Configuration complete. +Options: +- type: union +- upstreams: remote1:dir1 remote2:dir2 remote3:dir3 +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -67812,15 +70080,15 @@ password: Bearer token instead of user/pass (e.g. a Macaroon) bearer_token> Remote config --------------------- -[remote] -type = webdav -url = https://example.com/remote.php/webdav/ -vendor = nextcloud -user = user -pass = *** ENCRYPTED *** -bearer_token = --------------------- +Configuration complete. +Options: +- type: webdav +- url: https://example.com/remote.php/webdav/ +- vendor: nextcloud +- user: user +- pass: *** ENCRYPTED *** +- bearer_token: +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -68108,6 +70376,20 @@ Env Var: RCLONE_WEBDAV_OWNCLOUD_EXCLUDE_MOUNTS Type: bool .IP \[bu] 2 Default: false +.SS --webdav-unix-socket +.PP +Path to a unix domain socket to dial to, instead of opening a TCP +connection directly +.PP +Properties: +.IP \[bu] 2 +Config: unix_socket +.IP \[bu] 2 +Env Var: RCLONE_WEBDAV_UNIX_SOCKET +.IP \[bu] 2 +Type: string +.IP \[bu] 2 +Required: false .SS --webdav-description .PP Description of the remote. @@ -68388,12 +70670,13 @@ If your browser doesn\[aq]t open automatically go to the following link: http:// Log in and authorize rclone for access Waiting for code... Got code --------------------- -[remote] -client_id = -client_secret = -token = {\[dq]access_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]token_type\[dq]:\[dq]OAuth\[dq],\[dq]expiry\[dq]:\[dq]2016-12-29T12:27:11.362788025Z\[dq]} --------------------- +Configuration complete. +Options: +- type: yandex +- client_id: +- client_secret: +- token: {\[dq]access_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]token_type\[dq]:\[dq]OAuth\[dq],\[dq]expiry\[dq]:\[dq]2016-12-29T12:27:11.362788025Z\[dq]} +Keep this \[dq]remote\[dq] remote? y) Yes this is OK e) Edit this remote d) Delete this remote @@ -68582,6 +70865,21 @@ Env Var: RCLONE_YANDEX_ENCODING Type: Encoding .IP \[bu] 2 Default: Slash,Del,Ctl,InvalidUtf8,Dot +.SS --yandex-spoof-ua +.PP +Set the user agent to match an official version of the yandex disk +client. +May help with upload performance. +.PP +Properties: +.IP \[bu] 2 +Config: spoof_ua +.IP \[bu] 2 +Env Var: RCLONE_YANDEX_SPOOF_UA +.IP \[bu] 2 +Type: bool +.IP \[bu] 2 +Default: true .SS --yandex-description .PP Description of the remote. 
@@ -68686,12 +70984,12 @@ Choose a number from below, or type in your own value
 1 / General
   \[rs] \[dq]4u2869d2aa6fca04f4f2f896b6539243b85b1\[dq]
 Enter a Workspace ID> 1
---------------------
-[remote]
-type = zoho
-token = {\[dq]access_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]token_type\[dq]:\[dq]Zoho-oauthtoken\[dq],\[dq]refresh_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]expiry\[dq]:\[dq]2020-10-12T00:54:52.370275223+02:00\[dq]}
-root_folder_id = xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
---------------------
+Configuration complete.
+Options:
+- type: zoho
+- token: {\[dq]access_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]token_type\[dq]:\[dq]Zoho-oauthtoken\[dq],\[dq]refresh_token\[dq]:\[dq]xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\[dq],\[dq]expiry\[dq]:\[dq]2020-10-12T00:54:52.370275223+02:00\[dq]}
+- root_folder_id: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+Keep this \[dq]remote\[dq] remote?
 y) Yes this is OK (default)
 e) Edit this remote
 d) Delete this remote
@@ -69384,13 +71682,9 @@ This format requires absolute paths and the use of prefix
 For convenience rclone will automatically convert regular paths into
 the corresponding extended-length paths, so in most cases you do not
 have to worry about this (read more below).
-.PP
-Note that Windows supports using the same prefix
-\f[C]\[rs]\[rs]?\[rs]\f[R] to specify path to volumes identified by
-their GUID, e.g.
+Using the same prefix \f[C]\[rs]\[rs]?\[rs]\f[R] it is also possible to
+specify the path to volumes identified by their GUID, e.g.
 \f[C]\[rs]\[rs]?\[rs]Volume{b75e2c83-0000-0000-0000-602f00000000}\[rs]some\[rs]path\f[R].
-This is \f[I]not\f[R] supported in rclone, due to an
-issue (https://github.com/golang/go/issues/39785) in go.
 .SS Long paths
 .PP
 Rclone handles long paths automatically, by converting all paths to
@@ -69846,6 +72140,38 @@ Env Var: RCLONE_LOCAL_CASE_INSENSITIVE
 Type: bool
 .IP \[bu] 2
 Default: false
+.SS --local-no-clone
+.PP
+Disable reflink cloning for server-side copies.
+.PP
+Normally, for local-to-local transfers, rclone will \[dq]clone\[dq] the
+file when possible, and fall back to \[dq]copying\[dq] only when cloning
+is not supported.
+.PP
+Cloning creates a shallow copy (or \[dq]reflink\[dq]) which initially
+shares blocks with the original file.
+Unlike a \[dq]hardlink\[dq], the two files are independent and neither
+will affect the other if subsequently modified.
+.PP
+Cloning is usually preferable to copying, as it is much faster and is
+deduplicated by default (i.e.
+having two identical files does not consume more storage than having
+just one). However, for use cases where data redundancy is preferable,
+--local-no-clone can be used to disable cloning and force \[dq]deep\[dq]
+copies.
+.PP
+Currently, cloning is only supported when using APFS on macOS (support
+for other platforms may be added in the future).
+.PP
+Properties:
+.IP \[bu] 2
+Config: no_clone
+.IP \[bu] 2
+Env Var: RCLONE_LOCAL_NO_CLONE
+.IP \[bu] 2
+Type: bool
+.IP \[bu] 2
+Default: false
 .SS --local-no-preallocate
 .PP
 Disable preallocation of disk space for transferred files.
@@ -70146,6 +72472,355 @@ Options: .IP \[bu] 2 \[dq]error\[dq]: return an error based on option value .SH Changelog +.SS v1.68.0 - 2024-09-08 +.PP +See commits (https://github.com/rclone/rclone/compare/v1.67.0...v1.68.0) +.IP \[bu] 2 +New backends +.RS 2 +.IP \[bu] 2 +Files.com (Sam Harrison) +.IP \[bu] 2 +Gofile (https://rclone.org/gofile/) (Nick Craig-Wood) +.IP \[bu] 2 +Pixeldrain (https://rclone.org/pixeldrain/) (Fornax) +.RE +.IP \[bu] 2 +Changed backends +.RS 2 +.IP \[bu] 2 +S3 (https://rclone.org/s3/) backend updated to use AWS +SDKv2 (https://github.com/aws/aws-sdk-go-v2) as v1 is now unsupported. +.RS 2 +.IP \[bu] 2 +The matrix of providers and auth methods is huge and there could be +problems with obscure combinations. +.IP \[bu] 2 +Please report problems in a new +issue (https://github.com/rclone/rclone/issues/new/choose) on Github. +.RE +.RE +.IP \[bu] 2 +New commands +.RS 2 +.IP \[bu] 2 +config +encryption (https://rclone.org/commands/rclone_config_encryption/): set, +remove and check to manage config file encryption (Nick Craig-Wood) +.RE +.IP \[bu] 2 +New Features +.RS 2 +.IP \[bu] 2 +build +.RS 2 +.IP \[bu] 2 +Update to go1.23 and make go1.21 the minimum required version (Nick +Craig-Wood) +.IP \[bu] 2 +Update all dependencies (Nick Craig-Wood) +.IP \[bu] 2 +Disable wasm/js build due to go bug +#64856 (https://github.com/golang/go/issues/64856) (Nick Craig-Wood) +.IP \[bu] 2 +Enable custom linting rules with ruleguard via gocritic (albertony) +.IP \[bu] 2 +Update logging statements to make \f[C]--use-json-log\f[R] work always +(albertony) +.IP \[bu] 2 +Adding new code quality tests and fixing the fallout (albertony) +.RE +.IP \[bu] 2 +config +.RS 2 +.IP \[bu] 2 +Internal config re-organised to be more consistent and make it available +from the rc (Nick Craig-Wood) +.IP \[bu] 2 +Avoid remotes with empty names from the environment (albertony) +.IP \[bu] 2 +Make listing of remotes more consistent (albertony) +.IP \[bu] 2 +Make getting config values more consistent (albertony) +.IP \[bu] 2 +Use \f[C]--password-command\f[R] to set config file password if supplied +(Nick Craig-Wood) +.RE +.IP \[bu] 2 +doc fixes (albertony, crystalstall, David Seifert, Eng Zer Jun, Ernie +Hershey, Florian Klink, John Oxley, kapitainsky, Mathieu Moreau, Nick +Craig-Wood, nipil, P\['e]tr Bozs\['o], Russ Bubley, Sam Harrison, +Thearas, URenko, Will Miles, yuval-cloudinary) +.IP \[bu] 2 +fs: Allow semicolons as well as spaces in \f[C]--bwlimit\f[R] timetable +parsing (Kyle Reynolds) +.IP \[bu] 2 +help +.RS 2 +.IP \[bu] 2 +Global flags help command now takes glob filter (albertony) +.IP \[bu] 2 +Make help command output less distracting (albertony) +.RE +.IP \[bu] 2 +lib/encoder: Add Raw encoding for use where no encoding at all is +required, eg \f[C]--local-encoding Raw\f[R] (URenko) +.IP \[bu] 2 +listremotes: Added options for filtering, ordering and json output +(albertony) +.IP \[bu] 2 +nfsmount +.RS 2 +.IP \[bu] 2 +Make the \f[C]--sudo\f[R] flag work for umount as well as mount (Nick +Craig-Wood) +.IP \[bu] 2 +Add \f[C]-o tcp\f[R] option to NFS mount options to fix mounting under +Linux (Nick Craig-Wood) +.RE +.IP \[bu] 2 +operations: copy: generate stable partial suffix (Georg Welzel) +.IP \[bu] 2 +rc +.RS 2 +.IP \[bu] 2 +Add options/info (https://rclone.org/rc/#options-info) call to enumerate +options (Nick Craig-Wood) +.IP \[bu] 2 +Add option blocks parameter to +options/get (https://rclone.org/rc/#options-get) and options/info (Nick +Craig-Wood) +.IP \[bu] 2 +Add vfs/queue 
(https://rclone.org/rc/#vfs-queue) to show the status of +the upload queue (Nick Craig-Wood) +.IP \[bu] 2 +Add vfs/queue-set-expiry (https://rclone.org/rc/#vfs-queue-set-expiry) +to adjust expiry of items in the VFS queue (Nick Craig-Wood) +.IP \[bu] 2 +Add \f[C]--unix-socket\f[R] option to \f[C]rc\f[R] command (Florian +Klink) +.IP \[bu] 2 +Prevent unmount rc command from sending a \f[C]STOPPING=1\f[R] sd-notify +message (AThePeanut4) +.RE +.IP \[bu] 2 +rcserver: Implement prometheus +metrics (https://rclone.org/docs/#metrics) on a dedicated port (Oleg +Kunitsyn) +.IP \[bu] 2 +serve dlna +.RS 2 +.IP \[bu] 2 +Also look at \[dq]Subs\[dq] subdirectory (Florian Klink) +.IP \[bu] 2 +Don\[aq]t swallow \f[C]video.{idx,sub}\f[R] (Florian Klink) +.IP \[bu] 2 +Set more correct mime type (Florian Klink) +.RE +.IP \[bu] 2 +serve nfs +.RS 2 +.IP \[bu] 2 +Implement on disk cache for file handles selected with +\f[C]--nfs-cache-type\f[R] (Nick Craig-Wood) +.IP \[bu] 2 +Add tracing to filesystem calls (Nick Craig-Wood) +.IP \[bu] 2 +Mask unimplemented error from chmod (Nick Craig-Wood) +.IP \[bu] 2 +Unify the nfs library logging with rclone\[aq]s logging better (Nick +Craig-Wood) +.IP \[bu] 2 +Fix incorrect user id and group id exported to NFS (Nick Craig-Wood) +.RE +.IP \[bu] 2 +serve s3 +.RS 2 +.IP \[bu] 2 +Implement \f[C]--auth-proxy\f[R] (Sawjan Gurung) +.IP \[bu] 2 +Update to AWS SDKv2 by updating \f[C]github.com/rclone/gofakes3\f[R] +(Nick Craig-Wood) +.RE +.RE +.IP \[bu] 2 +Bug Fixes +.RS 2 +.IP \[bu] 2 +bisync: Fix sync time problems with backends that round time (eg +Dropbox) (nielash) +.IP \[bu] 2 +serve dlna: Fix panic: invalid argument to Int63n (Nick Craig-Wood) +.RE +.IP \[bu] 2 +VFS +.RS 2 +.IP \[bu] 2 +Add +--vfs-read-chunk-streams (https://rclone.org/commands/rclone_mount/#vfs-read-chunk-streams-0-1) +to parallel read chunks from files (Nick Craig-Wood) +.RS 2 +.IP \[bu] 2 +This can increase mount performance on high bandwidth or large latency +links +.RE +.IP \[bu] 2 +Fix cache encoding with special characters (URenko) +.RE +.IP \[bu] 2 +Local +.RS 2 +.IP \[bu] 2 +Fix encoding of root path fix (URenko) +.IP \[bu] 2 +Add server-side copy (using clone) with xattrs on macOS (nielash) +.RS 2 +.IP \[bu] 2 +\f[C]--local-no-clone\f[R] flag to disable cloning for server-side +copies (nielash) +.RE +.IP \[bu] 2 +Support setting custom \f[C]--metadata\f[R] during server-side Copy +(nielash) +.RE +.IP \[bu] 2 +Azure Blob +.RS 2 +.IP \[bu] 2 +Allow anonymous access for public resources (Nick Craig-Wood) +.RE +.IP \[bu] 2 +B2 +.RS 2 +.IP \[bu] 2 +Include custom upload headers in large file info (Pat Patterson) +.RE +.IP \[bu] 2 +Drive +.RS 2 +.IP \[bu] 2 +Fix copying Google Docs to a backend which only supports SHA1 (Nick +Craig-Wood) +.RE +.IP \[bu] 2 +Fichier +.RS 2 +.IP \[bu] 2 +Fix detection of Flood Detected error (Nick Craig-Wood) +.IP \[bu] 2 +Fix server side move (Nick Craig-Wood) +.RE +.IP \[bu] 2 +HTTP +.RS 2 +.IP \[bu] 2 +Reload client certificates on expiry (Saleh Dindar) +.IP \[bu] 2 +Support listening on passed FDs (Florian Klink) +.RE +.IP \[bu] 2 +Jottacloud +.RS 2 +.IP \[bu] 2 +Fix setting of metadata on server side move (albertony) +.RE +.IP \[bu] 2 +Onedrive +.RS 2 +.IP \[bu] 2 +Fix nil pointer error when uploading small files (Nick Craig-Wood) +.RE +.IP \[bu] 2 +Pcloud +.RS 2 +.IP \[bu] 2 +Implement \f[C]SetModTime\f[R] (Georg Welzel) +.IP \[bu] 2 +Implement \f[C]OpenWriterAt\f[R] feature to enable multipart uploads +(Georg Welzel) +.RE +.IP \[bu] 2 +Pikpak +.RS 2 +.IP \[bu] 2 
+Improve data consistency by ensuring async tasks complete (wiserain)
+.IP \[bu] 2
+Implement custom hash to replace wrong sha1 (wiserain)
+.IP \[bu] 2
+Fix error with \f[C]copyto\f[R] command (wiserain)
+.IP \[bu] 2
+Optimize file move by removing unnecessary \f[C]readMetaData()\f[R] call
+(wiserain)
+.IP \[bu] 2
+Non-buffered hash calculation for local source files (wiserain)
+.IP \[bu] 2
+Optimize upload by pre-fetching gcid from API (wiserain)
+.IP \[bu] 2
+Correct file transfer progress for uploads by hash (wiserain)
+.IP \[bu] 2
+Update to using AWS SDK v2 (wiserain)
+.RE
+.IP \[bu] 2
+S3
+.RS 2
+.IP \[bu] 2
+Update to using AWS SDK v2 (Nick Craig-Wood)
+.RS 2
+.IP \[bu] 2
+Add \f[C]--s3-sdk-log-mode\f[R] to control SDKv2 debugging (Nick
+Craig-Wood)
+.RE
+.IP \[bu] 2
+Fix incorrect region for Magalu provider (Filipe Herculano)
+.IP \[bu] 2
+Allow restoring from intelligent-tiering storage class (Pawel Palucha)
+.RE
+.IP \[bu] 2
+SFTP
+.RS 2
+.IP \[bu] 2
+Use \f[C]uint32\f[R] for mtime to save memory (Tomasz Melcer)
+.IP \[bu] 2
+Ignore useless errors when closing the connection pool (Nick
+Craig-Wood)
+.IP \[bu] 2
+Support listening on passed FDs (Florian Klink)
+.RE
+.IP \[bu] 2
+Swift
+.RS 2
+.IP \[bu] 2
+Add workarounds for bad listings in Ceph RGW (Paul Collins)
+.IP \[bu] 2
+Add total/free space info in \f[C]about\f[R] command.
+(fsantagostinobietti)
+.RE
+.IP \[bu] 2
+Ulozto
+.RS 2
+.IP \[bu] 2
+Fix upload of > 2GB files on 32 bit platforms (Tobias Markus)
+.RE
+.IP \[bu] 2
+WebDAV
+.RS 2
+.IP \[bu] 2
+Add \f[C]--webdav-unix-socket\f[R] to connect to a unix socket
+(Florian Klink)
+.RE
+.IP \[bu] 2
+Yandex
+.RS 2
+.IP \[bu] 2
+Implement custom user agent to help with upload speeds (Sebastian
+B\[:u]nger)
+.RE
+.IP \[bu] 2
+Zoho
+.RS 2
+.IP \[bu] 2
+Fix inefficiencies uploading with new API to avoid throttling (Nick
+Craig-Wood)
+.RE
 .SS v1.67.0 - 2024-06-14
 .PP
 See commits (https://github.com/rclone/rclone/compare/v1.66.0...v1.67.0)
@@ -84509,9 +87184,12 @@ This will make the garbage collector work harder, reducing memory size
 at the expense of CPU usage.
 .PP
 The most common cause of rclone using lots of memory is a single
-directory with thousands or millions of files in.
+directory with millions of files in.
 Rclone has to load this entirely into memory as rclone objects.
 Each rclone object takes 0.5k-1k of memory.
+There is a workaround for
+this (https://github.com/rclone/rclone/wiki/Big-syncs-with-millions-of-files)
+which involves a bit of scripting.
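+.PP
+In outline, the scripted workaround splits one big sync into many
+smaller ones, along these lines (a sketch only, assuming a single
+level of subdirectories; adjust the paths and flags to your setup):
+.IP
+.nf
+\f[C]
+# sync each top level directory as a separate, smaller run
+rclone lsf --dirs-only remote:bigdir | while read dir; do
+    rclone sync \[dq]remote:bigdir/$dir\[dq] \[dq]/local/bigdir/$dir\[dq]
+done
+\f[R]
+.fi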
 .SS Rclone changes fullwidth Unicode punctuation marks in file names
 .PP
 For example: On a Windows system, you have a file with name
@@ -86286,6 +88964,55 @@ Micha\[/l] Dzienisiewicz
 .IP \[bu] 2
 Florian Klink
 .IP \[bu] 2
 Bill Fraser
+.IP \[bu] 2
+Thearas
+.IP \[bu] 2
+Filipe Herculano
+.IP \[bu] 2
+Russ Bubley
+.IP \[bu] 2
+Paul Collins
+.IP \[bu] 2
+Tomasz Melcer
+.IP \[bu] 2
+itsHenry <2671230065@qq.com>
+.IP \[bu] 2
+Ke Wang
+.IP \[bu] 2
+AThePeanut4 <49614525+AThePeanut4@users.noreply.github.com>
+.IP \[bu] 2
+Tobias Markus
+.IP \[bu] 2
+Ernie Hershey
+.IP \[bu] 2
+Will Miles
+.IP \[bu] 2
+David Seifert <16636962+SoapGentoo@users.noreply.github.com>
+.IP \[bu] 2
+Fornax
+.IP \[bu] 2
+Sam Harrison
+.IP \[bu] 2
+P\['e]ter Bozs\['o] <3806723+peterbozso@users.noreply.github.com>
+.IP \[bu] 2
+Georg Welzel
+.IP \[bu] 2
+John Oxley
+.IP \[bu] 2
+Pawel Palucha
+.IP \[bu] 2
+crystalstall
+.IP \[bu] 2
+nipil
+.IP \[bu] 2
+yuval-cloudinary <46710068+yuval-cloudinary@users.noreply.github.com>
+.IP \[bu] 2
+Mathieu Moreau
+.IP \[bu] 2
+fsantagostinobietti
+<6057026+fsantagostinobietti@users.noreply.github.com>
+.IP \[bu] 2
+Oleg Kunitsyn <114359669+hiddenmarten@users.noreply.github.com>
 .SH Contact the rclone project
 .SS Forum
 .PP
diff --git a/vfs/rc.go b/vfs/rc.go
index 6e84c26f9..72060984b 100644
--- a/vfs/rc.go
+++ b/vfs/rc.go
@@ -11,6 +11,7 @@ import (
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/cache"
 	"github.com/rclone/rclone/fs/rc"
+	"github.com/rclone/rclone/vfs/vfscache/writeback"
 )
 
 const getVFSHelp = `
@@ -437,3 +438,111 @@ func rcStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 	}
 	return vfs.Stats(), nil
 }
+
+func init() {
+	rc.Add(rc.Call{
+		Path:  "vfs/queue",
+		Title: "Queue info for a VFS.",
+		Help: strings.ReplaceAll(`
+This returns info about the upload queue for the selected VFS.
+
+This is only useful if |--vfs-cache-mode| > off. If you call it when
+the |--vfs-cache-mode| is off, it will return an empty result.
+
+    {
+        "queued": // an array of files queued for upload
+        [
+            {
+                "name": "file",     // string: name (full path) of the file,
+                "id": 123,          // integer: id of this item in the queue,
+                "size": 79,         // integer: size of the file in bytes
+                "expiry": 1.5,      // float: time until file is eligible for transfer, lowest goes first
+                "tries": 1,         // integer: number of times we have tried to upload
+                "delay": 5.0,       // float: seconds between upload attempts
+                "uploading": false, // boolean: true if item is being uploaded
+            },
+        ],
+    }
+
+The |expiry| time is the time until the file is eligible for being
+uploaded in floating point seconds. This may go negative. As rclone
+only transfers |--transfers| files at once, only the lowest
+|--transfers| expiry times will have |uploading| as |true|. So there
+may be files with negative expiry times for which |uploading| is
+|false|.
+
+`, "|", "`") + getVFSHelp,
+		Fn: rcQueue,
+	})
+}
+
+func rcQueue(ctx context.Context, in rc.Params) (out rc.Params, err error) {
+	vfs, err := getVFS(in)
+	if err != nil {
+		return nil, err
+	}
+	if vfs.cache == nil {
+		return nil, nil
+	}
+	return vfs.cache.Queue(), nil
+}
+
+func init() {
+	rc.Add(rc.Call{
+		Path:  "vfs/queue-set-expiry",
+		Title: "Set the expiry time for an item queued for upload.",
+		Help: strings.ReplaceAll(`
+
+Use this to adjust the |expiry| time for an item in the upload queue.
+You will need to read the |id| of the item using |vfs/queue| before
+using this call.
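+
+For example, to make an item eligible for upload straight away (where
+123 stands in for an |id| read from |vfs/queue|):
+
+    rclone rc vfs/queue-set-expiry id=123 expiry=-1000000000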
+
+You can then set |expiry| to a floating point number of seconds from
+now when the item is eligible for upload. If you want the item to be
+uploaded as soon as possible then set it to a large negative number (eg
+-1000000000). If you want the upload of the item to be delayed
+for a long time then set it to a large positive number.
+
+Setting the |expiry| of an item which has already started uploading
+will have no effect - the item will carry on being uploaded.
+
+This will return an error if called with |--vfs-cache-mode| off or if
+the |id| passed is not found.
+
+This takes the following parameters
+
+- |fs| - select the VFS in use (optional)
+- |id| - a numeric ID as returned from |vfs/queue|
+- |expiry| - a new expiry time as floating point seconds
+
+This returns an empty result on success, or an error.
+
+`, "|", "`") + getVFSHelp,
+		Fn: rcQueueSetExpiry,
+	})
+}
+
+func rcQueueSetExpiry(ctx context.Context, in rc.Params) (out rc.Params, err error) {
+	vfs, err := getVFS(in)
+	if err != nil {
+		return nil, err
+	}
+	if vfs.cache == nil {
+		return nil, rc.NewErrParamInvalid(errors.New("can't call this unless using the VFS cache"))
+	}
+
+	// Read input values
+	id, err := in.GetInt64("id")
+	if err != nil {
+		return nil, err
+	}
+	expiry, err := in.GetFloat64("expiry")
+	if err != nil {
+		return nil, err
+	}
+
+	// Set expiry
+	expiryTime := time.Now().Add(time.Duration(float64(time.Second) * expiry))
+	err = vfs.cache.QueueSetExpiry(writeback.Handle(id), expiryTime)
+	return nil, err
+}
diff --git a/vfs/test_vfs/test_vfs.go b/vfs/test_vfs/test_vfs.go
index 4725a7bd7..19d57a670 100644
--- a/vfs/test_vfs/test_vfs.go
+++ b/vfs/test_vfs/test_vfs.go
@@ -7,7 +7,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"log"
 	"math"
 	"math/rand"
 	"os"
@@ -16,6 +15,7 @@ import (
 	"sync/atomic"
 	"time"
 
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/file"
 	"github.com/rclone/rclone/lib/random"
 )
@@ -93,13 +93,13 @@ func (t *Test) randomTest() {
 
 // logf logs things - not shown unless -v
 func (t *Test) logf(format string, a ...interface{}) {
 	if *verbose {
-		log.Printf(t.prefix+format, a...)
+		fs.Logf(nil, t.prefix+format, a...)
 	}
 }
 
 // errorf logs errors
 func (t *Test) errorf(format string, a ...interface{}) {
-	log.Printf(t.prefix+"ERROR: "+format, a...)
+	fs.Logf(nil, t.prefix+"ERROR: "+format, a...)
 }
 
 // list test
@@ -286,7 +286,7 @@ func main() {
 	flag.Parse()
 	args := flag.Args()
 	if len(args) != 1 {
-		log.Fatalf("%s: Syntax [opts] <dir>", os.Args[0])
+		fs.Fatalf(nil, "%s: Syntax [opts] <dir>", os.Args[0])
 	}
 	dir := args[0]
 	_ = file.MkdirAll(dir, 0777)
diff --git a/vfs/vfscache/cache.go b/vfs/vfscache/cache.go
index 3d34dd82a..a0af31bd1 100644
--- a/vfs/vfscache/cache.go
+++ b/vfs/vfscache/cache.go
@@ -170,6 +170,18 @@ func (c *Cache) Stats() (out rc.Params) {
 	return out
 }
 
+// Queue returns info about the upload queue of the Cache
+func (c *Cache) Queue() (out rc.Params) {
+	out = make(rc.Params)
+	out["queue"] = c.writeback.Queue()
+	return out
+}
+
+// QueueSetExpiry updates the expiry of a single item in the upload queue
+func (c *Cache) QueueSetExpiry(id writeback.Handle, expiry time.Time) error {
+	return c.writeback.SetExpiry(id, expiry)
+}
+
 // createDir creates a directory path, along with any necessary parents
 func createDir(dir string) error {
 	return file.MkdirAll(dir, 0700)
diff --git a/vfs/vfscache/cache_test.go b/vfs/vfscache/cache_test.go
index 61357c2f9..9f8abfa64 100644
--- a/vfs/vfscache/cache_test.go
+++ b/vfs/vfscache/cache_test.go
@@ -14,6 +14,7 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fstest"
 	"github.com/rclone/rclone/lib/diskusage"
+	"github.com/rclone/rclone/vfs/vfscache/writeback"
 	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -727,3 +728,26 @@ func TestCacheStats(t *testing.T) {
 	assert.Equal(t, 0, out["uploadsInProgress"])
 	assert.Equal(t, 0, out["uploadsQueued"])
 }
+
+func TestCacheQueue(t *testing.T) {
+	_, c := newTestCache(t)
+
+	out := c.Queue()
+
+	// We've checked the contents of queue in the writeback tests
+	// Just check it is present here
+	queue, found := out["queue"]
+	require.True(t, found)
+	_, ok := queue.([]writeback.QueueInfo)
+	require.True(t, ok)
+}
+
+func TestCacheQueueSetExpiry(t *testing.T) {
+	_, c := newTestCache(t)
+
+	// Check this returns the correct error when called so we know
+	// it is plumbed in correctly. The actual tests are done in
+	// writeback.
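+	// 123123 is an arbitrary id which cannot be present in a fresh
+	// cache, so ErrorIDNotFound is the expected result.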
+ err := c.QueueSetExpiry(123123, time.Now()) + assert.Equal(t, writeback.ErrorIDNotFound, err) +} diff --git a/vfs/vfscache/item.go b/vfs/vfscache/item.go index 702847719..b76688db1 100644 --- a/vfs/vfscache/item.go +++ b/vfs/vfscache/item.go @@ -737,7 +737,7 @@ func (item *Item) Close(storeFn StoreFn) (err error) { item.c.writeback.SetID(&item.writeBackID) id := item.writeBackID item.mu.Unlock() - item.c.writeback.Add(id, item.name, item.modified, func(ctx context.Context) error { + item.c.writeback.Add(id, item.name, item.info.Size, item.modified, func(ctx context.Context) error { return item.store(ctx, storeFn) }) item.mu.Lock() diff --git a/vfs/vfscache/writeback/writeback.go b/vfs/vfscache/writeback/writeback.go index 9801a2d3c..6212bfec9 100644 --- a/vfs/vfscache/writeback/writeback.go +++ b/vfs/vfscache/writeback/writeback.go @@ -6,6 +6,7 @@ import ( "container/heap" "context" "errors" + "sort" "sync" "sync/atomic" "time" @@ -62,6 +63,7 @@ func New(ctx context.Context, opt *vfscommon.Options) *WriteBack { // writeBack.mu must be held to manipulate this type writeBackItem struct { name string // name of the item so we don't have to read it from item + size int64 // size of the item so we don't have to read it from item id Handle // id of the item index int // index into the priority queue for update expiry time.Time // When this expires we will write it back @@ -135,10 +137,11 @@ func (wb *WriteBack) _newExpiry() time.Time { // make a new writeBackItem // // call with the lock held -func (wb *WriteBack) _newItem(id Handle, name string) *writeBackItem { +func (wb *WriteBack) _newItem(id Handle, name string, size int64) *writeBackItem { wb.SetID(&id) wbItem := &writeBackItem{ name: name, + size: size, expiry: wb._newExpiry(), delay: time.Duration(wb.opt.WriteBack), id: id, @@ -256,13 +259,13 @@ func (wb *WriteBack) SetID(pid *Handle) { // // If modified is false then it it doesn't cancel a pending upload if // there is one as there is no need. 
-func (wb *WriteBack) Add(id Handle, name string, modified bool, putFn PutFn) Handle {
+func (wb *WriteBack) Add(id Handle, name string, size int64, modified bool, putFn PutFn) Handle {
 	wb.mu.Lock()
 	defer wb.mu.Unlock()
 
 	wbItem, ok := wb.lookup[id]
 	if !ok {
-		wbItem = wb._newItem(id, name)
+		wbItem = wb._newItem(id, name, size)
 	} else {
 		if wbItem.uploading && modified {
 			// We are uploading already so cancel the upload
@@ -272,6 +275,7 @@ func (wb *WriteBack) Add(id Handle, name string, modified bool, putFn PutFn) Han
 		wb.items._update(wbItem, wb._newExpiry())
 	}
 	wbItem.putFn = putFn
+	wbItem.size = size
 	wb._resetTimer()
 	return wbItem.id
 }
@@ -463,3 +467,70 @@ func (wb *WriteBack) Stats() (uploadsInProgress, uploadsQueued int) {
 	defer wb.mu.Unlock()
 	return wb.uploads, len(wb.items)
 }
+
+// QueueInfo is information about an item queued for upload, returned
+// by Queue
+type QueueInfo struct {
+	Name      string  `json:"name"`      // name (full path) of the file
+	ID        Handle  `json:"id"`        // id of queue item
+	Size      int64   `json:"size"`      // integer size of the file in bytes
+	Expiry    float64 `json:"expiry"`    // seconds from now when the file becomes eligible for transfer, lowest goes first
+	Tries     int     `json:"tries"`     // number of times we have tried to upload
+	Delay     float64 `json:"delay"`     // delay between upload attempts (s)
+	Uploading bool    `json:"uploading"` // true if item is being uploaded
+}
+
+// Queue returns info about the current upload queue
+func (wb *WriteBack) Queue() []QueueInfo {
+	wb.mu.Lock()
+	defer wb.mu.Unlock()
+
+	items := make([]QueueInfo, 0, len(wb.lookup))
+	now := time.Now()
+
+	// Lookup all the items in no particular order
+	for _, wbItem := range wb.lookup {
+		items = append(items, QueueInfo{
+			Name:      wbItem.name,
+			ID:        wbItem.id,
+			Size:      wbItem.size,
+			Expiry:    wbItem.expiry.Sub(now).Seconds(),
+			Tries:     wbItem.tries,
+			Delay:     wbItem.delay.Seconds(),
+			Uploading: wbItem.uploading,
+		})
+	}
+
+	// Sort by Uploading first then Expiry
+	sort.Slice(items, func(i, j int) bool {
+		if items[i].Uploading != items[j].Uploading {
+			return items[i].Uploading
+		}
+		return items[i].Expiry < items[j].Expiry
+	})
+
+	return items
+}
+
+// ErrorIDNotFound is returned from SetExpiry when the item is not found
+var ErrorIDNotFound = errors.New("id not found in queue")
+
+// SetExpiry sets the expiry time for an item in the writeback queue.
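+//
+// Setting the expiry in the past makes the item eligible for upload
+// immediately; the queue timer is reset so the change takes effect
+// straight away.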
+// +// id should be as returned from the Queue call +// +// If the item isn't found then it will return ErrorIDNotFound +func (wb *WriteBack) SetExpiry(id Handle, expiry time.Time) error { + wb.mu.Lock() + defer wb.mu.Unlock() + + wbItem, ok := wb.lookup[id] + if !ok { + return ErrorIDNotFound + } + + // Update the expiry with the user requested value + wb.items._update(wbItem, expiry) + wb._resetTimer() + return nil +} diff --git a/vfs/vfscache/writeback/writeback_test.go b/vfs/vfscache/writeback/writeback_test.go index 64645eccf..872d19e71 100644 --- a/vfs/vfscache/writeback/writeback_test.go +++ b/vfs/vfscache/writeback/writeback_test.go @@ -13,6 +13,7 @@ import ( "github.com/rclone/rclone/fs" "github.com/rclone/rclone/vfs/vfscommon" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func newTestWriteBack(t *testing.T) (wb *WriteBack, cancel func()) { @@ -122,15 +123,15 @@ func TestWriteBackItemCRUD(t *testing.T) { // _peekItem empty assert.Nil(t, wb._peekItem()) - wbItem1 := wb._newItem(0, "one") + wbItem1 := wb._newItem(0, "one", 10) checkOnHeap(t, wb, wbItem1) checkInLookup(t, wb, wbItem1) - wbItem2 := wb._newItem(0, "two") + wbItem2 := wb._newItem(0, "two", 10) checkOnHeap(t, wb, wbItem2) checkInLookup(t, wb, wbItem2) - wbItem3 := wb._newItem(0, "three") + wbItem3 := wb._newItem(0, "three", 10) checkOnHeap(t, wb, wbItem3) checkInLookup(t, wb, wbItem3) @@ -201,7 +202,7 @@ func TestWriteBackResetTimer(t *testing.T) { // Check timer is stopped assertTimerRunning(t, wb, false) - _ = wb._newItem(0, "three") + _ = wb._newItem(0, "three", 10) // Reset the timer on an queue with stuff wb._resetTimer() @@ -297,7 +298,7 @@ func TestWriteBackAddOK(t *testing.T) { wb.SetID(&inID) assert.Equal(t, Handle(1), inID) - id := wb.Add(inID, "one", true, pi.put) + id := wb.Add(inID, "one", 10, true, pi.put) assert.Equal(t, inID, id) wbItem := wb.lookup[id] checkOnHeap(t, wb, wbItem) @@ -321,7 +322,7 @@ func TestWriteBackAddFailRetry(t *testing.T) { pi := newPutItem(t) - id := wb.Add(0, "one", true, pi.put) + id := wb.Add(0, "one", 10, true, pi.put) wbItem := wb.lookup[id] checkOnHeap(t, wb, wbItem) checkInLookup(t, wb, wbItem) @@ -354,8 +355,9 @@ func TestWriteBackAddUpdate(t *testing.T) { pi := newPutItem(t) - id := wb.Add(0, "one", true, pi.put) + id := wb.Add(0, "one", 10, true, pi.put) wbItem := wb.lookup[id] + assert.Equal(t, int64(10), wbItem.size) // check size checkOnHeap(t, wb, wbItem) checkInLookup(t, wb, wbItem) assert.Equal(t, "one", wb.string(t)) @@ -367,9 +369,10 @@ func TestWriteBackAddUpdate(t *testing.T) { // Now the upload has started add another one pi2 := newPutItem(t) - id2 := wb.Add(id, "one", true, pi2.put) + id2 := wb.Add(id, "one", 20, true, pi2.put) assert.Equal(t, id, id2) - checkOnHeap(t, wb, wbItem) // object awaiting writeback time + assert.Equal(t, int64(20), wbItem.size) // check size has changed + checkOnHeap(t, wb, wbItem) // object awaiting writeback time checkInLookup(t, wb, wbItem) // check the previous transfer was cancelled @@ -393,7 +396,7 @@ func TestWriteBackAddUpdateNotModified(t *testing.T) { pi := newPutItem(t) - id := wb.Add(0, "one", false, pi.put) + id := wb.Add(0, "one", 10, false, pi.put) wbItem := wb.lookup[id] checkOnHeap(t, wb, wbItem) checkInLookup(t, wb, wbItem) @@ -406,7 +409,7 @@ func TestWriteBackAddUpdateNotModified(t *testing.T) { // Now the upload has started add another one pi2 := newPutItem(t) - id2 := wb.Add(id, "one", false, pi2.put) + id2 := wb.Add(id, "one", 10, false, pi2.put) assert.Equal(t, id, id2) 
checkNotOnHeap(t, wb, wbItem) // object still being transferred checkInLookup(t, wb, wbItem) @@ -432,7 +435,7 @@ func TestWriteBackAddUpdateNotStarted(t *testing.T) { pi := newPutItem(t) - id := wb.Add(0, "one", true, pi.put) + id := wb.Add(0, "one", 10, true, pi.put) wbItem := wb.lookup[id] checkOnHeap(t, wb, wbItem) checkInLookup(t, wb, wbItem) @@ -441,7 +444,7 @@ func TestWriteBackAddUpdateNotStarted(t *testing.T) { // Immediately add another upload before the first has started pi2 := newPutItem(t) - id2 := wb.Add(id, "one", true, pi2.put) + id2 := wb.Add(id, "one", 10, true, pi2.put) assert.Equal(t, id, id2) checkOnHeap(t, wb, wbItem) // object still awaiting transfer checkInLookup(t, wb, wbItem) @@ -470,7 +473,7 @@ func TestWriteBackGetStats(t *testing.T) { pi := newPutItem(t) - wb.Add(0, "one", true, pi.put) + wb.Add(0, "one", 10, true, pi.put) inProgress, queued := wb.Stats() assert.Equal(t, queued, 1) @@ -491,6 +494,101 @@ func TestWriteBackGetStats(t *testing.T) { } +func TestWriteBackQueue(t *testing.T) { + wb, cancel := newTestWriteBack(t) + defer cancel() + + pi := newPutItem(t) + + id := wb.Add(0, "one", 10, true, pi.put) + + queue := wb.Queue() + require.Equal(t, 1, len(queue)) + assert.Greater(t, queue[0].Expiry, 0.0) + assert.Less(t, queue[0].Expiry, 1.0) + queue[0].Expiry = 0.0 + assert.Equal(t, []QueueInfo{ + { + Name: "one", + Size: 10, + Expiry: 0.0, + Tries: 0, + Delay: 0.1, + Uploading: false, + ID: id, + }, + }, queue) + + <-pi.started + + queue = wb.Queue() + require.Equal(t, 1, len(queue)) + assert.Less(t, queue[0].Expiry, 0.0) + assert.Greater(t, queue[0].Expiry, -1.0) + queue[0].Expiry = 0.0 + assert.Equal(t, []QueueInfo{ + { + Name: "one", + Size: 10, + Expiry: 0.0, + Tries: 1, + Delay: 0.1, + Uploading: true, + ID: id, + }, + }, queue) + + pi.finish(nil) // transfer successful + waitUntilNoTransfers(t, wb) + + queue = wb.Queue() + assert.Equal(t, []QueueInfo{}, queue) +} + +func TestWriteBackSetExpiry(t *testing.T) { + wb, cancel := newTestWriteBack(t) + defer cancel() + + err := wb.SetExpiry(123123123, time.Now()) + assert.Equal(t, ErrorIDNotFound, err) + + pi := newPutItem(t) + + id := wb.Add(0, "one", 10, true, pi.put) + wbItem := wb.lookup[id] + + // get the expiry time with locking so we don't cause races + getExpiry := func() time.Time { + wb.mu.Lock() + defer wb.mu.Unlock() + return wbItem.expiry + } + + expiry := time.Until(getExpiry()).Seconds() + assert.Greater(t, expiry, 0.0) + assert.Less(t, expiry, 1.0) + + newExpiry := time.Now().Add(100 * time.Second) + require.NoError(t, wb.SetExpiry(wbItem.id, newExpiry)) + assert.Equal(t, newExpiry, getExpiry()) + + // This starts the transfer + newExpiry = time.Now().Add(-100 * time.Second) + require.NoError(t, wb.SetExpiry(wbItem.id, newExpiry)) + assert.Equal(t, newExpiry, getExpiry()) + + <-pi.started + + expiry = time.Until(getExpiry()).Seconds() + assert.LessOrEqual(t, expiry, -100.0) + + pi.finish(nil) // transfer successful + waitUntilNoTransfers(t, wb) + + expiry = time.Until(getExpiry()).Seconds() + assert.LessOrEqual(t, expiry, -100.0) +} + // Test queuing more than fs.Config.Transfers func TestWriteBackMaxQueue(t *testing.T) { ctx := context.Background() @@ -506,7 +604,7 @@ func TestWriteBackMaxQueue(t *testing.T) { for i := 0; i < toTransfer; i++ { pi := newPutItem(t) pis = append(pis, pi) - wb.Add(0, fmt.Sprintf("number%d", 1), true, pi.put) + wb.Add(0, fmt.Sprintf("number%d", 1), 10, true, pi.put) } inProgress, queued := wb.Stats() @@ -551,7 +649,7 @@ func TestWriteBackRename(t *testing.T) { // 
add item pi1 := newPutItem(t) - id := wb.Add(0, "one", true, pi1.put) + id := wb.Add(0, "one", 10, true, pi1.put) wbItem := wb.lookup[id] checkOnHeap(t, wb, wbItem) checkInLookup(t, wb, wbItem) @@ -566,7 +664,7 @@ func TestWriteBackRename(t *testing.T) { // add item pi2 := newPutItem(t) - id = wb.Add(id, "two", true, pi2.put) + id = wb.Add(id, "two", 10, true, pi2.put) wbItem = wb.lookup[id] checkOnHeap(t, wb, wbItem) checkInLookup(t, wb, wbItem) @@ -591,9 +689,9 @@ func TestWriteBackRenameDuplicates(t *testing.T) { wb, cancel := newTestWriteBack(t) defer cancel() - // add item "one" + // add item "one", 10 pi1 := newPutItem(t) - id1 := wb.Add(0, "one", true, pi1.put) + id1 := wb.Add(0, "one", 10, true, pi1.put) wbItem1 := wb.lookup[id1] checkOnHeap(t, wb, wbItem1) checkInLookup(t, wb, wbItem1) @@ -605,7 +703,7 @@ func TestWriteBackRenameDuplicates(t *testing.T) { // add item "two" pi2 := newPutItem(t) - id2 := wb.Add(0, "two", true, pi2.put) + id2 := wb.Add(0, "two", 10, true, pi2.put) wbItem2 := wb.lookup[id2] checkOnHeap(t, wb, wbItem2) checkInLookup(t, wb, wbItem2) @@ -641,7 +739,7 @@ func TestWriteBackCancelUpload(t *testing.T) { // add item pi := newPutItem(t) - id := wb.Add(0, "one", true, pi.put) + id := wb.Add(0, "one", 10, true, pi.put) wbItem := wb.lookup[id] checkOnHeap(t, wb, wbItem) checkInLookup(t, wb, wbItem) diff --git a/vfs/vfstest/fs.go b/vfs/vfstest/fs.go index 97436d38e..1c74f54dc 100644 --- a/vfs/vfstest/fs.go +++ b/vfs/vfstest/fs.go @@ -8,7 +8,6 @@ import ( "flag" "fmt" "io" - "log" "os" "os/exec" "path" @@ -69,7 +68,7 @@ func RunTests(t *testing.T, useVFS bool, minimumRequiredCacheMode vfscommon.Cach if test.writeBack > 0 { what += fmt.Sprintf(",WriteBack=%v", test.writeBack) } - log.Printf("Starting test run with %s", what) + fs.Logf(nil, "Starting test run with %s", what) ok := t.Run(what, func(t *testing.T) { t.Run("TestTouchAndDelete", TestTouchAndDelete) t.Run("TestRenameOpenHandle", TestRenameOpenHandle) @@ -100,7 +99,7 @@ func RunTests(t *testing.T, useVFS bool, minimumRequiredCacheMode vfscommon.Cach t.Run("TestWriteFileDup", TestWriteFileDup) t.Run("TestWriteFileAppend", TestWriteFileAppend) }) - log.Printf("Finished test run with %s (ok=%v)", what, ok) + fs.Logf(nil, "Finished test run with %s (ok=%v)", what, ok) run.Finalise() if !ok { break @@ -146,12 +145,12 @@ func newRun(useVFS bool, vfsOpt *vfscommon.Options, mountFn mountlib.MountFn) *R var err error r.fremote, r.fremoteName, r.cleanRemote, err = fstest.RandomRemote() if err != nil { - log.Fatalf("Failed to open remote %q: %v", *fstest.RemoteName, err) + fs.Fatalf(nil, "Failed to open remote %q: %v", *fstest.RemoteName, err) } err = r.fremote.Mkdir(context.Background(), "") if err != nil { - log.Fatalf("Failed to open mkdir %q: %v", *fstest.RemoteName, err) + fs.Fatalf(nil, "Failed to open mkdir %q: %v", *fstest.RemoteName, err) } r.startMountSubProcess() @@ -176,14 +175,14 @@ func (r *Run) Finalise() { r.sendMountCommand("exit") _, err := r.cmd.Process.Wait() if err != nil { - log.Fatalf("mount sub process failed: %v", err) + fs.Fatalf(nil, "mount sub process failed: %v", err) } } r.cleanRemote() if !r.useVFS { err := os.RemoveAll(r.mountPath) if err != nil { - log.Printf("Failed to clean mountPath %q: %v", r.mountPath, err) + fs.Logf(nil, "Failed to clean mountPath %q: %v", r.mountPath, err) } } } diff --git a/vfs/vfstest/submount.go b/vfs/vfstest/submount.go index d8e8fb6ff..ac6400cdf 100644 --- a/vfs/vfstest/submount.go +++ b/vfs/vfstest/submount.go @@ -7,7 +7,6 @@ import ( "flag" "fmt" "io" - 
"log" "os" "os/exec" "runtime" @@ -48,7 +47,7 @@ func (r *Run) startMountSubProcess() { } r.os = realOs{} r.mountPath = findMountPath() - log.Printf("startMountSubProcess %q (%q) %q", r.fremote, r.fremoteName, r.mountPath) + fs.Logf(nil, "startMountSubProcess %q (%q) %q", r.fremote, r.fremoteName, r.mountPath) opt := runMountOpt{ MountPoint: r.mountPath, @@ -59,7 +58,7 @@ func (r *Run) startMountSubProcess() { opts, err := json.Marshal(&opt) if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } // Re-run this executable with a new option -run-mount @@ -68,32 +67,32 @@ func (r *Run) startMountSubProcess() { r.cmd.Stderr = os.Stderr r.out, err = r.cmd.StdinPipe() if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } r.in, err = r.cmd.StdoutPipe() if err != nil { - log.Fatal(err) + fs.Fatal(nil, fmt.Sprint(err)) } err = r.cmd.Start() if err != nil { - log.Fatal("startMountSubProcess failed", err) + fs.Fatal(nil, fmt.Sprint("startMountSubProcess failed", err)) } r.scanner = bufio.NewScanner(r.in) // Wait it for startup - log.Print("Waiting for mount to start") + fs.Log(nil, "Waiting for mount to start") for r.scanner.Scan() { rx := strings.TrimSpace(r.scanner.Text()) if rx == "STARTED" { break } - log.Printf("..Mount said: %s", rx) + fs.Logf(nil, "..Mount said: %s", rx) } if r.scanner.Err() != nil { - log.Printf("scanner err %v", r.scanner.Err()) + fs.Logf(nil, "scanner err %v", r.scanner.Err()) } - log.Printf("startMountSubProcess: end") + fs.Logf(nil, "startMountSubProcess: end") } // Find a free path to run the mount on @@ -101,7 +100,7 @@ func findMountPath() string { if runtime.GOOS != "windows" { mountPath, err := os.MkdirTemp("", "rclonefs-mount") if err != nil { - log.Fatalf("Failed to create mount dir: %v", err) + fs.Fatalf(nil, "Failed to create mount dir: %v", err) } return mountPath } @@ -110,7 +109,7 @@ func findMountPath() string { letter := file.FindUnusedDriveLetter() drive := "" if letter == 0 { - log.Fatalf("Couldn't find free drive letter for test") + fs.Fatalf(nil, "Couldn't find free drive letter for test") } else { drive = string(letter) + ":" } @@ -128,36 +127,36 @@ func isSubProcess() bool { // It reads commands from standard input and writes results to // standard output. 
func startMount(mountFn mountlib.MountFn, useVFS bool, opts string) { - log.Print("startMount") + fs.Log(nil, "startMount") ctx := context.Background() var opt runMountOpt err := json.Unmarshal([]byte(opts), &opt) if err != nil { - log.Fatalf("Unmarshal failed: %v", err) + fs.Fatalf(nil, "Unmarshal failed: %v", err) } fstest.Initialise() f, err := cache.Get(ctx, opt.Remote) if err != nil { - log.Fatalf("Failed to open remote %q: %v", opt.Remote, err) + fs.Fatalf(nil, "Failed to open remote %q: %v", opt.Remote, err) } err = f.Mkdir(ctx, "") if err != nil { - log.Fatalf("Failed to mkdir %q: %v", opt.Remote, err) + fs.Fatalf(nil, "Failed to mkdir %q: %v", opt.Remote, err) } - log.Printf("startMount: Mounting %q on %q with %q", opt.Remote, opt.MountPoint, opt.VFSOpt.CacheMode) + fs.Logf(nil, "startMount: Mounting %q on %q with %q", opt.Remote, opt.MountPoint, opt.VFSOpt.CacheMode) mnt := mountlib.NewMountPoint(mountFn, opt.MountPoint, f, &opt.MountOpt, &opt.VFSOpt) _, err = mnt.Mount() if err != nil { - log.Fatalf("mount FAILED %q: %v", opt.Remote, err) + fs.Fatalf(nil, "mount FAILED %q: %v", opt.Remote, err) } defer umount(mnt) - log.Printf("startMount: mount OK") + fs.Logf(nil, "startMount: mount OK") fmt.Println("STARTED") // signal to parent all is good // Read commands from stdin @@ -172,7 +171,7 @@ func startMount(mountFn mountlib.MountFn, useVFS bool, opts string) { err = scanner.Err() if err != nil { - log.Fatalf("scanner failed %q: %v", opt.Remote, err) + fs.Fatalf(nil, "scanner failed %q: %v", opt.Remote, err) } } @@ -221,17 +220,17 @@ func (r *Run) sendMountCommand(args ...string) { } else { _, err := io.WriteString(r.out, tx+"\n") if err != nil { - log.Fatalf("WriteString err %v", err) + fs.Fatalf(nil, "WriteString err %v", err) } if !r.scanner.Scan() { - log.Fatalf("Mount has gone away") + fs.Fatalf(nil, "Mount has gone away") } rx = strings.Trim(r.scanner.Text(), "\r\n") } in := strings.Split(rx, "\t") // log.Printf("Answer is %q", in) if in[0] != "OK" { - log.Fatalf("Error from mount: %q", in[1:]) + fs.Fatalf(nil, "Error from mount: %q", in[1:]) } } @@ -254,25 +253,25 @@ func umount(mnt *mountlib.MountPoint) { log.Printf("fusermount failed: %v", err) } */ - log.Printf("Unmounting %q", mnt.MountPoint) + fs.Logf(nil, "Unmounting %q", mnt.MountPoint) err := mnt.Unmount() if err != nil { - log.Printf("signal to umount failed - retrying: %v", err) + fs.Logf(nil, "signal to umount failed - retrying: %v", err) time.Sleep(3 * time.Second) err = mnt.Unmount() } if err != nil { - log.Fatalf("signal to umount failed: %v", err) + fs.Fatalf(nil, "signal to umount failed: %v", err) } - log.Printf("Waiting for umount") + fs.Logf(nil, "Waiting for umount") err = <-mnt.ErrChan if err != nil { - log.Fatalf("umount failed: %v", err) + fs.Fatalf(nil, "umount failed: %v", err) } // Cleanup the VFS cache - umount has called Shutdown err = mnt.VFS.CleanUp() if err != nil { - log.Printf("Failed to cleanup the VFS cache: %v", err) + fs.Logf(nil, "Failed to cleanup the VFS cache: %v", err) } }