Mirror of https://github.com/rclone/rclone.git (synced 2024-11-24 17:34:57 +01:00)

Merge branch 'master' into feat/extend-docker-volume-plugin-options

commit 25a02a253a

.github/workflows/build.yml (vendored, 7 changes)
@@ -17,12 +17,11 @@ on:
       manual:
         description: Manual run (bypass default conditions)
         type: boolean
-        required: true
         default: true
 
 jobs:
   build:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 60
     strategy:
       fail-fast: false

@@ -217,7 +216,7 @@ jobs:
         if: env.RCLONE_CONFIG_PASS != '' && matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'
 
   lint:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "lint"
     runs-on: ubuntu-latest

@@ -296,7 +295,7 @@ jobs:
         run: govulncheck ./...
 
   android:
-    if: ${{ github.event.inputs.manual == 'true' || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name)) }}
+    if: inputs.manual || (github.repository == 'rclone/rclone' && (github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name))
     timeout-minutes: 30
     name: "android-all"
     runs-on: ubuntu-latest
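With `workflow_dispatch` boolean inputs, the `inputs` context carries a real boolean, so the string comparison `github.event.inputs.manual == 'true'` collapses to `inputs.manual`, and dropping `required: true` lets the default apply. A hedged sketch of triggering such a manual run with the GitHub CLI (assuming the `manual` input shown above):

```sh
# Kick off a manual build, bypassing the repository/PR gating conditions.
gh workflow run build.yml -f manual=true
```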
@@ -32,15 +32,27 @@ jobs:
       - name: Get actual major version
         id: actual_major_version
         run: echo ::set-output name=ACTUAL_MAJOR_VERSION::$(echo $GITHUB_REF | cut -d / -f 3 | sed 's/v//g' | cut -d "." -f 1)
-      - name: Build and publish image
-        uses: ilteoood/docker_buildx@1.1.0
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to Docker Hub
+        uses: docker/login-action@v3
         with:
-          tag: latest,${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }},${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }},${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
-          imageName: rclone/rclone
-          platform: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
-          publish: true
-          dockerHubUser: ${{ secrets.DOCKER_HUB_USER }}
-          dockerHubPassword: ${{ secrets.DOCKER_HUB_PASSWORD }}
+          username: ${{ secrets.DOCKER_HUB_USER }}
+          password: ${{ secrets.DOCKER_HUB_PASSWORD }}
+      - name: Build and publish image
+        uses: docker/build-push-action@v6
+        with:
+          file: Dockerfile
+          context: .
+          platforms: linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6
+          push: true
+          tags: |
+            rclone/rclone:latest
+            rclone/rclone:${{ steps.actual_patch_version.outputs.ACTUAL_PATCH_VERSION }}
+            rclone/rclone:${{ steps.actual_minor_version.outputs.ACTUAL_MINOR_VERSION }}
+            rclone/rclone:${{ steps.actual_major_version.outputs.ACTUAL_MAJOR_VERSION }}
 
   build_docker_volume_plugin:
     if: github.repository == 'rclone/rclone'
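The retired `ilteoood/docker_buildx` action bundled QEMU setup, login, build, and push into one step; the official actions split those apart. Roughly, the new `build-push-action` step corresponds to this local command (the version tags here are hypothetical stand-ins for the `ACTUAL_*_VERSION` step outputs):

```sh
# Assumes `docker login` and a buildx builder are already configured.
docker buildx build --file Dockerfile --push \
  --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 \
  --tag rclone/rclone:latest --tag rclone/rclone:1.68.2 \
  --tag rclone/rclone:1.68 --tag rclone/rclone:1 .
```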
@@ -100,10 +100,45 @@ linters-settings:
     # as documented here: https://staticcheck.io/docs/configuration/options/#checks
     checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-ST1023"]
   gocritic:
-    disabled-checks:
-      - appendAssign
-      - captLocal
-      - commentFormatting
-      - exitAfterDefer
-      - ifElseChain
-      - singleCaseSwitch
+    # Enable all default checks with some exceptions and some additions (commented).
+    # Cannot use both enabled-checks and disabled-checks, so must specify all to be used.
+    disable-all: true
+    enabled-checks:
+      #- appendAssign # Enabled by default
+      - argOrder
+      - assignOp
+      - badCall
+      - badCond
+      #- captLocal # Enabled by default
+      - caseOrder
+      - codegenComment
+      #- commentFormatting # Enabled by default
+      - defaultCaseOrder
+      - deprecatedComment
+      - dupArg
+      - dupBranchBody
+      - dupCase
+      - dupSubExpr
+      - elseif
+      #- exitAfterDefer # Enabled by default
+      - flagDeref
+      - flagName
+      #- ifElseChain # Enabled by default
+      - mapKey
+      - newDeref
+      - offBy1
+      - regexpMust
+      - ruleguard # Not enabled by default
+      #- singleCaseSwitch # Enabled by default
+      - sloppyLen
+      - sloppyTypeAssert
+      - switchTrue
+      - typeSwitchVar
+      - underef
+      - unlambda
+      - unslice
+      - valSwap
+      - wrapperFunc
+    settings:
+      ruleguard:
+        rules: "${configDir}/bin/rules.go"
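Since gocritic cannot combine `enabled-checks` with `disabled-checks`, the rewrite sets `disable-all: true` and lists every desired check explicitly, keeping the intentionally skipped defaults as comments. To reproduce just these findings locally, something like the following should work with golangci-lint v1:

```sh
# Run only the gocritic linter with the checks enabled in .golangci.yml
golangci-lint run --disable-all --enable=gocritic ./...
```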
@@ -490,7 +490,7 @@ alphabetical order of full name of remote (e.g. `drive` is ordered as
 - `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
   - make sure this has the `autogenerated options` comments in (see your reference backend docs)
   - update them in your backend with `bin/make_backend_docs.py remote`
-- `docs/content/overview.md` - overview docs
+- `docs/content/overview.md` - overview docs - add an entry into the Features table and the Optional Features table.
 - `docs/content/docs.md` - list of remotes in config section
 - `docs/content/_index.md` - front page of rclone.org
 - `docs/layouts/chrome/navbar.html` - add it to the website navigation
MANUAL.html (generated, 3982 changes): file diff suppressed because it is too large
MANUAL.txt (generated, 3810 changes): file diff suppressed because it is too large
Makefile (6 changes)

@@ -144,10 +144,14 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
 
 commanddocs: rclone
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
+	-@rmdir -p '$$HOME/.config/rclone'
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
+	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
 
 backenddocs: rclone bin/make_backend_docs.py
+	-@rmdir -p '$$HOME/.config/rclone'
 	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
+	@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
 
 rcdocs: rclone
 	bin/make_rc_docs.sh
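The recipes now point `rclone gendocs` at a nonexistent config (`--config=/notfound`) so doc generation never touches a real config file, and then assert that no literal directory named `$HOME` was created, which is the symptom this guards against. A quick manual check of the same invariant, as an illustrative sketch:

```sh
# After generating docs, fail loudly if a literal '$HOME' directory appeared.
make commanddocs
test ! -e './$HOME' || { echo "unwanted literal \$HOME directory"; exit 1; }
```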
@@ -62,9 +62,11 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
 * Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 * HDFS (Hadoop Distributed Filesystem) [:page_facing_up:](https://rclone.org/hdfs/)
+* Hetzner Storage Box [:page_facing_up:](https://rclone.org/sftp/#hetzner-storage-box)
 * HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
+* iCloud Drive [:page_facing_up:](https://rclone.org/iclouddrive/)
 * ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
 * Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)

@@ -91,6 +93,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * OpenStack Swift [:page_facing_up:](https://rclone.org/swift/)
 * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
 * Oracle Object Storage [:page_facing_up:](https://rclone.org/oracleobjectstorage/)
+* Outscale [:page_facing_up:](https://rclone.org/s3/#outscale)
 * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
 * Petabox [:page_facing_up:](https://rclone.org/s3/#petabox)

@@ -104,6 +107,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * Quatrix [:page_facing_up:](https://rclone.org/quatrix/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * RackCorp Object Storage [:page_facing_up:](https://rclone.org/s3/#RackCorp)
+* rsync.net [:page_facing_up:](https://rclone.org/sftp/#rsync-net)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * Seafile [:page_facing_up:](https://rclone.org/seafile/)
 * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
@@ -168,6 +168,8 @@ docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/a
 
 To make a full build then set the tags correctly and add `--push`
 
+Note that you can't only build one architecture - you need to build them all.
+
 ```
-docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
+docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
 ```
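Multi-architecture builds like the one above need QEMU binfmt handlers and a buildx builder on the host; the docs assume they are present. A commonly used one-time setup (not part of the original text) is:

```sh
# Install QEMU emulators for foreign architectures, then create a builder.
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --name multiarch --use
```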
@@ -26,6 +26,7 @@ import (
 	_ "github.com/rclone/rclone/backend/hdfs"
 	_ "github.com/rclone/rclone/backend/hidrive"
 	_ "github.com/rclone/rclone/backend/http"
+	_ "github.com/rclone/rclone/backend/iclouddrive"
 	_ "github.com/rclone/rclone/backend/imagekit"
 	_ "github.com/rclone/rclone/backend/internetarchive"
 	_ "github.com/rclone/rclone/backend/jottacloud"
@@ -209,6 +209,22 @@ rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + `
 keys instead of setting ` + "`service_principal_file`" + `.
 `,
 			Advanced: true,
+		}, {
+			Name: "disable_instance_discovery",
+			Help: `Skip requesting Microsoft Entra instance metadata
+
+This should be set true only by applications authenticating in
+disconnected clouds, or private clouds such as Azure Stack.
+
+It determines whether rclone requests Microsoft Entra instance
+metadata from ` + "`https://login.microsoft.com/`" + ` before
+authenticating.
+
+Setting this to true will skip this request, making you responsible
+for ensuring the configured authority is valid and trustworthy.
+`,
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name: "use_msi",
 			Help: `Use a managed service identity to authenticate (only works in Azure).

@@ -243,6 +259,20 @@ msi_client_id, or msi_mi_res_id parameters.`,
 			Help:     "Uses local storage emulator if provided as 'true'.\n\nLeave blank if using real azure storage endpoint.",
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name: "use_az",
+			Help: `Use Azure CLI tool az for authentication
+
+Set to use the [Azure CLI tool az](https://learn.microsoft.com/en-us/cli/azure/)
+as the sole means of authentication.
+
+Setting this can be useful if you wish to use the az CLI on a host with
+a System Managed Identity that you do not want to use.
+
+Don't set env_auth at the same time.
+`,
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name: "endpoint",
 			Help: "Endpoint for the service.\n\nLeave blank normally.",

@@ -438,10 +468,12 @@ type Options struct {
 	Username                 string        `config:"username"`
 	Password                 string        `config:"password"`
 	ServicePrincipalFile     string        `config:"service_principal_file"`
+	DisableInstanceDiscovery bool          `config:"disable_instance_discovery"`
 	UseMSI                   bool          `config:"use_msi"`
 	MSIObjectID              string        `config:"msi_object_id"`
 	MSIClientID              string        `config:"msi_client_id"`
 	MSIResourceID            string        `config:"msi_mi_res_id"`
+	UseAZ                    bool          `config:"use_az"`
 	Endpoint                 string        `config:"endpoint"`
 	ChunkSize                fs.SizeSuffix `config:"chunk_size"`
 	UploadConcurrency        int           `config:"upload_concurrency"`

@@ -725,7 +757,8 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		}
 		// Read credentials from the environment
 		options := azidentity.DefaultAzureCredentialOptions{
 			ClientOptions: policyClientOptions,
+			DisableInstanceDiscovery: opt.DisableInstanceDiscovery,
 		}
 		cred, err = azidentity.NewDefaultAzureCredential(&options)
 		if err != nil {

@@ -875,6 +908,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
 		}
+	case opt.UseAZ:
+		var options = azidentity.AzureCLICredentialOptions{}
+		cred, err = azidentity.NewAzureCLICredential(&options)
+		if err != nil {
+			return nil, fmt.Errorf("failed to create Azure CLI credentials: %w", err)
+		}
 	case opt.Account != "":
 		// Anonymous access
 		anonymous = true
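Both new azureblob options surface through rclone's usual auto-generated flags and config keys. A hedged usage sketch (remote and container names invented; flag names follow rclone's option-to-flag convention):

```sh
# Authenticate solely via an existing `az login` session:
rclone lsd azblob:mycontainer --azureblob-use-az

# Env-based auth in a private/disconnected cloud, skipping instance discovery:
rclone lsd azblob:mycontainer --azureblob-env-auth --azureblob-disable-instance-discovery
```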
@@ -43,6 +43,7 @@ import (
 	"github.com/rclone/rclone/lib/jwtutil"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/rest"
 	"github.com/youmark/pkcs8"
 	"golang.org/x/oauth2"

@@ -256,7 +257,6 @@ func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
 }
 
 func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
-
 	block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
 	if len(rest) > 0 {
 		return nil, fmt.Errorf("box: extra data included in private key: %w", err)

@@ -619,7 +619,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
-		//fmt.Printf("...Error %v\n", err)
+		// fmt.Printf("...Error %v\n", err)
 		return "", err
 	}
 	// fmt.Printf("...Id %q\n", *info.Id)

@@ -966,6 +966,26 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, err
 	}
 
+	// check if dest already exists
+	item, err := f.preUploadCheck(ctx, leaf, directoryID, src.Size())
+	if err != nil {
+		return nil, err
+	}
+	if item != nil { // dest already exists, need to copy to temp name and then move
+		tempSuffix := "-rclone-copy-" + random.String(8)
+		fs.Debugf(remote, "dst already exists, copying to temp name %v", remote+tempSuffix)
+		tempObj, err := f.Copy(ctx, src, remote+tempSuffix)
+		if err != nil {
+			return nil, err
+		}
+		fs.Debugf(remote+tempSuffix, "moving to real name %v", remote)
+		err = f.deleteObject(ctx, item.ID)
+		if err != nil {
+			return nil, err
+		}
+		return f.Move(ctx, tempObj, remote)
+	}
+
 	// Copy the object
 	opts := rest.Opts{
 		Method: "POST",
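The added `preUploadCheck` branch makes server-side copies onto an existing name work: the data is first copied to a temporary name, the old destination is deleted, and the temporary object is moved into place. Illustratively (paths invented):

```sh
# Server-side copy over an existing Box file now replaces it instead of failing.
rclone copyto box:dir/src.txt box:dir/existing.txt
```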
backend/cache/cache_internal_test.go (vendored, 55 changes)

@@ -10,7 +10,6 @@ import (
 	goflag "flag"
 	"fmt"
 	"io"
-	"log"
 	"math/rand"
 	"os"
 	"path"

@@ -93,7 +92,7 @@ func TestMain(m *testing.M) {
 	goflag.Parse()
 	var rc int
 
-	log.Printf("Running with the following params: \n remote: %v", remoteName)
+	fs.Logf(nil, "Running with the following params: \n remote: %v", remoteName)
 	runInstance = newRun()
 	rc = m.Run()
 	os.Exit(rc)

@@ -408,7 +407,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	// update in the wrapped fs
 	originalSize, err := runInstance.size(t, rootFs, "data.bin")
 	require.NoError(t, err)
-	log.Printf("original size: %v", originalSize)
+	fs.Logf(nil, "original size: %v", originalSize)
 
 	o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
 	require.NoError(t, err)

@@ -425,7 +424,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
 	err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
 	require.NoError(t, err)
 	require.Equal(t, int64(len(data2)), o.Size())
-	log.Printf("updated size: %v", len(data2))
+	fs.Logf(nil, "updated size: %v", len(data2))
 
 	// get a new instance from the cache
 	if runInstance.wrappedIsExternal {

@@ -485,49 +484,49 @@ func TestInternalMoveWithNotify(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		li, err := runInstance.list(t, rootFs, "test")
 		if err != nil {
-			log.Printf("err: %v", err)
+			fs.Logf(nil, "err: %v", err)
 			return err
 		}
 		if len(li) != 2 {
-			log.Printf("not expected listing /test: %v", li)
+			fs.Logf(nil, "not expected listing /test: %v", li)
 			return fmt.Errorf("not expected listing /test: %v", li)
 		}
 
 		li, err = runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			log.Printf("err: %v", err)
+			fs.Logf(nil, "err: %v", err)
 			return err
 		}
 		if len(li) != 0 {
-			log.Printf("not expected listing /test/one: %v", li)
+			fs.Logf(nil, "not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 
 		li, err = runInstance.list(t, rootFs, "test/second")
 		if err != nil {
-			log.Printf("err: %v", err)
+			fs.Logf(nil, "err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			log.Printf("not expected listing /test/second: %v", li)
+			fs.Logf(nil, "not expected listing /test/second: %v", li)
 			return fmt.Errorf("not expected listing /test/second: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "data.bin" {
-				log.Printf("not expected name: %v", fi.Name())
+				fs.Logf(nil, "not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/second/data.bin" {
-				log.Printf("not expected remote: %v", di.Remote())
+				fs.Logf(nil, "not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			log.Printf("unexpected listing: %v", li)
+			fs.Logf(nil, "unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}
 
-		log.Printf("complete listing: %v", li)
+		fs.Logf(nil, "complete listing: %v", li)
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)

@@ -577,43 +576,43 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
 	err = runInstance.retryBlock(func() error {
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test")))
 		if !found {
-			log.Printf("not found /test")
+			fs.Logf(nil, "not found /test")
 			return fmt.Errorf("not found /test")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one")))
 		if !found {
-			log.Printf("not found /test/one")
+			fs.Logf(nil, "not found /test/one")
 			return fmt.Errorf("not found /test/one")
 		}
 		found = boltDb.HasEntry(path.Join(cfs.Root(), runInstance.encryptRemoteIfNeeded(t, "test"), runInstance.encryptRemoteIfNeeded(t, "one"), runInstance.encryptRemoteIfNeeded(t, "test2")))
 		if !found {
-			log.Printf("not found /test/one/test2")
+			fs.Logf(nil, "not found /test/one/test2")
 			return fmt.Errorf("not found /test/one/test2")
 		}
 		li, err := runInstance.list(t, rootFs, "test/one")
 		if err != nil {
-			log.Printf("err: %v", err)
+			fs.Logf(nil, "err: %v", err)
 			return err
 		}
 		if len(li) != 1 {
-			log.Printf("not expected listing /test/one: %v", li)
+			fs.Logf(nil, "not expected listing /test/one: %v", li)
 			return fmt.Errorf("not expected listing /test/one: %v", li)
 		}
 		if fi, ok := li[0].(os.FileInfo); ok {
 			if fi.Name() != "test2" {
-				log.Printf("not expected name: %v", fi.Name())
+				fs.Logf(nil, "not expected name: %v", fi.Name())
 				return fmt.Errorf("not expected name: %v", fi.Name())
 			}
 		} else if di, ok := li[0].(fs.DirEntry); ok {
 			if di.Remote() != "test/one/test2" {
-				log.Printf("not expected remote: %v", di.Remote())
+				fs.Logf(nil, "not expected remote: %v", di.Remote())
 				return fmt.Errorf("not expected remote: %v", di.Remote())
 			}
 		} else {
-			log.Printf("unexpected listing: %v", li)
+			fs.Logf(nil, "unexpected listing: %v", li)
 			return fmt.Errorf("unexpected listing: %v", li)
 		}
-		log.Printf("complete listing /test/one/test2")
+		fs.Logf(nil, "complete listing /test/one/test2")
 		return nil
 	}, 12, time.Second*10)
 	require.NoError(t, err)

@@ -771,24 +770,24 @@ func TestInternalBug2117(t *testing.T) {
 
 	di, err := runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	log.Printf("len: %v", len(di))
+	fs.Logf(nil, "len: %v", len(di))
 	require.Len(t, di, 1)
 
 	time.Sleep(time.Second * 30)
 
 	di, err = runInstance.list(t, rootFs, "test/dir1/dir2")
 	require.NoError(t, err)
-	log.Printf("len: %v", len(di))
+	fs.Logf(nil, "len: %v", len(di))
 	require.Len(t, di, 1)
 
 	di, err = runInstance.list(t, rootFs, "test/dir1")
 	require.NoError(t, err)
-	log.Printf("len: %v", len(di))
+	fs.Logf(nil, "len: %v", len(di))
 	require.Len(t, di, 4)
 
 	di, err = runInstance.list(t, rootFs, "test")
 	require.NoError(t, err)
-	log.Printf("len: %v", len(di))
+	fs.Logf(nil, "len: %v", len(di))
 	require.Len(t, di, 4)
 }
 

@@ -829,7 +828,7 @@ func newRun() *run {
 	} else {
 		r.tmpUploadDir = uploadDir
 	}
-	log.Printf("Temp Upload Dir: %v", r.tmpUploadDir)
+	fs.Logf(nil, "Temp Upload Dir: %v", r.tmpUploadDir)
 
 	return r
 }
@@ -120,6 +120,7 @@ var (
 		"text/html":                 ".html",
 		"text/plain":                ".txt",
 		"text/tab-separated-values": ".tsv",
+		"text/markdown":             ".md",
 	}
 	_mimeTypeToExtensionLinks = map[string]string{
 		"application/x-link-desktop": ".desktop",
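Mapping `text/markdown` to `.md` lets Google Docs be exported as Markdown, which the updated test case in the next hunk exercises. For example (source path hypothetical):

```sh
# Export Google Docs as Markdown when copying them out of Drive.
rclone copy drive:Documents /backup --drive-export-formats md
```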
@@ -95,7 +95,7 @@ func TestInternalParseExtensions(t *testing.T) {
 		wantErr error
 	}{
 		{"doc", []string{".doc"}, nil},
-		{" docx ,XLSX, pptx,svg", []string{".docx", ".xlsx", ".pptx", ".svg"}, nil},
+		{" docx ,XLSX, pptx,svg,md", []string{".docx", ".xlsx", ".pptx", ".svg", ".md"}, nil},
 		{"docx,svg,Docx", []string{".docx", ".svg"}, nil},
 		{"docx,potato,docx", []string{".docx"}, errors.New(`couldn't find MIME type for extension ".potato"`)},
 	} {
@@ -386,7 +386,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	oldToken = strings.TrimSpace(oldToken)
 	if ok && oldToken != "" && oldToken[0] != '{' {
 		fs.Infof(name, "Converting token to new format")
-		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
+		newToken := fmt.Sprintf(`{"access_token":%q,"token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
 		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
 		if err != nil {
 			return nil, fmt.Errorf("NewFS convert token: %w", err)
@@ -61,7 +61,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 		return false, err // No such user
 	case 186:
 		return false, err // IP blocked?
-	case 374:
+	case 374, 412: // Flood detected seems to be #412 now
 		fs.Debugf(nil, "Sleeping for 30 seconds due to: %v", err)
 		time.Sleep(30 * time.Second)
 	default:
@@ -441,23 +441,28 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		fs.Debugf(src, "Can't move - not same remote type")
 		return nil, fs.ErrorCantMove
 	}
+	srcFs := srcObj.fs
 
 	// Find current directory ID
-	_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
+	srcLeaf, srcDirectoryID, err := srcFs.dirCache.FindPath(ctx, srcObj.remote, false)
 	if err != nil {
 		return nil, err
 	}
 
 	// Create temporary object
-	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
+	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote)
 	if err != nil {
 		return nil, err
 	}
 
 	// If it is in the correct directory, just rename it
 	var url string
-	if currentDirectoryID == directoryID {
-		resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
+	if srcDirectoryID == dstDirectoryID {
+		// No rename needed
+		if srcLeaf == dstLeaf {
+			return src, nil
+		}
+		resp, err := f.renameFile(ctx, srcObj.file.URL, dstLeaf)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't rename file: %w", err)
 		}

@@ -466,11 +471,16 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		}
 		url = resp.URLs[0].URL
 	} else {
-		folderID, err := strconv.Atoi(directoryID)
+		dstFolderID, err := strconv.Atoi(dstDirectoryID)
 		if err != nil {
 			return nil, err
 		}
-		resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
+		rename := dstLeaf
+		// No rename needed
+		if srcLeaf == dstLeaf {
+			rename = ""
+		}
+		resp, err := f.moveFile(ctx, srcObj.file.URL, dstFolderID, rename)
 		if err != nil {
 			return nil, fmt.Errorf("couldn't move file: %w", err)
 		}
@@ -247,18 +247,16 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 
 func newClientConfig(ctx context.Context, opt *Options) (config files_sdk.Config, err error) {
 	if opt.Site != "" {
-		config.Subdomain = opt.Site
-
-		_, err = url.Parse(config.Endpoint())
-		if err != nil {
-			config.Subdomain = ""
+		if strings.Contains(opt.Site, ".") {
 			config.EndpointOverride = opt.Site
+		} else {
+			config.Subdomain = opt.Site
+		}
 
-			_, err = url.Parse(config.Endpoint())
-			if err != nil {
-				err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
-				return
-			}
+		_, err = url.ParseRequestURI(config.Endpoint())
+		if err != nil {
+			err = fmt.Errorf("invalid domain or subdomain: %v", opt.Site)
+			return
 		}
 	}
 }

@@ -355,15 +353,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 			continue
 		}
-
-		item, err = f.readMetaDataForPath(ctx, remote)
-		if err != nil {
-			if files_sdk.IsNotExist(err) {
-				continue
-			}
-
-			return nil, err
-		}
 
 		if item.IsDir() {
 			d := fs.NewDir(remote, item.ModTime())
 			entries = append(entries, d)
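After this change a `site` value containing a dot is treated as a full endpoint override while a bare name remains a files.com subdomain, and the result is validated with the stricter `url.ParseRequestURI` (plain `url.Parse` accepts almost any string). A sketch, with the flag name derived from the option as an assumption:

```sh
# Bare name: becomes the subdomain, e.g. https://mysite.files.com
rclone lsd remote: --filescom-site mysite
# Contains a dot: used verbatim as an endpoint override
rclone lsd remote: --filescom-site files.example.com
```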
@@ -180,12 +180,28 @@ If this is set and no password is supplied then rclone will ask for a password
 			Default: "",
 			Help: `Socks 5 proxy host.
 
 Supports the format user:pass@host:port, user@host:port, host:port.
 
 Example:
 
     myUser:myPass@localhost:9005
 `,
+			Advanced: true,
+		}, {
+			Name:    "no_check_upload",
+			Default: false,
+			Help: `Don't check the upload is OK
+
+Normally rclone will try to check the upload exists after it has
+uploaded a file to make sure the size and modification time are as
+expected.
+
+This flag stops rclone doing these checks. This enables uploading to
+folders which are write only.
+
+You will likely need to use the --inplace flag also if uploading to
+a write only folder.
+`,
 			Advanced: true,
 		}, {
 			Name: config.ConfigEncoding,

@@ -232,6 +248,7 @@ type Options struct {
 	AskPassword   bool                 `config:"ask_password"`
 	Enc           encoder.MultiEncoder `config:"encoding"`
 	SocksProxy    string               `config:"socks_proxy"`
+	NoCheckUpload bool                 `config:"no_check_upload"`
 }
 
 // Fs represents a remote FTP server

@@ -1303,6 +1320,16 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("update stor: %w", err)
 	}
 	o.fs.putFtpConnection(&c, nil)
+	if o.fs.opt.NoCheckUpload {
+		o.info = &FileInfo{
+			Name:    o.remote,
+			Size:    uint64(src.Size()),
+			ModTime: src.ModTime(ctx),
+			precise: true,
+			IsDir:   false,
+		}
+		return nil
+	}
 	if err = o.SetModTime(ctx, src.ModTime(ctx)); err != nil {
 		return fmt.Errorf("SetModTime: %w", err)
 	}
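The new option makes rclone trust the just-uploaded data instead of reading it back, which is what allows uploads into write-only FTP folders; the help text recommends pairing it with `--inplace` to avoid the final rename. For instance (remote path invented):

```sh
# Upload to a write-only drop folder: skip the post-upload check and rename.
rclone copyto report.pdf ftp:incoming/report.pdf --ftp-no-check-upload --inplace
```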
@@ -1105,6 +1105,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		return nil, fs.ErrorCantMove
 	}
 
+	// Find existing object
+	srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
+	if err != nil {
+		return nil, err
+	}
+
 	// Create temporary object
 	dstObj, dstLeaf, dstDirectoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
 	if err != nil {

@@ -1112,7 +1118,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 
 	// Do the move
-	info, err := f.moveTo(ctx, srcObj.id, path.Base(srcObj.remote), dstLeaf, srcObj.dirID, dstDirectoryID)
+	info, err := f.moveTo(ctx, srcObj.id, srcLeaf, dstLeaf, srcDirectoryID, dstDirectoryID)
 	if err != nil {
 		return nil, err
 	}

@@ -1463,6 +1469,13 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	if o.id == "" {
 		return nil, errors.New("can't download - no id")
 	}
+	if o.url == "" {
+		// On upload an Object is returned with no url, so fetch it here if needed
+		err = o.readMetaData(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("read metadata: %w", err)
+		}
+	}
 	fs.FixRangeOption(options, o.size)
 	var resp *http.Response
 	opts := rest.Opts{
@@ -60,16 +60,14 @@ const (
 	minSleep = 10 * time.Millisecond
 )
 
-var (
-	// Description of how to auth for this app
-	storageConfig = &oauth2.Config{
-		Scopes:       []string{storage.DevstorageReadWriteScope},
-		Endpoint:     google.Endpoint,
-		ClientID:     rcloneClientID,
-		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-		RedirectURL:  oauthutil.RedirectURL,
-	}
-)
+// Description of how to auth for this app
+var storageConfig = &oauth2.Config{
+	Scopes:       []string{storage.DevstorageReadWriteScope},
+	Endpoint:     google.Endpoint,
+	ClientID:     rcloneClientID,
+	ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
+	RedirectURL:  oauthutil.RedirectURL,
+}
 
 // Register with Fs
 func init() {

@@ -106,6 +104,12 @@ func init() {
 			Help:      "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
 			Hide:      fs.OptionHideBoth,
 			Sensitive: true,
+		}, {
+			Name:      "access_token",
+			Help:      "Short-lived access token.\n\nLeave blank normally.\nNeeded only if you want use short-lived access token instead of interactive login.",
+			Hide:      fs.OptionHideConfigurator,
+			Sensitive: true,
+			Advanced:  true,
 		}, {
 			Name: "anonymous",
 			Help: "Access public buckets and objects without credentials.\n\nSet to 'true' if you just want to download files and don't configure credentials.",

@@ -379,6 +383,7 @@ type Options struct {
 	Enc              encoder.MultiEncoder `config:"encoding"`
 	EnvAuth          bool                 `config:"env_auth"`
 	DirectoryMarkers bool                 `config:"directory_markers"`
+	AccessToken      string               `config:"access_token"`
 }
 
 // Fs represents a remote storage server

@@ -535,6 +540,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
 		}
+	} else if opt.AccessToken != "" {
+		ts := oauth2.Token{AccessToken: opt.AccessToken}
+		oAuthClient = oauth2.NewClient(ctx, oauth2.StaticTokenSource(&ts))
 	} else {
 		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
 		if err != nil {

@@ -944,7 +952,6 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
 		return e
 	}
 	return f.createDirectoryMarker(ctx, bucket, dir)
-
 }
 
 // mkdirParent creates the parent bucket/directory if it doesn't exist
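The `access_token` option lets rclone authenticate with a short-lived OAuth2 bearer token instead of its own interactive flow, for example one minted by gcloud (remote and bucket names assumed):

```sh
# Use a short-lived token from gcloud rather than rclone's stored credentials.
rclone lsd gcs:my-bucket --gcs-access-token "$(gcloud auth print-access-token)"
```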
@ -28,7 +28,6 @@ import (
|
|||||||
"github.com/rclone/rclone/fs/fserrors"
|
"github.com/rclone/rclone/fs/fserrors"
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/fs/hash"
|
"github.com/rclone/rclone/fs/hash"
|
||||||
"github.com/rclone/rclone/fs/log"
|
|
||||||
"github.com/rclone/rclone/lib/batcher"
|
"github.com/rclone/rclone/lib/batcher"
|
||||||
"github.com/rclone/rclone/lib/encoder"
|
"github.com/rclone/rclone/lib/encoder"
|
||||||
"github.com/rclone/rclone/lib/oauthutil"
|
"github.com/rclone/rclone/lib/oauthutil"
|
||||||
@ -160,6 +159,34 @@ listings and transferred.
|
|||||||
Without this flag, archived media will not be visible in directory
|
Without this flag, archived media will not be visible in directory
|
||||||
listings and won't be transferred.`,
|
listings and won't be transferred.`,
|
||||||
Advanced: true,
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "proxy",
|
||||||
|
Default: "",
|
||||||
|
Help: strings.ReplaceAll(`Use the gphotosdl proxy for downloading the full resolution images
|
||||||
|
|
||||||
|
The Google API will deliver images and video which aren't full
|
||||||
|
resolution, and/or have EXIF data missing.
|
||||||
|
|
||||||
|
However if you ue the gphotosdl proxy tnen you can download original,
|
||||||
|
unchanged images.
|
||||||
|
|
||||||
|
This runs a headless browser in the background.
|
||||||
|
|
||||||
|
Download the software from [gphotosdl](https://github.com/rclone/gphotosdl)
|
||||||
|
|
||||||
|
First run with
|
||||||
|
|
||||||
|
gphotosdl -login
|
||||||
|
|
||||||
|
Then once you have logged into google photos close the browser window
|
||||||
|
and run
|
||||||
|
|
||||||
|
gphotosdl
|
||||||
|
|
||||||
|
Then supply the parameter |--gphotos-proxy "http://localhost:8282"| to make
|
||||||
|
rclone use the proxy.
|
||||||
|
`, "|", "`"),
|
||||||
|
Advanced: true,
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigEncoding,
|
Name: config.ConfigEncoding,
|
||||||
Help: config.ConfigEncodingHelp,
|
Help: config.ConfigEncodingHelp,
|
||||||
@ -181,6 +208,7 @@ type Options struct {
|
|||||||
BatchMode string `config:"batch_mode"`
|
BatchMode string `config:"batch_mode"`
|
||||||
BatchSize int `config:"batch_size"`
|
BatchSize int `config:"batch_size"`
|
||||||
BatchTimeout fs.Duration `config:"batch_timeout"`
|
BatchTimeout fs.Duration `config:"batch_timeout"`
|
||||||
|
Proxy string `config:"proxy"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fs represents a remote storage server
|
// Fs represents a remote storage server
|
||||||
@ -454,7 +482,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Med
|
|||||||
// NewObject finds the Object at remote. If it can't be found
|
// NewObject finds the Object at remote. If it can't be found
|
||||||
// it returns the error fs.ErrorObjectNotFound.
|
// it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
defer log.Trace(f, "remote=%q", remote)("")
|
// defer log.Trace(f, "remote=%q", remote)("")
|
||||||
return f.newObjectWithInfo(ctx, remote, nil)
|
return f.newObjectWithInfo(ctx, remote, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -667,7 +695,7 @@ func (f *Fs) listUploads(ctx context.Context, dir string) (entries fs.DirEntries
|
|||||||
// This should return ErrDirNotFound if the directory isn't
|
// This should return ErrDirNotFound if the directory isn't
|
||||||
// found.
|
// found.
|
||||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||||
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||||
match, prefix, pattern := patterns.match(f.root, dir, false)
|
match, prefix, pattern := patterns.match(f.root, dir, false)
|
||||||
if pattern == nil || pattern.isFile {
|
if pattern == nil || pattern.isFile {
|
||||||
return nil, fs.ErrorDirNotFound
|
return nil, fs.ErrorDirNotFound
|
||||||
@ -684,7 +712,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
|||||||
//
|
//
|
||||||
// The new object may have been created if an error is returned
|
// The new object may have been created if an error is returned
|
||||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||||
defer log.Trace(f, "src=%+v", src)("")
|
// defer log.Trace(f, "src=%+v", src)("")
|
||||||
// Temporary Object under construction
|
// Temporary Object under construction
|
||||||
o := &Object{
|
o := &Object{
|
||||||
fs: f,
|
fs: f,
|
||||||
@ -737,7 +765,7 @@ func (f *Fs) getOrCreateAlbum(ctx context.Context, albumTitle string) (album *ap
|
|||||||
|
|
||||||
// Mkdir creates the album if it doesn't exist
|
// Mkdir creates the album if it doesn't exist
|
||||||
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
||||||
defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
// defer log.Trace(f, "dir=%q", dir)("err=%v", &err)
|
||||||
match, prefix, pattern := patterns.match(f.root, dir, false)
|
match, prefix, pattern := patterns.match(f.root, dir, false)
|
||||||
if pattern == nil {
|
if pattern == nil {
|
||||||
return fs.ErrorDirNotFound
|
return fs.ErrorDirNotFound
|
||||||
@ -761,7 +789,7 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
|
|||||||
//
|
//
|
||||||
// Returns an error if it isn't empty
|
// Returns an error if it isn't empty
|
||||||
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
|
||||||
defer log.Trace(f, "dir=%q")("err=%v", &err)
|
// defer log.Trace(f, "dir=%q")("err=%v", &err)
|
||||||
match, _, pattern := patterns.match(f.root, dir, false)
|
match, _, pattern := patterns.match(f.root, dir, false)
|
||||||
if pattern == nil {
|
if pattern == nil {
|
||||||
return fs.ErrorDirNotFound
|
return fs.ErrorDirNotFound
|
||||||
@ -834,7 +862,7 @@ func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
|||||||
|
|
||||||
// Size returns the size of an object in bytes
|
// Size returns the size of an object in bytes
|
||||||
func (o *Object) Size() int64 {
|
func (o *Object) Size() int64 {
|
||||||
defer log.Trace(o, "")("")
|
// defer log.Trace(o, "")("")
|
||||||
if !o.fs.opt.ReadSize || o.bytes >= 0 {
|
if !o.fs.opt.ReadSize || o.bytes >= 0 {
|
||||||
return o.bytes
|
return o.bytes
|
||||||
}
|
}
|
||||||
@ -935,7 +963,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
|
|||||||
// It attempts to read the objects mtime and if that isn't present the
|
// It attempts to read the objects mtime and if that isn't present the
|
||||||
// LastModified returned in the http headers
|
// LastModified returned in the http headers
|
||||||
func (o *Object) ModTime(ctx context.Context) time.Time {
	defer log.Trace(o, "")("")
	// defer log.Trace(o, "")("")
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "ModTime: Failed to read metadata: %v", err)
@ -965,16 +993,20 @@ func (o *Object) downloadURL() string {

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	defer log.Trace(o, "")("")
	// defer log.Trace(o, "")("")
	err = o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "Open: Failed to read metadata: %v", err)
		return nil, err
	}
	url := o.downloadURL()
	if o.fs.opt.Proxy != "" {
		url = strings.TrimRight(o.fs.opt.Proxy, "/") + "/id/" + o.id
	}
	var resp *http.Response
	opts := rest.Opts{
		Method: "GET",
		RootURL: o.downloadURL(),
		RootURL: url,
		Options: options,
	}
	err = o.fs.pacer.Call(func() (bool, error) {
@ -1067,7 +1099,7 @@ func (f *Fs) commitBatch(ctx context.Context, items []uploadedItem, results []*a
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	defer log.Trace(o, "src=%+v", src)("err=%v", &err)
	// defer log.Trace(o, "src=%+v", src)("err=%v", &err)
	match, _, pattern := patterns.match(o.fs.root, o.remote, true)
	if pattern == nil || !pattern.isFile || !pattern.canUpload {
		return errCantUpload
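For illustration, a short sketch of the URL the new proxy branch above builds; the proxy address and media id are placeholders, not values from the diff:

	proxy := "https://gphotosdl.example.com/" // hypothetical media proxy address
	id := "AF1QipExample"                     // hypothetical media id
	url := strings.TrimRight(proxy, "/") + "/id/" + id
	// url == "https://gphotosdl.example.com/id/AF1QipExample"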
166
backend/iclouddrive/api/client.go
Normal file
@ -0,0 +1,166 @@
// Package api provides functionality for interacting with the iCloud API.
package api

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

const (
	baseEndpoint  = "https://www.icloud.com"
	homeEndpoint  = "https://www.icloud.com"
	setupEndpoint = "https://setup.icloud.com/setup/ws/1"
	authEndpoint  = "https://idmsa.apple.com/appleauth/auth"
)

type sessionSave func(*Session)

// Client defines the client configuration
type Client struct {
	appleID             string
	password            string
	srv                 *rest.Client
	Session             *Session
	sessionSaveCallback sessionSave

	drive *DriveService
}

// New creates a new Client instance with the provided Apple ID, password, trust token, cookies, and session save callback.
//
// Parameters:
// - appleID: the Apple ID of the user.
// - password: the password of the user.
// - trustToken: the trust token for the session.
// - clientID: the client id for the session.
// - cookies: the cookies for the session.
// - sessionSaveCallback: the callback function to save the session.
func New(appleID, password, trustToken string, clientID string, cookies []*http.Cookie, sessionSaveCallback sessionSave) (*Client, error) {
	icloud := &Client{
		appleID:             appleID,
		password:            password,
		srv:                 rest.NewClient(fshttp.NewClient(context.Background())),
		Session:             NewSession(),
		sessionSaveCallback: sessionSaveCallback,
	}

	icloud.Session.TrustToken = trustToken
	icloud.Session.Cookies = cookies
	icloud.Session.ClientID = clientID
	return icloud, nil
}
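A minimal usage sketch of New, assuming credentials and a previously saved session; every value below is a placeholder:

	client, err := api.New(
		"user@example.com", "app-password", // hypothetical Apple ID credentials
		savedTrustToken, savedClientID, savedCookies,
		func(s *api.Session) {
			// called when the session is refreshed, so it can be persisted
		},
	)
	if err != nil {
		return err
	}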

// DriveService returns the DriveService instance associated with the Client.
func (c *Client) DriveService() (*DriveService, error) {
	var err error
	if c.drive == nil {
		c.drive, err = NewDriveService(c)
		if err != nil {
			return nil, err
		}
	}
	return c.drive, nil
}

// Request makes a request and retries it if the session is invalid.
//
// This function is the main entry point for making requests to the iCloud
// API. If the initial request returns a 401 (Unauthorized) or 421 (Misdirected
// Request), it will try to reauthenticate and retry the request.
func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	resp, err = c.Session.Request(ctx, opts, request, response)
	if err != nil && resp != nil {
		// try to reauth
		if resp.StatusCode == 401 || resp.StatusCode == 421 {
			err = c.Authenticate(ctx)
			if err != nil {
				return nil, err
			}

			if c.Session.Requires2FA() {
				return nil, errors.New("trust token expired, please reauth")
			}
			return c.RequestNoReAuth(ctx, opts, request, response)
		}
	}
	return resp, err
}
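For illustration, how a service method might drive Request; the path is a placeholder, but the reauth-once behaviour is what the method above implements:

	opts := rest.Opts{
		Method:  "GET",
		Path:    "/v1/item/" + itemID, // hypothetical docs endpoint path
		RootURL: docsEndpoint,
	}
	var item *DriveItemRaw
	// a 401/421 response triggers one Authenticate + retry before giving up
	resp, err := client.Request(ctx, opts, nil, &item)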

// RequestNoReAuth makes a request without re-authenticating.
//
// This function is useful when you have a session that is already
// authenticated, but you need to make a request without triggering
// a re-authentication.
func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
	// Make the request without re-authenticating
	resp, err = c.Session.Request(ctx, opts, request, response)
	return resp, err
}

// Authenticate authenticates the client with the iCloud API.
func (c *Client) Authenticate(ctx context.Context) error {
	if c.Session.Cookies != nil {
		if err := c.Session.ValidateSession(ctx); err == nil {
			fs.Debugf("icloud", "Valid session, no need to reauth")
			return nil
		}
		c.Session.Cookies = nil
	}

	fs.Debugf("icloud", "Authenticating as %s\n", c.appleID)
	err := c.Session.SignIn(ctx, c.appleID, c.password)

	if err == nil {
		err = c.Session.AuthWithToken(ctx)
		if err == nil && c.sessionSaveCallback != nil {
			c.sessionSaveCallback(c.Session)
		}
	}
	return err
}

// SignIn signs in the client using the provided context and credentials.
func (c *Client) SignIn(ctx context.Context) error {
	return c.Session.SignIn(ctx, c.appleID, c.password)
}

// IntoReader marshals the provided values into a JSON encoded reader
func IntoReader(values any) (*bytes.Reader, error) {
	m, err := json.Marshal(values)
	if err != nil {
		return nil, err
	}
	return bytes.NewReader(m), nil
}

// RequestError holds info on a result state; iCloud can return a 200 while the actual result is unknown
type RequestError struct {
	Status string
	Text   string
}

// Error satisfies the error interface.
func (e *RequestError) Error() string {
	return fmt.Sprintf("%s: %s", e.Text, e.Status)
}

func newRequestError(Status string, Text string) *RequestError {
	return &RequestError{
		Status: strings.ToLower(Status),
		Text:   Text,
	}
}

// newRequestErrorf makes a new error from sprintf parameters.
func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
	return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
}
913
backend/iclouddrive/api/drive.go
Normal file
@ -0,0 +1,913 @@
package api

import (
	"bytes"
	"context"
	"io"
	"mime"
	"net/http"
	"net/url"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

const (
	defaultZone        = "com.apple.CloudDocs"
	statusOk           = "OK"
	statusEtagConflict = "ETAG_CONFLICT"
)

// DriveService represents an iCloud Drive service.
type DriveService struct {
	icloud       *Client
	RootID       string
	endpoint     string
	docsEndpoint string
}

// NewDriveService creates a new DriveService instance.
func NewDriveService(icloud *Client) (*DriveService, error) {
	return &DriveService{icloud: icloud, RootID: "FOLDER::com.apple.CloudDocs::root", endpoint: icloud.Session.AccountInfo.Webservices["drivews"].URL, docsEndpoint: icloud.Session.AccountInfo.Webservices["docws"].URL}, nil
}

// GetItemByDriveID retrieves a DriveItem by its Drive ID.
func (d *DriveService) GetItemByDriveID(ctx context.Context, id string, includeChildren bool) (*DriveItem, *http.Response, error) {
	items, resp, err := d.GetItemsByDriveID(ctx, []string{id}, includeChildren)
	if err != nil {
		return nil, resp, err
	}
	return items[0], resp, err
}

// GetItemsByDriveID retrieves DriveItems by their Drive IDs.
func (d *DriveService) GetItemsByDriveID(ctx context.Context, ids []string, includeChildren bool) ([]*DriveItem, *http.Response, error) {
	var err error
	_items := []map[string]any{}
	for _, id := range ids {
		_items = append(_items, map[string]any{
			"drivewsid":        id,
			"partialData":      false,
			"includeHierarchy": false,
		})
	}

	var body *bytes.Reader
	var path string
	if !includeChildren {
		values := []map[string]any{{
			"items": _items,
		}}
		body, err = IntoReader(values)
		if err != nil {
			return nil, nil, err
		}
		path = "/retrieveItemDetails"
	} else {
		values := _items
		body, err = IntoReader(values)
		if err != nil {
			return nil, nil, err
		}
		path = "/retrieveItemDetailsInFolders"
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         path,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}
	var items []*DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &items)
	if err != nil {
		return nil, resp, err
	}

	return items, resp, err
}

// GetDocByPath retrieves a document by its path.
func (d *DriveService) GetDocByPath(ctx context.Context, path string) (*Document, *http.Response, error) {
	values := url.Values{}
	values.Set("unified_format", "false")
	body, err := IntoReader(path)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/list/lookup_by_path",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
		Body:         body,
	}
	var item []*Document
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item[0], resp, err
}

// GetItemByPath retrieves a DriveItem by its path.
func (d *DriveService) GetItemByPath(ctx context.Context, path string) (*DriveItem, *http.Response, error) {
	values := url.Values{}
	values.Set("unified_format", "true")

	body, err := IntoReader(path)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/list/lookup_by_path",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
		Body:         body,
	}
	var item []*DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item[0], resp, err
}

// GetDocByItemID retrieves a document by its item ID.
func (d *DriveService) GetDocByItemID(ctx context.Context, id string) (*Document, *http.Response, error) {
	values := url.Values{}
	values.Set("document_id", id)
	values.Set("unified_format", "false") // important
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/ws/" + defaultZone + "/list/lookup_by_id",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
	}
	var item *Document
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item, resp, err
}

// GetItemRawByItemID retrieves a DriveItemRaw by its item ID.
func (d *DriveService) GetItemRawByItemID(ctx context.Context, id string) (*DriveItemRaw, *http.Response, error) {
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/v1/item/" + id,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
	}
	var item *DriveItemRaw
	resp, err := d.icloud.Request(ctx, opts, nil, &item)
	if err != nil {
		return nil, resp, err
	}

	return item, resp, err
}

// GetItemsInFolder retrieves a list of DriveItemRaw objects in a folder with the given ID.
func (d *DriveService) GetItemsInFolder(ctx context.Context, id string, limit int64) ([]*DriveItemRaw, *http.Response, error) {
	values := url.Values{}
	values.Set("limit", strconv.FormatInt(limit, 10))

	opts := rest.Opts{
		Method:       "GET",
		Path:         "/v1/enumerate/" + id,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Parameters:   values,
	}

	items := struct {
		Items []*DriveItemRaw `json:"drive_item"`
	}{}

	resp, err := d.icloud.Request(ctx, opts, nil, &items)
	if err != nil {
		return nil, resp, err
	}

	return items.Items, resp, err
}

// GetDownloadURLByDriveID retrieves the download URL for a file in the DriveService.
func (d *DriveService) GetDownloadURLByDriveID(ctx context.Context, id string) (string, *http.Response, error) {
	_, zone, docid := DeconstructDriveID(id)
	values := url.Values{}
	values.Set("document_id", docid)

	if zone == "" {
		zone = defaultZone
	}

	opts := rest.Opts{
		Method:       "GET",
		Path:         "/ws/" + zone + "/download/by_id",
		Parameters:   values,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
	}

	var filer *FileRequest
	resp, err := d.icloud.Request(ctx, opts, nil, &filer)

	if err != nil {
		return "", resp, err
	}

	var url string
	if filer.DataToken != nil {
		url = filer.DataToken.URL
	} else {
		url = filer.PackageToken.URL
	}

	return url, resp, err
}

// DownloadFile downloads a file from the given URL using the provided options.
func (d *DriveService) DownloadFile(ctx context.Context, url string, opt []fs.OpenOption) (*http.Response, error) {
	opts := &rest.Opts{
		Method:       "GET",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      url,
		Options:      opt,
	}

	resp, err := d.icloud.srv.Call(ctx, opts)
	if err != nil {
		// icloud has some weird http codes: a 330 carries a Location header to retry against
		if resp != nil && resp.StatusCode == 330 {
			loc, err := resp.Location()
			if err == nil {
				return d.DownloadFile(ctx, loc.String(), opt)
			}
		}

		return resp, err
	}
	// the call already succeeded; return its response rather than issuing the request again
	return resp, err
}

// MoveItemToTrashByItemID moves an item to the trash based on the item ID.
func (d *DriveService) MoveItemToTrashByItemID(ctx context.Context, id, etag string, force bool) (*DriveItem, *http.Response, error) {
	doc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	return d.MoveItemToTrashByID(ctx, doc.DriveID(), etag, force)
}

// MoveItemToTrashByID moves an item to the trash based on the drive ID.
func (d *DriveService) MoveItemToTrashByID(ctx context.Context, drivewsid, etag string, force bool) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"items": []map[string]any{{
			"drivewsid": drivewsid,
			"etag":      etag,
			"clientId":  drivewsid,
		}}}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/moveItemsToTrash",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}

	item := struct {
		Items []*DriveItem `json:"items"`
	}{}
	resp, err := d.icloud.Request(ctx, opts, nil, &item)

	if err != nil {
		return nil, resp, err
	}

	if item.Items[0].Status != statusOk {
		// rerun with latest etag
		if force && item.Items[0].Status == statusEtagConflict {
			return d.MoveItemToTrashByID(ctx, drivewsid, item.Items[0].Etag, false)
		}

		err = newRequestError(item.Items[0].Status, "unknown request status")
	}

	return item.Items[0], resp, err
}
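To make the force flag concrete: on an etag conflict the method calls itself once more with the etag the server just returned. A hedged usage sketch with placeholder values:

	item, _, err := drv.MoveItemToTrashByID(ctx,
		"FILE::com.apple.CloudDocs::doc-123", // hypothetical drivewsid
		possiblyStaleEtag,
		true, // force: one retry with the server's latest etag on conflict
	)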

// CreateNewFolderByItemID creates a new folder by item ID.
func (d *DriveService) CreateNewFolderByItemID(ctx context.Context, id, name string) (*DriveItem, *http.Response, error) {
	doc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	return d.CreateNewFolderByDriveID(ctx, doc.DriveID(), name)
}

// CreateNewFolderByDriveID creates a new folder by its Drive ID.
func (d *DriveService) CreateNewFolderByDriveID(ctx context.Context, drivewsid, name string) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"destinationDrivewsId": drivewsid,
		"folders": []map[string]any{{
			"clientId": "FOLDER::UNKNOWN_ZONE::TempId-" + uuid.New().String(),
			"name":     name,
		}},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/createFolders",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}
	var fResp *CreateFoldersResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &fResp)
	if err != nil {
		return nil, resp, err
	}
	status := fResp.Folders[0].Status
	if status != statusOk {
		err = newRequestError(status, "unknown request status")
	}

	return fResp.Folders[0], resp, err
}

// RenameItemByItemID renames a DriveItem by its item ID.
func (d *DriveService) RenameItemByItemID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
	doc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	return d.RenameItemByDriveID(ctx, doc.DriveID(), doc.Etag, name, force)
}

// RenameItemByDriveID renames a DriveItem by its drive ID.
func (d *DriveService) RenameItemByDriveID(ctx context.Context, id, etag, name string, force bool) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"items": []map[string]any{{
			"drivewsid": id,
			"name":      name,
			"etag":      etag,
			// "extension": split[1],
		}},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/renameItems",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}
	var items *DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &items)

	if err != nil {
		return nil, resp, err
	}

	status := items.Items[0].Status
	if status != statusOk {
		// rerun with latest etag
		if force && status == statusEtagConflict {
			return d.RenameItemByDriveID(ctx, id, items.Items[0].Etag, name, false)
		}
		err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
	}

	return items.Items[0], resp, err
}

// MoveItemByItemID moves an item by its item ID to a destination item ID.
func (d *DriveService) MoveItemByItemID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
	docSrc, resp, err := d.GetDocByItemID(ctx, id)
	if err != nil {
		return nil, resp, err
	}
	docDst, resp, err := d.GetDocByItemID(ctx, dstID)
	if err != nil {
		return nil, resp, err
	}
	return d.MoveItemByDriveID(ctx, docSrc.DriveID(), docSrc.Etag, docDst.DriveID(), force)
}

// MoveItemByDocID moves an item by its doc ID.
// func (d *DriveService) MoveItemByDocID(ctx context.Context, srcDocID, srcEtag, dstDocID string, force bool) (*DriveItem, *http.Response, error) {
// 	return d.MoveItemByDriveID(ctx, srcDocID, srcEtag, docDst.DriveID(), force)
// }

// MoveItemByDriveID moves an item by its drive ID.
func (d *DriveService) MoveItemByDriveID(ctx context.Context, id, etag, dstID string, force bool) (*DriveItem, *http.Response, error) {
	values := map[string]any{
		"destinationDrivewsId": dstID,
		"items": []map[string]any{{
			"drivewsid": id,
			"etag":      etag,
			"clientId":  id,
		}},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/moveItems",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.endpoint,
		Body:         body,
	}

	var items *DriveItem
	resp, err := d.icloud.Request(ctx, opts, nil, &items)

	if err != nil {
		return nil, resp, err
	}

	status := items.Items[0].Status
	if status != statusOk {
		// rerun with latest etag
		if force && status == statusEtagConflict {
			return d.MoveItemByDriveID(ctx, id, items.Items[0].Etag, dstID, false)
		}
		err = newRequestErrorf(status, "unknown inner status for: %s %s", opts.Method, resp.Request.URL)
	}

	return items.Items[0], resp, err
}

// CopyDocByItemID copies a document by its item ID.
func (d *DriveService) CopyDocByItemID(ctx context.Context, itemID string) (*DriveItemRaw, *http.Response, error) {
	// putting the name in info_to_update doesn't work, but the extension does,
	// so assume this is a bug in the endpoint
	values := map[string]any{
		"info_to_update": map[string]any{},
	}

	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/v1/item/copy/" + itemID,
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Body:         body,
	}

	var info *DriveItemRaw
	resp, err := d.icloud.Request(ctx, opts, nil, &info)
	if err != nil {
		return nil, resp, err
	}
	return info, resp, err
}

// CreateUpload creates a URL for an upload.
func (d *DriveService) CreateUpload(ctx context.Context, size int64, name string) (*UploadResponse, *http.Response, error) {
	// first we need to request an upload url
	values := map[string]any{
		"filename":     name,
		"type":         "FILE",
		"size":         strconv.FormatInt(size, 10),
		"content_type": GetContentTypeForFile(name),
	}
	body, err := IntoReader(values)
	if err != nil {
		return nil, nil, err
	}

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/upload/web",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Body:         body,
	}
	var responseInfo []*UploadResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
	if err != nil {
		return nil, resp, err
	}
	return responseInfo[0], resp, err
}

// Upload uploads a file to the given URL
func (d *DriveService) Upload(ctx context.Context, in io.Reader, size int64, name, uploadURL string) (*SingleFileResponse, *http.Response, error) {
	// TODO: implement multipart upload
	opts := rest.Opts{
		Method:        "POST",
		ExtraHeaders:  d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:       uploadURL,
		Body:          in,
		ContentLength: &size,
		ContentType:   GetContentTypeForFile(name),
		// MultipartContentName: "files",
		MultipartFileName: name,
	}
	var singleFileResponse *SingleFileResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &singleFileResponse)
	if err != nil {
		return nil, resp, err
	}
	return singleFileResponse, resp, err
}
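Taken together, an upload is a two-step exchange: CreateUpload asks the docs endpoint for a one-shot URL, then Upload posts the bytes to it. A hedged sketch; the file name, size and reader are placeholders:

	up, _, err := drv.CreateUpload(ctx, size, "report.pdf") // step 1: obtain upload URL
	if err != nil {
		return err
	}
	single, _, err := drv.Upload(ctx, reader, size, "report.pdf", up.URL) // step 2: send content
	if err != nil {
		return err
	}
	// single.SingleFile carries the checksums/receipt that a later UpdateFile call can use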

// UpdateFile updates a file in the DriveService.
//
// ctx: the context.Context object for the request.
// r: a pointer to the UpdateFileInfo struct containing the information for the file update.
// Returns a pointer to the DriveItem struct representing the updated file, the http.Response object, and an error if any.
func (d *DriveService) UpdateFile(ctx context.Context, r *UpdateFileInfo) (*DriveItem, *http.Response, error) {
	body, err := IntoReader(r)
	if err != nil {
		return nil, nil, err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/ws/" + defaultZone + "/update/documents",
		ExtraHeaders: d.icloud.Session.GetHeaders(map[string]string{}),
		RootURL:      d.docsEndpoint,
		Body:         body,
	}
	var responseInfo *DocumentUpdateResponse
	resp, err := d.icloud.Request(ctx, opts, nil, &responseInfo)
	if err != nil {
		return nil, resp, err
	}

	doc := responseInfo.Results[0].Document
	item := DriveItem{
		Drivewsid:    "FILE::com.apple.CloudDocs::" + doc.DocumentID,
		Docwsid:      doc.DocumentID,
		Itemid:       doc.ItemID,
		Etag:         doc.Etag,
		ParentID:     doc.ParentID,
		DateModified: time.Unix(r.Mtime, 0),
		DateCreated:  time.Unix(r.Mtime, 0),
		Type:         doc.Type,
		Name:         doc.Name,
		Size:         doc.Size,
	}

	return &item, resp, err
}

// UpdateFileInfo represents the information for an update to a file in the DriveService.
type UpdateFileInfo struct {
	AllowConflict   bool   `json:"allow_conflict"`
	Btime           int64  `json:"btime"`
	Command         string `json:"command"`
	CreateShortGUID bool   `json:"create_short_guid"`
	Data            struct {
		Receipt            string `json:"receipt,omitempty"`
		ReferenceSignature string `json:"reference_signature,omitempty"`
		Signature          string `json:"signature,omitempty"`
		Size               int64  `json:"size,omitempty"`
		WrappingKey        string `json:"wrapping_key,omitempty"`
	} `json:"data,omitempty"`
	DocumentID string    `json:"document_id"`
	FileFlags  FileFlags `json:"file_flags"`
	Mtime      int64     `json:"mtime"`
	Path       struct {
		Path               string `json:"path"`
		StartingDocumentID string `json:"starting_document_id"`
	} `json:"path"`
}

// FileFlags defines the file flags for a document.
type FileFlags struct {
	IsExecutable bool `json:"is_executable"`
	IsHidden     bool `json:"is_hidden"`
	IsWritable   bool `json:"is_writable"`
}

// NewUpdateFileInfo creates a new UpdateFileInfo object with default values.
//
// Returns an UpdateFileInfo object.
func NewUpdateFileInfo() UpdateFileInfo {
	return UpdateFileInfo{
		Command:         "add_file",
		CreateShortGUID: true,
		AllowConflict:   true,
		FileFlags: FileFlags{
			IsExecutable: true,
			IsHidden:     false,
			IsWritable:   false,
		},
	}
}

// DriveItemRaw is a raw drive item.
// Not sure what to call this, but there seems to be a "unified" and a non-"unified"
// drive item response. This is the non-unified one.
type DriveItemRaw struct {
	ItemID   string            `json:"item_id"`
	ItemInfo *DriveItemRawInfo `json:"item_info"`
}

// SplitName splits the name of a DriveItemRaw into its name and extension.
//
// It returns the name and extension as separate strings. If the name ends with a dot,
// it means there is no extension, so an empty string is returned for the extension.
// If the name does not contain a dot, it also has no extension, so the full name
// and an empty extension are returned.
func (d *DriveItemRaw) SplitName() (string, string) {
	name := d.ItemInfo.Name
	// ends with a dot, no extension
	if strings.HasSuffix(name, ".") {
		return name, ""
	}
	lastInd := strings.LastIndex(name, ".")

	if lastInd == -1 {
		return name, ""
	}
	return name[:lastInd], name[lastInd+1:]
}
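A few worked cases of that split:

	// "report.final.pdf" -> ("report.final", "pdf")  last dot wins
	// "Makefile"         -> ("Makefile", "")         no dot, no extension
	// "archive."         -> ("archive.", "")         trailing dot, no extension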

// ModTime returns the modification time of the DriveItemRaw.
//
// It parses the ModifiedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time value represents the modification time of the DriveItemRaw.
func (d *DriveItemRaw) ModTime() time.Time {
	i, err := strconv.ParseInt(d.ItemInfo.ModifiedAt, 10, 64)
	if err != nil {
		return time.Time{}
	}
	return time.UnixMilli(i)
}

// CreatedTime returns the creation time of the DriveItemRaw.
//
// It parses the CreatedAt field of the ItemInfo struct and converts it to a time.Time value.
// If the parsing fails, it returns the zero value of time.Time.
// The returned time.Time value represents the creation time of the DriveItemRaw.
func (d *DriveItemRaw) CreatedTime() time.Time {
	i, err := strconv.ParseInt(d.ItemInfo.CreatedAt, 10, 64)
	if err != nil {
		return time.Time{}
	}
	return time.UnixMilli(i)
}

// DriveItemRawInfo is the raw information about a drive item.
type DriveItemRawInfo struct {
	Name string `json:"name"`
	// Extension is absolutely borked on endpoints so don't use it.
	Extension  string `json:"extension"`
	Size       int64  `json:"size,string"`
	Type       string `json:"type"`
	Version    string `json:"version"`
	ModifiedAt string `json:"modified_at"`
	CreatedAt  string `json:"created_at"`
	Urls       struct {
		URLDownload string `json:"url_download"`
	} `json:"urls"`
}

// IntoDriveItem converts a DriveItemRaw into a DriveItem.
//
// It takes no parameters.
// It returns a pointer to a DriveItem.
func (d *DriveItemRaw) IntoDriveItem() *DriveItem {
	name, extension := d.SplitName()
	return &DriveItem{
		Itemid:       d.ItemID,
		Name:         name,
		Extension:    extension,
		Type:         d.ItemInfo.Type,
		Etag:         d.ItemInfo.Version,
		DateModified: d.ModTime(),
		DateCreated:  d.CreatedTime(),
		Size:         d.ItemInfo.Size,
		Urls:         d.ItemInfo.Urls,
	}
}

// DocumentUpdateResponse is the response of a document update request.
type DocumentUpdateResponse struct {
	Status struct {
		StatusCode   int    `json:"status_code"`
		ErrorMessage string `json:"error_message"`
	} `json:"status"`
	Results []struct {
		Status struct {
			StatusCode   int    `json:"status_code"`
			ErrorMessage string `json:"error_message"`
		} `json:"status"`
		OperationID interface{} `json:"operation_id"`
		Document    *Document   `json:"document"`
	} `json:"results"`
}

// Document represents a document on iCloud.
type Document struct {
	Status struct {
		StatusCode   int    `json:"status_code"`
		ErrorMessage string `json:"error_message"`
	} `json:"status"`
	DocumentID string `json:"document_id"`
	ItemID     string `json:"item_id"`
	Urls       struct {
		URLDownload string `json:"url_download"`
	} `json:"urls"`
	Etag           string       `json:"etag"`
	ParentID       string       `json:"parent_id"`
	Name           string       `json:"name"`
	Type           string       `json:"type"`
	Deleted        bool         `json:"deleted"`
	Mtime          int64        `json:"mtime"`
	LastEditorName string       `json:"last_editor_name"`
	Data           DocumentData `json:"data"`
	Size           int64        `json:"size"`
	Btime          int64        `json:"btime"`
	Zone           string       `json:"zone"`
	FileFlags      struct {
		IsExecutable bool `json:"is_executable"`
		IsWritable   bool `json:"is_writable"`
		IsHidden     bool `json:"is_hidden"`
	} `json:"file_flags"`
	LastOpenedTime   int64       `json:"lastOpenedTime"`
	RestorePath      interface{} `json:"restorePath"`
	HasChainedParent bool        `json:"hasChainedParent"`
}

// DriveID returns the drive ID of the Document.
func (d *Document) DriveID() string {
	if d.Zone == "" {
		d.Zone = defaultZone
	}
	return d.Type + "::" + d.Zone + "::" + d.DocumentID
}

// DocumentData represents the data of a document.
type DocumentData struct {
	Signature          string `json:"signature"`
	Owner              string `json:"owner"`
	Size               int64  `json:"size"`
	ReferenceSignature string `json:"reference_signature"`
	WrappingKey        string `json:"wrapping_key"`
	PcsInfo            string `json:"pcsInfo"`
}

// SingleFileResponse is the response of a single file request.
type SingleFileResponse struct {
	SingleFile *SingleFileInfo `json:"singleFile"`
}

// SingleFileInfo represents the information of a single file.
type SingleFileInfo struct {
	ReferenceSignature string `json:"referenceChecksum"`
	Size               int64  `json:"size"`
	Signature          string `json:"fileChecksum"`
	WrappingKey        string `json:"wrappingKey"`
	Receipt            string `json:"receipt"`
}

// UploadResponse is the response of an upload request.
type UploadResponse struct {
	URL        string `json:"url"`
	DocumentID string `json:"document_id"`
}

// FileRequestToken represents the token of a file request.
type FileRequestToken struct {
	URL                string `json:"url"`
	Token              string `json:"token"`
	Signature          string `json:"signature"`
	WrappingKey        string `json:"wrapping_key"`
	ReferenceSignature string `json:"reference_signature"`
}

// FileRequest represents the request of a file.
type FileRequest struct {
	DocumentID   string            `json:"document_id"`
	ItemID       string            `json:"item_id"`
	OwnerDsid    int64             `json:"owner_dsid"`
	DataToken    *FileRequestToken `json:"data_token,omitempty"`
	PackageToken *FileRequestToken `json:"package_token,omitempty"`
	DoubleEtag   string            `json:"double_etag"`
}

// CreateFoldersResponse is the response of a create folders request.
type CreateFoldersResponse struct {
	Folders []*DriveItem `json:"folders"`
}

// DriveItem represents an item on iCloud.
type DriveItem struct {
	DateCreated         time.Time    `json:"dateCreated"`
	Drivewsid           string       `json:"drivewsid"`
	Docwsid             string       `json:"docwsid"`
	Itemid              string       `json:"item_id"`
	Zone                string       `json:"zone"`
	Name                string       `json:"name"`
	ParentID            string       `json:"parentId"`
	Hierarchy           []DriveItem  `json:"hierarchy"`
	Etag                string       `json:"etag"`
	Type                string       `json:"type"`
	AssetQuota          int64        `json:"assetQuota"`
	FileCount           int64        `json:"fileCount"`
	ShareCount          int64        `json:"shareCount"`
	ShareAliasCount     int64        `json:"shareAliasCount"`
	DirectChildrenCount int64        `json:"directChildrenCount"`
	Items               []*DriveItem `json:"items"`
	NumberOfItems       int64        `json:"numberOfItems"`
	Status              string       `json:"status"`
	Extension           string       `json:"extension,omitempty"`
	DateModified        time.Time    `json:"dateModified,omitempty"`
	DateChanged         time.Time    `json:"dateChanged,omitempty"`
	Size                int64        `json:"size,omitempty"`
	LastOpenTime        time.Time    `json:"lastOpenTime,omitempty"`
	Urls                struct {
		URLDownload string `json:"url_download"`
	} `json:"urls"`
}

// IsFolder returns true if the item is a folder.
func (d *DriveItem) IsFolder() bool {
	return d.Type == "FOLDER" || d.Type == "APP_CONTAINER" || d.Type == "APP_LIBRARY"
}

// DownloadURL returns the download URL of the item.
func (d *DriveItem) DownloadURL() string {
	return d.Urls.URLDownload
}

// FullName returns the full name of the item.
// name + extension
func (d *DriveItem) FullName() string {
	if d.Extension != "" {
		return d.Name + "." + d.Extension
	}
	return d.Name
}

// GetDocIDFromDriveID returns the DocumentID from the drive ID.
func GetDocIDFromDriveID(id string) string {
	split := strings.Split(id, "::")
	return split[len(split)-1]
}

// DeconstructDriveID returns the document type, zone, and document ID from the drive ID.
func DeconstructDriveID(id string) (docType, zone, docid string) {
	split := strings.Split(id, "::")
	if len(split) < 3 {
		return "", "", id
	}
	return split[0], split[1], split[2]
}

// ConstructDriveID constructs a drive ID from the given components.
func ConstructDriveID(id string, zone string, t string) string {
	return strings.Join([]string{t, zone, id}, "::")
}
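The drive ID is simply the three parts joined with "::", so the two helpers round-trip; a worked example with a placeholder document ID:

	id := ConstructDriveID("doc-123", "com.apple.CloudDocs", "FILE")
	// id == "FILE::com.apple.CloudDocs::doc-123"
	docType, zone, docid := DeconstructDriveID(id)
	// docType == "FILE", zone == "com.apple.CloudDocs", docid == "doc-123"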

// GetContentTypeForFile detects content type for given file name.
func GetContentTypeForFile(name string) string {
	// detect MIME type by looking at the filename only
	mimeType := mime.TypeByExtension(filepath.Ext(name))
	if mimeType == "" {
		// api requires a mime type passed in
		mimeType = "text/plain"
	}
	return strings.Split(mimeType, ";")[0]
}
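The MIME lookup is driven by the extension alone, any parameters are stripped, and a missing match falls back to text/plain; for instance:

	GetContentTypeForFile("photo.jpg") // "image/jpeg"
	GetContentTypeForFile("notes.txt") // "text/plain" ("; charset=utf-8" is cut off)
	GetContentTypeForFile("README")    // "text/plain" (no extension, fallback)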
412
backend/iclouddrive/api/session.go
Normal file
@ -0,0 +1,412 @@
package api

import (
	"context"
	"fmt"
	"net/http"
	"net/url"
	"slices"
	"strings"

	"github.com/oracle/oci-go-sdk/v65/common"

	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/lib/rest"
)

// Session represents an iCloud session
type Session struct {
	SessionToken   string         `json:"session_token"`
	Scnt           string         `json:"scnt"`
	SessionID      string         `json:"session_id"`
	AccountCountry string         `json:"account_country"`
	TrustToken     string         `json:"trust_token"`
	ClientID       string         `json:"client_id"`
	Cookies        []*http.Cookie `json:"cookies"`
	AccountInfo    AccountInfo    `json:"account_info"`

	srv *rest.Client `json:"-"`
}

// String returns the session as a string
// func (s *Session) String() string {
// 	jsession, _ := json.Marshal(s)
// 	return string(jsession)
// }

// Request makes a request
func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
	resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)

	if err != nil {
		return resp, err
	}

	if val := resp.Header.Get("X-Apple-ID-Account-Country"); val != "" {
		s.AccountCountry = val
	}
	if val := resp.Header.Get("X-Apple-ID-Session-Id"); val != "" {
		s.SessionID = val
	}
	if val := resp.Header.Get("X-Apple-Session-Token"); val != "" {
		s.SessionToken = val
	}
	if val := resp.Header.Get("X-Apple-TwoSV-Trust-Token"); val != "" {
		s.TrustToken = val
	}
	if val := resp.Header.Get("scnt"); val != "" {
		s.Scnt = val
	}

	return resp, nil
}

// Requires2FA returns true if the session requires 2FA
func (s *Session) Requires2FA() bool {
	return s.AccountInfo.DsInfo.HsaVersion == 2 && s.AccountInfo.HsaChallengeRequired
}

// SignIn signs in the session
func (s *Session) SignIn(ctx context.Context, appleID, password string) error {
	trustTokens := []string{}
	if s.TrustToken != "" {
		trustTokens = []string{s.TrustToken}
	}
	values := map[string]any{
		"accountName": appleID,
		"password":    password,
		"rememberMe":  true,
		"trustTokens": trustTokens,
	}
	body, err := IntoReader(values)
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/signin",
		Parameters:   url.Values{},
		ExtraHeaders: s.GetAuthHeaders(map[string]string{}),
		RootURL:      authEndpoint,
		IgnoreStatus: true, // need to handle 409 for hsa2
		NoResponse:   true,
		Body:         body,
	}
	opts.Parameters.Set("isRememberMeEnabled", "true")
	_, err = s.Request(ctx, opts, nil, nil)

	return err
}

// AuthWithToken authenticates the session
func (s *Session) AuthWithToken(ctx context.Context) error {
	values := map[string]any{
		"accountCountryCode": s.AccountCountry,
		"dsWebAuthToken":     s.SessionToken,
		"extended_login":     true,
		"trustToken":         s.TrustToken,
	}
	body, err := IntoReader(values)
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method:       "POST",
		Path:         "/accountLogin",
		ExtraHeaders: GetCommonHeaders(map[string]string{}),
		RootURL:      setupEndpoint,
		Body:         body,
	}

	resp, err := s.Request(ctx, opts, nil, &s.AccountInfo)
	if err == nil {
		s.Cookies = resp.Cookies()
	}

	return err
}

// Validate2FACode validates the 2FA code
func (s *Session) Validate2FACode(ctx context.Context, code string) error {
	values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
	body, err := IntoReader(values)
	if err != nil {
		return err
	}

	headers := s.GetAuthHeaders(map[string]string{})
	headers["scnt"] = s.Scnt
	headers["X-Apple-ID-Session-Id"] = s.SessionID

	opts := rest.Opts{
		Method:       "POST",
		Path:         "/verify/trusteddevice/securitycode",
		ExtraHeaders: headers,
		RootURL:      authEndpoint,
		Body:         body,
		NoResponse:   true,
	}

	_, err = s.Request(ctx, opts, nil, nil)
	if err == nil {
		if err := s.TrustSession(ctx); err != nil {
			return err
		}

		return nil
	}

	return fmt.Errorf("validate2FACode failed: %w", err)
}

// TrustSession trusts the session
func (s *Session) TrustSession(ctx context.Context) error {
	headers := s.GetAuthHeaders(map[string]string{})
	headers["scnt"] = s.Scnt
	headers["X-Apple-ID-Session-Id"] = s.SessionID

	opts := rest.Opts{
		Method:        "GET",
		Path:          "/2sv/trust",
		ExtraHeaders:  headers,
		RootURL:       authEndpoint,
		NoResponse:    true,
		ContentLength: common.Int64(0),
	}

	_, err := s.Request(ctx, opts, nil, nil)
	if err != nil {
		return fmt.Errorf("trustSession failed: %w", err)
	}

	return s.AuthWithToken(ctx)
}
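The session methods above chain into one first-run login flow; a hedged sketch of the order in which a caller would use them (the 2FA code is a placeholder):

	s := NewSession()
	if err := s.SignIn(ctx, appleID, password); err != nil {
		return err
	}
	if err := s.AuthWithToken(ctx); err != nil {
		return err
	}
	if s.Requires2FA() {
		// Validate2FACode calls TrustSession, which re-runs AuthWithToken
		if err := s.Validate2FACode(ctx, "123456"); err != nil {
			return err
		}
	}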

// ValidateSession validates the session
func (s *Session) ValidateSession(ctx context.Context) error {
	opts := rest.Opts{
		Method:        "POST",
		Path:          "/validate",
		ExtraHeaders:  s.GetHeaders(map[string]string{}),
		RootURL:       setupEndpoint,
		ContentLength: common.Int64(0),
	}
	_, err := s.Request(ctx, opts, nil, &s.AccountInfo)
	if err != nil {
		return fmt.Errorf("validateSession failed: %w", err)
	}

	return nil
}

// GetAuthHeaders returns the authentication headers for the session.
//
// It takes an `overwrite` map[string]string parameter which allows
// overwriting the default headers. It returns a map[string]string.
func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string {
	headers := map[string]string{
		"Accept":                           "application/json",
		"Content-Type":                     "application/json",
		"X-Apple-OAuth-Client-Id":          s.ClientID,
		"X-Apple-OAuth-Client-Type":        "firstPartyAuth",
		"X-Apple-OAuth-Redirect-URI":       "https://www.icloud.com",
		"X-Apple-OAuth-Require-Grant-Code": "true",
		"X-Apple-OAuth-Response-Mode":      "web_message",
		"X-Apple-OAuth-Response-Type":      "code",
		"X-Apple-OAuth-State":              s.ClientID,
		"X-Apple-Widget-Key":               s.ClientID,
		"Origin":                           homeEndpoint,
		"Referer":                          fmt.Sprintf("%s/", homeEndpoint),
		"User-Agent":                       "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
	}
	for k, v := range overwrite {
		headers[k] = v
	}
	return headers
}

// GetHeaders returns the authentication headers required for a request
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
	headers := GetCommonHeaders(map[string]string{})
	headers["Cookie"] = s.GetCookieString()
	for k, v := range overwrite {
		headers[k] = v
	}
	return headers
}

// GetCookieString returns the cookie header string for the session.
func (s *Session) GetCookieString() string {
	cookieHeader := ""
	// we only care about name and value.
	for _, cookie := range s.Cookies {
		cookieHeader = cookieHeader + cookie.Name + "=" + cookie.Value + ";"
	}
	return cookieHeader
}

// GetCommonHeaders generates common HTTP headers with optional overwrite.
func GetCommonHeaders(overwrite map[string]string) map[string]string {
	headers := map[string]string{
		"Content-Type": "application/json",
		"Origin":       baseEndpoint,
		"Referer":      fmt.Sprintf("%s/", baseEndpoint),
		"User-Agent":   "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
	}
	for k, v := range overwrite {
		headers[k] = v
	}
	return headers
}

// MergeCookies merges two slices of http.Cookies, ensuring no duplicates are added.
func MergeCookies(left []*http.Cookie, right []*http.Cookie) ([]*http.Cookie, error) {
	var hashes []string
	for _, cookie := range right {
		hashes = append(hashes, cookie.Raw)
	}
	for _, cookie := range left {
		if !slices.Contains(hashes, cookie.Raw) {
			right = append(right, cookie)
		}
	}
	return right, nil
}

// GetCookiesForDomain filters the provided cookies based on the domain of the given URL.
func GetCookiesForDomain(url *url.URL, cookies []*http.Cookie) ([]*http.Cookie, error) {
	var domainCookies []*http.Cookie
	for _, cookie := range cookies {
		if strings.HasSuffix(url.Host, cookie.Domain) {
			domainCookies = append(domainCookies, cookie)
		}
	}
	return domainCookies, nil
}
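A sketch of how the cookie helpers compose when restoring a saved session; the endpoint is one of the constants above, the cookie slices are placeholders:

	// keep the fresh cookies, adding saved ones that were not re-issued
	merged, _ := MergeCookies(savedCookies, resp.Cookies())
	// only send the ones scoped to the host we are about to call
	u, _ := url.Parse(setupEndpoint)
	scoped, _ := GetCookiesForDomain(u, merged)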
|
|
||||||
|
// NewSession creates a new Session instance with default values.
func NewSession() *Session {
	session := &Session{}
	session.srv = rest.NewClient(fshttp.NewClient(context.Background())).SetRoot(baseEndpoint)
	//session.ClientID = "auth-" + uuid.New().String()
	return session
}

// AccountInfo represents account info
type AccountInfo struct {
	DsInfo                       *ValidateDataDsInfo    `json:"dsInfo"`
	HasMinimumDeviceForPhotosWeb bool                   `json:"hasMinimumDeviceForPhotosWeb"`
	ICDPEnabled                  bool                   `json:"iCDPEnabled"`
	Webservices                  map[string]*webService `json:"webservices"`
	PcsEnabled                   bool                   `json:"pcsEnabled"`
	TermsUpdateNeeded            bool                   `json:"termsUpdateNeeded"`
	ConfigBag                    struct {
		Urls struct {
			AccountCreateUI     string `json:"accountCreateUI"`
			AccountLoginUI      string `json:"accountLoginUI"`
			AccountLogin        string `json:"accountLogin"`
			AccountRepairUI     string `json:"accountRepairUI"`
			DownloadICloudTerms string `json:"downloadICloudTerms"`
			RepairDone          string `json:"repairDone"`
			AccountAuthorizeUI  string `json:"accountAuthorizeUI"`
			VettingURLForEmail  string `json:"vettingUrlForEmail"`
			AccountCreate       string `json:"accountCreate"`
			GetICloudTerms      string `json:"getICloudTerms"`
			VettingURLForPhone  string `json:"vettingUrlForPhone"`
		} `json:"urls"`
		AccountCreateEnabled bool `json:"accountCreateEnabled"`
	} `json:"configBag"`
	HsaTrustedBrowser            bool     `json:"hsaTrustedBrowser"`
	AppsOrder                    []string `json:"appsOrder"`
	Version                      int      `json:"version"`
	IsExtendedLogin              bool     `json:"isExtendedLogin"`
	PcsServiceIdentitiesIncluded bool     `json:"pcsServiceIdentitiesIncluded"`
	IsRepairNeeded               bool     `json:"isRepairNeeded"`
	HsaChallengeRequired         bool     `json:"hsaChallengeRequired"`
	RequestInfo                  struct {
		Country  string `json:"country"`
		TimeZone string `json:"timeZone"`
		Region   string `json:"region"`
	} `json:"requestInfo"`
	PcsDeleted bool `json:"pcsDeleted"`
	ICloudInfo struct {
		SafariBookmarksHasMigratedToCloudKit bool `json:"SafariBookmarksHasMigratedToCloudKit"`
	} `json:"iCloudInfo"`
	Apps map[string]*ValidateDataApp `json:"apps"`
}

// ValidateDataDsInfo represents validation info
type ValidateDataDsInfo struct {
	HsaVersion                         int           `json:"hsaVersion"`
	LastName                           string        `json:"lastName"`
	ICDPEnabled                        bool          `json:"iCDPEnabled"`
	TantorMigrated                     bool          `json:"tantorMigrated"`
	Dsid                               string        `json:"dsid"`
	HsaEnabled                         bool          `json:"hsaEnabled"`
	IsHideMyEmailSubscriptionActive    bool          `json:"isHideMyEmailSubscriptionActive"`
	IroncadeMigrated                   bool          `json:"ironcadeMigrated"`
	Locale                             string        `json:"locale"`
	BrZoneConsolidated                 bool          `json:"brZoneConsolidated"`
	ICDRSCapableDeviceList             string        `json:"ICDRSCapableDeviceList"`
	IsManagedAppleID                   bool          `json:"isManagedAppleID"`
	IsCustomDomainsFeatureAvailable    bool          `json:"isCustomDomainsFeatureAvailable"`
	IsHideMyEmailFeatureAvailable      bool          `json:"isHideMyEmailFeatureAvailable"`
	ContinueOnDeviceEligibleDeviceInfo []string      `json:"ContinueOnDeviceEligibleDeviceInfo"`
	Gilligvited                        bool          `json:"gilligvited"`
	AppleIDAliases                     []interface{} `json:"appleIdAliases"`
	UbiquityEOLEnabled                 bool          `json:"ubiquityEOLEnabled"`
	IsPaidDeveloper                    bool          `json:"isPaidDeveloper"`
	CountryCode                        string        `json:"countryCode"`
	NotificationID                     string        `json:"notificationId"`
	PrimaryEmailVerified               bool          `json:"primaryEmailVerified"`
	ADsID                              string        `json:"aDsID"`
	Locked                             bool          `json:"locked"`
	ICDRSCapableDeviceCount            int           `json:"ICDRSCapableDeviceCount"`
	HasICloudQualifyingDevice          bool          `json:"hasICloudQualifyingDevice"`
	PrimaryEmail                       string        `json:"primaryEmail"`
	AppleIDEntries                     []struct {
		IsPrimary bool   `json:"isPrimary"`
		Type      string `json:"type"`
		Value     string `json:"value"`
	} `json:"appleIdEntries"`
	GilliganEnabled    bool   `json:"gilligan-enabled"`
	IsWebAccessAllowed bool   `json:"isWebAccessAllowed"`
	FullName           string `json:"fullName"`
	MailFlags          struct {
		IsThreadingAvailable           bool `json:"isThreadingAvailable"`
		IsSearchV2Provisioned          bool `json:"isSearchV2Provisioned"`
		SCKMail                        bool `json:"sCKMail"`
		IsMppSupportedInCurrentCountry bool `json:"isMppSupportedInCurrentCountry"`
	} `json:"mailFlags"`
	LanguageCode         string `json:"languageCode"`
	AppleID              string `json:"appleId"`
	HasUnreleasedOS      bool   `json:"hasUnreleasedOS"`
	AnalyticsOptInStatus bool   `json:"analyticsOptInStatus"`
	FirstName            string `json:"firstName"`
	ICloudAppleIDAlias   string `json:"iCloudAppleIdAlias"`
	NotesMigrated        bool   `json:"notesMigrated"`
	BeneficiaryInfo      struct {
		IsBeneficiary bool `json:"isBeneficiary"`
	} `json:"beneficiaryInfo"`
	HasPaymentInfo bool   `json:"hasPaymentInfo"`
	PcsDelet       bool   `json:"pcsDelet"`
	AppleIDAlias   string `json:"appleIdAlias"`
	BrMigrated     bool   `json:"brMigrated"`
	StatusCode     int    `json:"statusCode"`
	FamilyEligible bool   `json:"familyEligible"`
}

// ValidateDataApp represents an app
type ValidateDataApp struct {
	CanLaunchWithOneFactor bool `json:"canLaunchWithOneFactor"`
	IsQualifiedForBeta     bool `json:"isQualifiedForBeta"`
}

// webService represents a web service
type webService struct {
	PcsRequired bool   `json:"pcsRequired"`
	URL         string `json:"url"`
	UploadURL   string `json:"uploadUrl"`
	Status      string `json:"status"`
}
1174	backend/iclouddrive/iclouddrive.go	(new file; diff suppressed because it is too large)
18	backend/iclouddrive/iclouddrive_test.go	(new file)
@ -0,0 +1,18 @@
//go:build !plan9 && !solaris

package iclouddrive_test

import (
	"testing"

	"github.com/rclone/rclone/backend/iclouddrive"
	"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestICloudDrive:",
		NilObject:  (*iclouddrive.Object)(nil),
	})
}
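For reference, an integration test like this is driven by the fstests harness against a configured remote; a typical invocation (the remote name mirrors the Opt above and is an assumption about the tester's config) would look like:

	// go test -timeout 30m -run ^TestIntegration$ github.com/rclone/rclone/backend/iclouddrive -remote TestICloudDrive: -v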
7	backend/iclouddrive/iclouddrive_unsupported.go	(new file)
@ -0,0 +1,7 @@
// Build for iclouddrive for unsupported platforms to stop go complaining
// about "no buildable Go source files"

//go:build plan9 || solaris

// Package iclouddrive implements the iCloud Drive backend
package iclouddrive
@ -1555,7 +1555,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 		}
 	}
 	info, err := f.copyOrMove(ctx, "mv", srcObj.filePath(), remote)

-	if err != nil && meta != nil {
+	if err == nil && meta != nil {
 		createTime, createTimeMeta := srcObj.parseFsMetadataTime(meta, "btime")
 		if !createTimeMeta {
 			createTime = srcObj.createTime
@ -6,6 +6,7 @@ package local
 import (
 	"context"
 	"fmt"
+	"path/filepath"
 	"runtime"

 	"github.com/go-darwin/apfs"
@ -22,7 +23,7 @@ import (
 //
 // If it isn't possible then return fs.ErrorCantCopy
 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
-	if runtime.GOOS != "darwin" || f.opt.TranslateSymlinks || f.opt.NoClone {
+	if runtime.GOOS != "darwin" || f.opt.NoClone {
 		return nil, fs.ErrorCantCopy
 	}
 	srcObj, ok := src.(*Object)
@ -30,6 +31,9 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 		fs.Debugf(src, "Can't clone - not same remote type")
 		return nil, fs.ErrorCantCopy
 	}
+	if f.opt.TranslateSymlinks && srcObj.translatedLink { // in --links mode, use cloning only for regular files
+		return nil, fs.ErrorCantCopy
+	}

 	// Fetch metadata if --metadata is in use
 	meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
@ -44,11 +48,18 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 		return nil, err
 	}

-	err = Clone(srcObj.path, f.localPath(remote))
+	srcPath := srcObj.path
+	if f.opt.FollowSymlinks { // in --copy-links mode, find the real file being pointed to and pass that in instead
+		srcPath, err = filepath.EvalSymlinks(srcPath)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	err = Clone(srcPath, f.localPath(remote))
 	if err != nil {
 		return nil, err
 	}
-	fs.Debugf(remote, "server-side cloned!")

 	// Set metadata if --metadata is in use
 	if meta != nil {
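The --copy-links branch added above hands Clone the resolved target rather than the link itself, since cloning the symlink would reproduce the link object. A minimal sketch of what filepath.EvalSymlinks does there (the paths are hypothetical):

	// Assuming /tmp/link.txt is a symlink to /tmp/real.txt:
	resolved, err := filepath.EvalSymlinks("/tmp/link.txt")
	if err != nil {
		return nil, err
	}
	// resolved == "/tmp/real.txt", the path actually passed to Clone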
@ -73,7 +73,6 @@ func TestUpdatingCheck(t *testing.T) {
 	r.WriteFile(filePath, "content updated", time.Now())
 	_, err = in.Read(buf)
 	require.NoError(t, err)
-
 }

 // Test corrupted on transfer
@ -224,7 +223,7 @@ func TestHashOnUpdate(t *testing.T) {
 	assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)

 	// Reupload it with different contents but same size and timestamp
-	var b = bytes.NewBufferString("CONTENT")
+	b := bytes.NewBufferString("CONTENT")
 	src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
 	err = o.Update(ctx, b, src)
 	require.NoError(t, err)
@ -395,7 +394,6 @@ func TestMetadata(t *testing.T) {
 			assert.Equal(t, "wedges", m["potato"])
 		}
 	})
-
 }

 func TestFilter(t *testing.T) {
@ -572,4 +570,35 @@ func TestCopySymlink(t *testing.T) {
 	linkContents, err := os.Readlink(dstPath)
 	require.NoError(t, err)
 	assert.Equal(t, "file.txt", linkContents)
+
+	// Set fs into "-L/--copy-links" mode
+	f.opt.FollowSymlinks = true
+	f.opt.TranslateSymlinks = false
+	f.lstat = os.Stat
+
+	// Create dst
+	require.NoError(t, f.Mkdir(ctx, "dst2"))
+
+	// Do copy from src into dst
+	src, err = f.NewObject(ctx, "src/link.txt")
+	require.NoError(t, err)
+	require.NotNil(t, src)
+	dst, err = operations.Copy(ctx, f, nil, "dst2/link.txt", src)
+	require.NoError(t, err)
+	require.NotNil(t, dst)
+
+	// Test that we made a NON-symlink and it has the right contents
+	dstPath = filepath.Join(r.LocalName, "dst2", "link.txt")
+	fi, err := os.Lstat(dstPath)
+	require.NoError(t, err)
+	assert.True(t, fi.Mode()&os.ModeSymlink == 0)
+	want := fstest.NewItem("dst2/link.txt", "hello world", when)
+	fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
+
+	// Test that copying a normal file also works
+	dst, err = operations.Copy(ctx, f, nil, "dst2/file.txt", dst)
+	require.NoError(t, err)
+	require.NotNil(t, dst)
+	want = fstest.NewItem("dst2/file.txt", "hello world", when)
+	fstest.CompareItems(t, []fs.DirEntry{dst}, []fstest.Item{want}, nil, f.precision, "")
 }
@ -2,6 +2,7 @@ package local

 import (
 	"fmt"
+	"math"
 	"os"
 	"runtime"
 	"strconv"
@ -72,12 +73,12 @@ func (o *Object) parseMetadataInt(m fs.Metadata, key string, base int) (result int, ok bool) {
 	value, ok := m[key]
 	if ok {
 		var err error
-		result64, err := strconv.ParseInt(value, base, 64)
+		parsed, err := strconv.ParseInt(value, base, 0)
 		if err != nil {
 			fs.Debugf(o, "failed to parse metadata %s: %q: %v", key, value, err)
 			ok = false
 		}
-		result = int(result64)
+		result = int(parsed)
 	}
 	return result, ok
 }
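Passing bitSize 0 makes ParseInt range-check against the platform's int type instead of int64, so the int conversion that follows cannot silently truncate on 32-bit builds. A quick illustration with the standard library (the literal is chosen only to show the boundary):

	// With bitSize 0 the accepted range is that of int: on a 32-bit
	// platform this returns a range error, while with bitSize 64 it
	// would succeed and int(v) would then truncate.
	v, err := strconv.ParseInt("4294967296", 10, 0) // 2^32
	_, _ = v, err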
@ -128,9 +129,14 @@ func (o *Object) writeMetadataToFile(m fs.Metadata) (outErr error) {
 	}
 	mode, hasMode := o.parseMetadataInt(m, "mode", 8)
 	if hasMode {
-		err = os.Chmod(o.path, os.FileMode(mode))
-		if err != nil {
-			outErr = fmt.Errorf("failed to change permissions: %w", err)
+		if mode >= 0 {
+			umode := uint(mode)
+			if umode <= math.MaxUint32 {
+				err = os.Chmod(o.path, os.FileMode(umode))
+				if err != nil {
+					outErr = fmt.Errorf("failed to change permissions: %w", err)
+				}
+			}
 		}
 	}
 	// FIXME not parsing rdev yet
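os.FileMode is a uint32, so converting an unchecked int risks overflow; the new guards only chmod when the parsed value actually fits. The same check in isolation (path and mode are hypothetical):

	mode := 0o755
	if mode >= 0 && uint(mode) <= math.MaxUint32 {
		_ = os.Chmod("/tmp/somefile", os.FileMode(uint(mode))) // conversion now provably in range
	}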
@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
+	"slices"
 	"strings"
 	"time"

@ -14,7 +15,6 @@ import (
 	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/lib/dircache"
 	"github.com/rclone/rclone/lib/errcount"
-	"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
 )

 const (
@ -942,7 +942,8 @@ func errorHandler(resp *http.Response) error {
 	// Decode error response
 	errResponse := new(api.Error)
 	err := rest.DecodeJSON(resp, &errResponse)
-	if err != nil {
+	// Redirects have no body so don't report an error
+	if err != nil && resp.Header.Get("Location") == "" {
 		fs.Debugf(nil, "Couldn't decode error response: %v", err)
 	}
 	if errResponse.ErrorInfo.Code == "" {
@ -1544,9 +1545,12 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {

 // Precision return the precision of this Fs
 func (f *Fs) Precision() time.Duration {
-	if f.driveType == driveTypePersonal {
-		return time.Millisecond
-	}
+	// While this is true for some OneDrive personal accounts, it
+	// isn't true for all of them. See #8101 for details
+	//
+	// if f.driveType == driveTypePersonal {
+	// 	return time.Millisecond
+	// }
 	return time.Second
 }
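Precision feeds modification-time comparisons during sync: two timestamps count as equal when they match within the reported precision, so widening it to a second makes the comparison more forgiving for accounts that do not honour millisecond times. A minimal sketch of that comparison (the timestamps are hypothetical):

	a := time.Date(2024, 1, 1, 0, 0, 0, 500_000_000, time.UTC)
	b := time.Date(2024, 1, 1, 0, 0, 0, 900_000_000, time.UTC)
	equal := a.Truncate(time.Second).Equal(b.Truncate(time.Second)) // true at 1s precision
	_ = equal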
@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"slices"
 	"testing"
 	"time"

@ -16,7 +17,6 @@ import (
 	"github.com/rclone/rclone/lib/random"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
 )

 // go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v
@ -404,6 +404,32 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	return dstObj, nil
 }

+// About gets quota information
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
+	var uInfo usersInfoResponse
+	var resp *http.Response
+
+	err = f.pacer.Call(func() (bool, error) {
+		opts := rest.Opts{
+			Method: "GET",
+			Path:   "/users/info.json/" + f.session.SessionID,
+		}
+		resp, err = f.srv.CallJSON(ctx, &opts, nil, &uInfo)
+		return f.shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	usage = &fs.Usage{
+		Used:  fs.NewUsageValue(uInfo.StorageUsed),
+		Total: fs.NewUsageValue(uInfo.MaxStorage * 1024 * 1024), // MaxStorage appears to be in MB
+		Free:  fs.NewUsageValue(uInfo.MaxStorage*1024*1024 - uInfo.StorageUsed),
+	}
+	return usage, nil
+}
+
 // Move src to this remote using server-side move operations.
 //
 // This is stored with the remote path given.
|
|||||||
_ fs.Mover = (*Fs)(nil)
|
_ fs.Mover = (*Fs)(nil)
|
||||||
_ fs.DirMover = (*Fs)(nil)
|
_ fs.DirMover = (*Fs)(nil)
|
||||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||||
|
_ fs.Abouter = (*Fs)(nil)
|
||||||
_ fs.Object = (*Object)(nil)
|
_ fs.Object = (*Object)(nil)
|
||||||
_ fs.IDer = (*Object)(nil)
|
_ fs.IDer = (*Object)(nil)
|
||||||
_ fs.ParentIDer = (*Object)(nil)
|
_ fs.ParentIDer = (*Object)(nil)
|
||||||
|
@ -231,3 +231,10 @@ type permissions struct {
 type uploadFileChunkReply struct {
 	TotalWritten int64 `json:"TotalWritten"`
 }
+
+// usersInfoResponse describes OpenDrive users/info.json response
+type usersInfoResponse struct {
+	// This response contains many other values but these are the only ones currently in use
+	StorageUsed int64 `json:"StorageUsed,string"`
+	MaxStorage  int64 `json:"MaxStorage,string"`
+}
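The `,string` option in those tags tells encoding/json to accept the numbers as quoted strings, which is how the OpenDrive API serialises them. A self-contained illustration:

	// Decodes {"StorageUsed":"1024"} into an int64 field.
	var r struct {
		StorageUsed int64 `json:"StorageUsed,string"`
	}
	_ = json.Unmarshal([]byte(`{"StorageUsed":"1024"}`), &r) // r.StorageUsed == 1024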
@ -109,6 +109,37 @@ type Hashes struct {
 	SHA256 string `json:"sha256"`
 }

+// FileTruncateResponse is the response from /file_truncate
+type FileTruncateResponse struct {
+	Error
+}
+
+// FileCloseResponse is the response from /file_close
+type FileCloseResponse struct {
+	Error
+}
+
+// FileOpenResponse is the response from /file_open
+type FileOpenResponse struct {
+	Error
+	Fileid         int64 `json:"fileid"`
+	FileDescriptor int64 `json:"fd"`
+}
+
+// FileChecksumResponse is the response from /file_checksum
+type FileChecksumResponse struct {
+	Error
+	MD5    string `json:"md5"`
+	SHA1   string `json:"sha1"`
+	SHA256 string `json:"sha256"`
+}
+
+// FilePWriteResponse is the response from /file_pwrite
+type FilePWriteResponse struct {
+	Error
+	Bytes int64 `json:"bytes"`
+}
+
 // UploadFileResponse is the response from /uploadfile
 type UploadFileResponse struct {
 	Error
@ -14,6 +14,7 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"strconv"
 	"strings"
 	"time"

@ -146,7 +147,8 @@ we have to rely on user password authentication for it.`,
 			Help:       "Your pcloud password.",
 			IsPassword: true,
 			Advanced:   true,
-		}}...),
+		},
+		}...),
 	})
 }
@ -161,15 +163,16 @@ type Options struct {

 // Fs represents a remote pcloud
 type Fs struct {
 	name         string                 // name of this remote
 	root         string                 // the path we are working on
 	opt          Options                // parsed options
 	features     *fs.Features           // optional features
+	ts           *oauthutil.TokenSource // the token source, used to create new clients
 	srv          *rest.Client           // the connection to the server
 	cleanupSrv   *rest.Client           // the connection used for the cleanup method
 	dirCache     *dircache.DirCache     // Map of directory path to directory id
 	pacer        *fs.Pacer              // pacer for API calls
 	tokenRenewer *oauthutil.Renew       // renew the token on expiry
 }

 // Object describes a pcloud object
@ -317,6 +320,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 		name:  name,
 		root:  root,
 		opt:   *opt,
+		ts:    ts,
 		srv:   rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
 		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
@ -326,6 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.features = (&fs.Features{
 		CaseInsensitive:         false,
 		CanHaveEmptyDirectories: true,
+		PartialUploads:          true,
 	}).Fill(ctx, f)
 	if !canCleanup {
 		f.features.CleanUp = nil
@ -333,7 +338,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	f.srv.SetErrorHandler(errorHandler)

 	// Renew the token in the background
-	f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
+	f.tokenRenewer = oauthutil.NewRenew(f.String(), f.ts, func() error {
 		_, err := f.readMetaDataForPath(ctx, "")
 		return err
 	})
@ -375,6 +380,56 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	return f, nil
 }

+// OpenWriterAt opens with a handle for random access writes
+//
+// Pass in the remote desired and the size if known.
+//
+// It truncates any existing object
+func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
+	client, err := f.newSingleConnClient(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("create client: %w", err)
+	}
+	// init an empty file
+	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
+	if err != nil {
+		return nil, fmt.Errorf("resolve src: %w", err)
+	}
+	openResult, err := fileOpenNew(ctx, client, f, directoryID, leaf)
+	if err != nil {
+		return nil, fmt.Errorf("open file: %w", err)
+	}
+
+	writer := &writerAt{
+		ctx:    ctx,
+		client: client,
+		fs:     f,
+		size:   size,
+		remote: remote,
+		fd:     openResult.FileDescriptor,
+		fileID: openResult.Fileid,
+	}
+
+	return writer, nil
+}
+
+// Create a new http client, accepting keep-alive headers, limited to single connection.
+// Necessary for pcloud fileops API, as it binds the session to the underlying TCP connection.
+// File descriptors are only valid within the same connection and auto-closed when the connection is closed,
+// hence we need a separate client (with single connection) for each fd to avoid all sorts of errors and race conditions.
+func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
+	baseClient := fshttp.NewClient(ctx)
+	baseClient.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
+		t.MaxConnsPerHost = 1
+		t.DisableKeepAlives = false
+	})
+	// Set our own http client in the context
+	ctx = oauthutil.Context(ctx, baseClient)
+	// create a new oauth client, re-use the token source
+	oAuthClient := oauth2.NewClient(ctx, f.ts)
+	return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
+}
+
 // Return an Object from a path
 //
 // If it can't be found it returns the error fs.ErrorObjectNotFound.
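A minimal sketch of how a caller would drive the new writer (the remote name and data are hypothetical; error handling trimmed to the essentials):

	// Open, write at an offset, then close - Close also verifies the size.
	w, err := f.OpenWriterAt(ctx, "dir/file.bin", int64(len(data)))
	if err != nil {
		return err
	}
	if _, err := w.WriteAt(data, 0); err != nil {
		return err
	}
	return w.Close()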
@ -1094,9 +1149,42 @@ func (o *Object) ModTime(ctx context.Context) time.Time {

 // SetModTime sets the modification time of the local fs object
 func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
-	// Pcloud doesn't have a way of doing this so returning this
-	// error will cause the file to be re-uploaded to set the time.
-	return fs.ErrorCantSetModTime
+	filename, directoryID, err := o.fs.dirCache.FindPath(ctx, o.Remote(), true)
+	if err != nil {
+		return err
+	}
+	fileID := fileIDtoNumber(o.id)
+	filename = o.fs.opt.Enc.FromStandardName(filename)
+	opts := rest.Opts{
+		Method:           "PUT",
+		Path:             "/copyfile",
+		Parameters:       url.Values{},
+		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
+		ExtraHeaders: map[string]string{
+			"Connection": "keep-alive",
+		},
+	}
+	opts.Parameters.Set("fileid", fileID)
+	opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
+	opts.Parameters.Set("toname", filename)
+	opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
+	opts.Parameters.Set("ctime", strconv.FormatInt(modTime.Unix(), 10))
+	opts.Parameters.Set("mtime", strconv.FormatInt(modTime.Unix(), 10))
+
+	result := &api.ItemResult{}
+	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, result)
+		err = result.Error.Update(err)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return fmt.Errorf("update mtime: copyfile: %w", err)
+	}
+	if err := o.setMetaData(&result.Metadata); err != nil {
+		return err
+	}
+
+	return nil
 }

 // Storable returns a boolean showing whether this object storable
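The writer_at.go file below implements the WriterAt returned above: its WriteAt first compares a SHA-1 of the local buffer against a SHA-1 of the same byte range on the server and skips the upload when they match. The core of that idea in isolation (buffer and remoteSHA1 are hypothetical stand-ins):

	sum := sha1.Sum(buffer)
	inSHA1 := hex.EncodeToString(sum[:])
	if inSHA1 == remoteSHA1 {
		return len(buffer), nil // range already matches; skip the pwrite
	}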
216	backend/pcloud/writer_at.go	(new file)
@ -0,0 +1,216 @@
package pcloud

import (
	"bytes"
	"context"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"net/url"
	"strconv"
	"time"

	"github.com/rclone/rclone/backend/pcloud/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/rest"
)

// writerAt implements fs.WriterAtCloser, adding the OpenWriterAt feature to pcloud.
type writerAt struct {
	ctx    context.Context
	client *rest.Client
	fs     *Fs
	size   int64
	remote string
	fd     int64
	fileID int64
}

// Close implements WriterAt.Close.
func (c *writerAt) Close() error {
	// close fd
	if _, err := c.fileClose(c.ctx); err != nil {
		return fmt.Errorf("close fd: %w", err)
	}

	// Avoiding race conditions: Depending on the tcp connection, there might be
	// caching issues when checking the size immediately after write.
	// Hence we try avoiding them by checking the resulting size on a different connection.
	if c.size < 0 {
		// Without knowing the size, we cannot do size checks.
		// Falling back to a sleep of 1s for sake of hope.
		time.Sleep(1 * time.Second)
		return nil
	}
	sizeOk := false
	sizeLastSeen := int64(0)
	for retry := 0; retry < 5; retry++ {
		fs.Debugf(c.remote, "checking file size: try %d/5", retry)
		obj, err := c.fs.NewObject(c.ctx, c.remote)
		if err != nil {
			return fmt.Errorf("get uploaded obj: %w", err)
		}
		sizeLastSeen = obj.Size()
		if obj.Size() == c.size {
			sizeOk = true
			break
		}
		time.Sleep(1 * time.Second)
	}

	if !sizeOk {
		return fmt.Errorf("incorrect size after upload: got %d, want %d", sizeLastSeen, c.size)
	}

	return nil
}

// WriteAt implements fs.WriteAt.
func (c *writerAt) WriteAt(buffer []byte, offset int64) (n int, err error) {
	contentLength := len(buffer)

	inSHA1Bytes := sha1.Sum(buffer)
	inSHA1 := hex.EncodeToString(inSHA1Bytes[:])

	// get target hash
	outChecksum, err := c.fileChecksum(c.ctx, offset, int64(contentLength))
	if err != nil {
		return 0, err
	}
	outSHA1 := outChecksum.SHA1

	if outSHA1 == "" || inSHA1 == "" {
		return 0, fmt.Errorf("expect both hashes to be filled: src: %q, target: %q", inSHA1, outSHA1)
	}

	// check hash of buffer, skip if fits
	if inSHA1 == outSHA1 {
		return contentLength, nil
	}

	// upload buffer with offset if necessary
	if _, err := c.filePWrite(c.ctx, offset, buffer); err != nil {
		return 0, err
	}

	return contentLength, nil
}

// Call pcloud file_open using folderid and name with O_CREAT and O_WRITE flags, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_open.html
func fileOpenNew(ctx context.Context, c *rest.Client, srcFs *Fs, directoryID, filename string) (*api.FileOpenResponse, error) {
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/file_open",
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		ExtraHeaders: map[string]string{
			"Connection": "keep-alive",
		},
	}
	filename = srcFs.opt.Enc.FromStandardName(filename)
	opts.Parameters.Set("name", filename)
	opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
	opts.Parameters.Set("flags", "0x0042") // O_CREAT, O_WRITE

	result := &api.FileOpenResponse{}
	err := srcFs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("open new file descriptor: %w", err)
	}
	return result, nil
}

// Call pcloud file_checksum, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_checksum.html
func (c *writerAt) fileChecksum(
	ctx context.Context,
	offset, count int64,
) (*api.FileChecksumResponse, error) {
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/file_checksum",
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		ExtraHeaders: map[string]string{
			"Connection": "keep-alive",
		},
	}
	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
	opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))
	opts.Parameters.Set("count", strconv.FormatInt(count, 10))

	result := &api.FileChecksumResponse{}
	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("checksum of fd %d with offset %d and size %d: %w", c.fd, offset, count, err)
	}
	return result, nil
}

// Call pcloud file_pwrite, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_pwrite.html
func (c *writerAt) filePWrite(
	ctx context.Context,
	offset int64,
	buf []byte,
) (*api.FilePWriteResponse, error) {
	contentLength := int64(len(buf))
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/file_pwrite",
		Body:             bytes.NewReader(buf),
		ContentLength:    &contentLength,
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		Close:            false,
		ExtraHeaders: map[string]string{
			"Connection": "keep-alive",
		},
	}
	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))
	opts.Parameters.Set("offset", strconv.FormatInt(offset, 10))

	result := &api.FilePWriteResponse{}
	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("write %d bytes to fd %d with offset %d: %w", contentLength, c.fd, offset, err)
	}
	return result, nil
}

// Call pcloud file_close, see [API Doc.]
// [API Doc]: https://docs.pcloud.com/methods/fileops/file_close.html
func (c *writerAt) fileClose(ctx context.Context) (*api.FileCloseResponse, error) {
	opts := rest.Opts{
		Method:           "PUT",
		Path:             "/file_close",
		Parameters:       url.Values{},
		TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
		Close:            true,
	}
	opts.Parameters.Set("fd", strconv.FormatInt(c.fd, 10))

	result := &api.FileCloseResponse{}
	err := c.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := c.client.CallJSON(ctx, &opts, nil, result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("close file descriptor: %w", err)
	}
	return result, nil
}
@ -513,6 +513,72 @@ type RequestDecompress struct {
 	DefaultParent bool `json:"default_parent,omitempty"`
 }

+// ------------------------------------------------------------ authorization
+
+// CaptchaToken is a response to requestCaptchaToken api call
+type CaptchaToken struct {
+	CaptchaToken string `json:"captcha_token"`
+	ExpiresIn    int64  `json:"expires_in"` // currently 300s
+	// API doesn't provide Expiry field and thus it should be populated from ExpiresIn on retrieval
+	Expiry time.Time `json:"expiry,omitempty"`
+	URL    string    `json:"url,omitempty"` // a link for users to solve captcha
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *CaptchaToken) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+
+	expiryDelta := time.Duration(10) * time.Second // same as oauth2's defaultExpiryDelta
+	return t.Expiry.Round(0).Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *CaptchaToken) Valid() bool {
+	return t != nil && t.CaptchaToken != "" && !t.expired()
+}
+
+// CaptchaTokenRequest is to request for captcha token
+type CaptchaTokenRequest struct {
+	Action       string            `json:"action,omitempty"`
+	CaptchaToken string            `json:"captcha_token,omitempty"`
+	ClientID     string            `json:"client_id,omitempty"`
+	DeviceID     string            `json:"device_id,omitempty"`
+	Meta         *CaptchaTokenMeta `json:"meta,omitempty"`
+}
+
+// CaptchaTokenMeta contains meta info for CaptchaTokenRequest
+type CaptchaTokenMeta struct {
+	CaptchaSign   string `json:"captcha_sign,omitempty"`
+	ClientVersion string `json:"client_version,omitempty"`
+	PackageName   string `json:"package_name,omitempty"`
+	Timestamp     string `json:"timestamp,omitempty"`
+	UserID        string `json:"user_id,omitempty"` // webdrive uses this instead of UserName
+	UserName      string `json:"username,omitempty"`
+	Email         string `json:"email,omitempty"`
+	PhoneNumber   string `json:"phone_number,omitempty"`
+}
+
+// Token represents oauth2 token used for pikpak which needs to be converted to be compatible with oauth2.Token
+type Token struct {
+	TokenType    string `json:"token_type"`
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+	ExpiresIn    int    `json:"expires_in"`
+	Sub          string `json:"sub"`
+}
+
+// Expiry returns expiry from expires in, so it should be called on retrieval
+// e must be non-nil.
+func (e *Token) Expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
 // ------------------------------------------------------------

 // NOT implemented YET
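Because the captcha endpoint only returns expires_in, Expiry has to be stamped at retrieval time; expired() then mirrors oauth2's early-expiry slack. A small sketch of that round trip (the token value is hypothetical):

	tok := &CaptchaToken{CaptchaToken: "abc", ExpiresIn: 300}
	tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	ok := tok.Valid() // true until ~10s before the 300s mark
	_ = ok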
@ -3,8 +3,10 @@ package pikpak
 import (
 	"bytes"
 	"context"
+	"crypto/md5"
 	"crypto/sha1"
 	"encoding/hex"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
@ -14,10 +16,13 @@ import (
 	"os"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	"github.com/rclone/rclone/backend/pikpak/api"
 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/lib/rest"
 )

@ -262,15 +267,20 @@ func (f *Fs) getGcid(ctx context.Context, src fs.ObjectInfo) (gcid string, err error) {
 	if err != nil {
 		return
 	}
+	if src.Size() == 0 {
+		// If src is zero-length, the API will return
+		// Error "cid and file_size is required" (400)
+		// In this case, we can simply return cid == gcid
+		return cid, nil
+	}
+
 	params := url.Values{}
 	params.Set("cid", cid)
 	params.Set("file_size", strconv.FormatInt(src.Size(), 10))
 	opts := rest.Opts{
 		Method:     "GET",
 		Path:       "/drive/v1/resource/cid",
 		Parameters: params,
-		ExtraHeaders: map[string]string{"x-device-id": f.deviceID},
 	}

 	info := struct {
@ -368,11 +378,23 @@ func calcGcid(r io.Reader, size int64) (string, error) {
 	return hex.EncodeToString(totalHash.Sum(nil)), nil
 }

+// unWrapObjectInfo returns the underlying Object unwrapped as much as
+// possible or nil even if it is an OverrideRemote
+func unWrapObjectInfo(oi fs.ObjectInfo) fs.Object {
+	if o, ok := oi.(fs.Object); ok {
+		return fs.UnWrapObject(o)
+	} else if do, ok := oi.(*fs.OverrideRemote); ok {
+		// Unwrap if it is an operations.OverrideRemote
+		return do.UnWrap()
+	}
+	return nil
+}
+
 // calcCid calculates Cid from source
 //
 // Cid is a simplified version of Gcid
 func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
-	srcObj := fs.UnWrapObjectInfo(src)
+	srcObj := unWrapObjectInfo(src)
 	if srcObj == nil {
 		return "", fmt.Errorf("failed to unwrap object from src: %s", src)
 	}
@ -408,6 +430,8 @@ func calcCid(ctx context.Context, src fs.ObjectInfo) (cid string, err error) {
 	return
 }

+// ------------------------------------------------------------ authorization
+
 // randomly generates device id used for request header 'x-device-id'
 //
 // original javascript implementation
@ -428,3 +452,206 @@ func genDeviceID() string {
 	}
 	return string(base)
 }
+
+var md5Salt = []string{
+	"C9qPpZLN8ucRTaTiUMWYS9cQvWOE",
+	"+r6CQVxjzJV6LCV",
+	"F",
+	"pFJRC",
+	"9WXYIDGrwTCz2OiVlgZa90qpECPD6olt",
+	"/750aCr4lm/Sly/c",
+	"RB+DT/gZCrbV",
+	"",
+	"CyLsf7hdkIRxRm215hl",
+	"7xHvLi2tOYP0Y92b",
+	"ZGTXXxu8E/MIWaEDB+Sm/",
+	"1UI3",
+	"E7fP5Pfijd+7K+t6Tg/NhuLq0eEUVChpJSkrKxpO",
+	"ihtqpG6FMt65+Xk+tWUH2",
+	"NhXXU9rg4XXdzo7u5o",
+}
+
+func md5Sum(text string) string {
+	hash := md5.Sum([]byte(text))
+	return hex.EncodeToString(hash[:])
+}
+
+func calcCaptchaSign(deviceID string) (timestamp, sign string) {
+	timestamp = fmt.Sprint(time.Now().UnixMilli())
+	str := fmt.Sprint(clientID, clientVersion, packageName, deviceID, timestamp)
+	for _, salt := range md5Salt {
+		str = md5Sum(str + salt)
+	}
+	sign = "1." + str
+	return
+}
+
+func newCaptchaTokenRequest(action, oldToken string, opt *Options) (req *api.CaptchaTokenRequest) {
+	req = &api.CaptchaTokenRequest{
+		Action:       action,
+		CaptchaToken: oldToken, // can be empty initially
+		ClientID:     clientID,
+		DeviceID:     opt.DeviceID,
+		Meta:         new(api.CaptchaTokenMeta),
+	}
+	switch action {
+	case "POST:/v1/auth/signin":
+		req.Meta.UserName = opt.Username
+	default:
+		timestamp, captchaSign := calcCaptchaSign(opt.DeviceID)
+		req.Meta.CaptchaSign = captchaSign
+		req.Meta.Timestamp = timestamp
+		req.Meta.ClientVersion = clientVersion
+		req.Meta.PackageName = packageName
+		req.Meta.UserID = opt.UserID
+	}
+	return
+}
+
+// CaptchaTokenSource stores updated captcha tokens in the config file
+type CaptchaTokenSource struct {
+	mu    sync.Mutex
+	m     configmap.Mapper
+	opt   *Options
+	token *api.CaptchaToken
+	ctx   context.Context
+	rst   *pikpakClient
+}
+
+// initialize CaptchaTokenSource from rclone.conf if possible
+func newCaptchaTokenSource(ctx context.Context, opt *Options, m configmap.Mapper) *CaptchaTokenSource {
+	token := new(api.CaptchaToken)
+	tokenString, ok := m.Get("captcha_token")
+	if !ok || tokenString == "" {
+		fs.Debugf(nil, "failed to read captcha token out of config file")
+	} else {
+		if err := json.Unmarshal([]byte(tokenString), token); err != nil {
+			fs.Debugf(nil, "failed to parse captcha token out of config file: %v", err)
+		}
+	}
+	return &CaptchaTokenSource{
+		m:     m,
+		opt:   opt,
+		token: token,
+		ctx:   ctx,
+		rst:   newPikpakClient(getClient(ctx, opt), opt),
+	}
+}
+
+// requestToken retrieves captcha token from API
+func (cts *CaptchaTokenSource) requestToken(ctx context.Context, req *api.CaptchaTokenRequest) (err error) {
+	opts := rest.Opts{
+		Method:  "POST",
+		RootURL: "https://user.mypikpak.com/v1/shield/captcha/init",
+	}
+	var info *api.CaptchaToken
+	_, err = cts.rst.CallJSON(ctx, &opts, &req, &info)
+	if err == nil && info.ExpiresIn != 0 {
+		// populate to Expiry
+		info.Expiry = time.Now().Add(time.Duration(info.ExpiresIn) * time.Second)
+		cts.token = info // update with a new one
+	}
+	return
+}
+
+func (cts *CaptchaTokenSource) refreshToken(opts *rest.Opts) (string, error) {
+	oldToken := ""
+	if cts.token != nil {
+		oldToken = cts.token.CaptchaToken
+	}
+	action := "GET:/drive/v1/about"
+	if opts.RootURL == "" && opts.Path != "" {
+		action = fmt.Sprintf("%s:%s", opts.Method, opts.Path)
+	} else if u, err := url.Parse(opts.RootURL); err == nil {
+		action = fmt.Sprintf("%s:%s", opts.Method, u.Path)
+	}
+	req := newCaptchaTokenRequest(action, oldToken, cts.opt)
+	if err := cts.requestToken(cts.ctx, req); err != nil {
+		return "", fmt.Errorf("failed to retrieve captcha token from api: %w", err)
+	}
+
+	// put it into rclone.conf
+	tokenBytes, err := json.Marshal(cts.token)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal captcha token: %w", err)
+	}
+	cts.m.Set("captcha_token", string(tokenBytes))
+	return cts.token.CaptchaToken, nil
+}
+
+// Invalidate resets existing captcha token for a forced refresh
+func (cts *CaptchaTokenSource) Invalidate() {
+	cts.mu.Lock()
+	cts.token.CaptchaToken = ""
+	cts.mu.Unlock()
+}
+
+// Token returns a valid captcha token
+func (cts *CaptchaTokenSource) Token(opts *rest.Opts) (string, error) {
+	cts.mu.Lock()
+	defer cts.mu.Unlock()
+	if cts.token.Valid() {
+		return cts.token.CaptchaToken, nil
+	}
+	return cts.refreshToken(opts)
+}
+
+// pikpakClient wraps rest.Client with a handle of captcha token
+type pikpakClient struct {
+	opt     *Options
+	client  *rest.Client
+	captcha *CaptchaTokenSource
+}
+
+// newPikpakClient takes an (oauth) http.Client and makes a new api instance for pikpak with
+// * error handler
+// * root url
+// * default headers
+func newPikpakClient(c *http.Client, opt *Options) *pikpakClient {
+	client := rest.NewClient(c).SetErrorHandler(errorHandler).SetRoot(rootURL)
+	for key, val := range map[string]string{
+		"Referer":          "https://mypikpak.com/",
+		"x-client-id":      clientID,
+		"x-client-version": clientVersion,
+		"x-device-id":      opt.DeviceID,
+		// "x-device-model": "firefox%2F129.0",
+		// "x-device-name": "PC-Firefox",
+		// "x-device-sign": fmt.Sprintf("wdi10.%sxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", opt.DeviceID),
+		// "x-net-work-type": "NONE",
+		// "x-os-version": "Win32",
+		// "x-platform-version": "1",
+		// "x-protocol-version": "301",
+		// "x-provider-name": "NONE",
+		// "x-sdk-version": "8.0.3",
+	} {
+		client.SetHeader(key, val)
+	}
+	return &pikpakClient{
+		client: client,
+		opt:    opt,
+	}
+}
+
+// This should be called right after pikpakClient initialized
+func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper) *pikpakClient {
+	c.captcha = newCaptchaTokenSource(ctx, c.opt, m)
+	return c
+}
+
+func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
+	if c.captcha != nil {
+		token, err := c.captcha.Token(opts)
+		if err != nil || token == "" {
+			return nil, fserrors.FatalError(fmt.Errorf("couldn't get captcha token: %v", err))
+		}
+		if opts.ExtraHeaders == nil {
+			opts.ExtraHeaders = make(map[string]string)
+		}
+		opts.ExtraHeaders["x-captcha-token"] = token
+	}
+	return c.client.CallJSON(ctx, opts, request, response)
+}
+
+func (c *pikpakClient) Call(ctx context.Context, opts *rest.Opts) (resp *http.Response, err error) {
+	return c.client.Call(ctx, opts)
+}
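calcCaptchaSign above derives the sign by folding the concatenated client fields through fifteen successive MD5 rounds, one per salt. The chain in isolation (the deviceID is hypothetical):

	str := fmt.Sprint(clientID, clientVersion, packageName, "0123456789abcdef0123456789abcdef", timestamp)
	for _, salt := range md5Salt {
		str = md5Sum(str + salt) // each round hashes the previous digest plus the next salt
	}
	sign := "1." + str
	_ = sign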
@ -23,6 +23,7 @@ package pikpak
 import (
 	"bytes"
 	"context"
+	"encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
@ -51,6 +52,7 @@ import (
 	"github.com/rclone/rclone/fs/config/configstruct"
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/rclone/rclone/fs/fserrors"
+	"github.com/rclone/rclone/fs/fshttp"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/lib/atexit"
 	"github.com/rclone/rclone/lib/dircache"
@ -64,15 +66,17 @@ import (

 // Constants
 const (
-	rcloneClientID              = "YNxT9w7GMdWvEOKa"
-	rcloneEncryptedClientSecret = "aqrmB6M1YJ1DWCBxVxFSjFo7wzWEky494YMmkqgAl1do1WKOe2E"
+	clientID                 = "YUMx5nI8ZU8Ap8pm"
+	clientVersion            = "2.0.0"
+	packageName              = "mypikpak.com"
+	defaultUserAgent         = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:129.0) Gecko/20100101 Firefox/129.0"
 	minSleep                 = 100 * time.Millisecond
 	maxSleep                 = 2 * time.Second
 	taskWaitTime             = 500 * time.Millisecond
 	decayConstant            = 2 // bigger for slower decay, exponential
 	rootURL                  = "https://api-drive.mypikpak.com"
 	minChunkSize             = fs.SizeSuffix(manager.MinUploadPartSize)
 	defaultUploadConcurrency = manager.DefaultUploadConcurrency
 )

 // Globals
@@ -85,43 +89,53 @@ var (
 			TokenURL:  "https://user.mypikpak.com/v1/auth/token",
 			AuthStyle: oauth2.AuthStyleInParams,
 		},
-		ClientID:     rcloneClientID,
-		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
-		RedirectURL:  oauthutil.RedirectURL,
+		ClientID:    clientID,
+		RedirectURL: oauthutil.RedirectURL,
 	}
 )
 
-// Returns OAuthOptions modified for pikpak
-func pikpakOAuthOptions() []fs.Option {
-	opts := []fs.Option{}
-	for _, opt := range oauthutil.SharedOptions {
-		if opt.Name == config.ConfigClientID {
-			opt.Advanced = true
-		} else if opt.Name == config.ConfigClientSecret {
-			opt.Advanced = true
-		}
-		opts = append(opts, opt)
-	}
-	return opts
-}
-
 // pikpakAutorize retrieves OAuth token using user/pass and save it to rclone.conf
 func pikpakAuthorize(ctx context.Context, opt *Options, name string, m configmap.Mapper) error {
-	// override default client id/secret
-	if id, ok := m.Get("client_id"); ok && id != "" {
-		oauthConfig.ClientID = id
-	}
-	if secret, ok := m.Get("client_secret"); ok && secret != "" {
-		oauthConfig.ClientSecret = secret
+	if opt.Username == "" {
+		return errors.New("no username")
 	}
 	pass, err := obscure.Reveal(opt.Password)
 	if err != nil {
 		return fmt.Errorf("failed to decode password - did you obscure it?: %w", err)
 	}
-	t, err := oauthConfig.PasswordCredentialsToken(ctx, opt.Username, pass)
+	// new device id if necessary
+	if len(opt.DeviceID) != 32 {
+		opt.DeviceID = genDeviceID()
+		m.Set("device_id", opt.DeviceID)
+		fs.Infof(nil, "Using new device id %q", opt.DeviceID)
+	}
+	opts := rest.Opts{
+		Method:  "POST",
+		RootURL: "https://user.mypikpak.com/v1/auth/signin",
+	}
+	req := map[string]string{
+		"username":  opt.Username,
+		"password":  pass,
+		"client_id": clientID,
+	}
+	var token api.Token
+	rst := newPikpakClient(getClient(ctx, opt), opt).SetCaptchaTokener(ctx, m)
+	_, err = rst.CallJSON(ctx, &opts, req, &token)
+	if apiErr, ok := err.(*api.Error); ok {
+		if apiErr.Reason == "captcha_invalid" && apiErr.Code == 4002 {
+			rst.captcha.Invalidate()
+			_, err = rst.CallJSON(ctx, &opts, req, &token)
+		}
+	}
 	if err != nil {
 		return fmt.Errorf("failed to retrieve token using username/password: %w", err)
 	}
+	t := &oauth2.Token{
+		AccessToken:  token.AccessToken,
+		TokenType:    token.TokenType,
+		RefreshToken: token.RefreshToken,
+		Expiry:       token.Expiry(),
+	}
 	return oauthutil.PutToken(name, m, t, false)
 }
 
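The rewritten pikpakAuthorize above swaps the OAuth password-credentials grant for a direct POST to PikPak's signin endpoint and then wraps the reply in an `oauth2.Token` for rclone's token store. A minimal standalone sketch of that shape — the `signinResponse` field names here are assumptions based on the usual OAuth JSON layout; the real backend routes this through its captcha-aware REST client and `api.Token`:

```go
package sketch

import (
	"bytes"
	"context"
	"encoding/json"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

// signinResponse mirrors the token fields the diff reads from api.Token
// (field names are assumptions, not confirmed against the PikPak API).
type signinResponse struct {
	AccessToken  string `json:"access_token"`
	TokenType    string `json:"token_type"`
	RefreshToken string `json:"refresh_token"`
	ExpiresIn    int64  `json:"expires_in"`
}

// signin posts username/password to the signin endpoint and converts the
// response into an oauth2.Token, as pikpakAuthorize does above.
func signin(ctx context.Context, username, password, clientID string) (*oauth2.Token, error) {
	body, err := json.Marshal(map[string]string{
		"username":  username,
		"password":  password,
		"client_id": clientID,
	})
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequestWithContext(ctx, "POST",
		"https://user.mypikpak.com/v1/auth/signin", bytes.NewReader(body))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var sr signinResponse
	if err := json.NewDecoder(resp.Body).Decode(&sr); err != nil {
		return nil, err
	}
	return &oauth2.Token{
		AccessToken:  sr.AccessToken,
		TokenType:    sr.TokenType,
		RefreshToken: sr.RefreshToken,
		Expiry:       time.Now().Add(time.Duration(sr.ExpiresIn) * time.Second),
	}, nil
}
```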
@@ -160,7 +174,7 @@ func init() {
 			}
 			return nil, fmt.Errorf("unknown state %q", config.State)
 		},
-		Options: append(pikpakOAuthOptions(), []fs.Option{{
+		Options: []fs.Option{{
 			Name:     "user",
 			Help:     "Pikpak username.",
 			Required: true,
@@ -170,6 +184,18 @@ func init() {
 			Help:       "Pikpak password.",
 			Required:   true,
 			IsPassword: true,
+		}, {
+			Name:      "device_id",
+			Help:      "Device ID used for authorization.",
+			Advanced:  true,
+			Sensitive: true,
+		}, {
+			Name:     "user_agent",
+			Default:  defaultUserAgent,
+			Advanced: true,
+			Help: fmt.Sprintf(`HTTP user agent for pikpak.
+
+Defaults to "%s" or "--pikpak-user-agent" provided on command line.`, defaultUserAgent),
 		}, {
 			Name: "root_folder_id",
 			Help: `ID of the root folder.
@@ -248,7 +274,7 @@ this may help to speed up the transfers.`,
 				encoder.EncodeRightSpace |
 				encoder.EncodeRightPeriod |
 				encoder.EncodeInvalidUtf8),
-		}}...),
+		}},
 	})
 }
 
@@ -256,6 +282,9 @@ this may help to speed up the transfers.`,
 type Options struct {
 	Username     string `config:"user"`
 	Password     string `config:"pass"`
+	UserID       string `config:"user_id"` // only available during runtime
+	DeviceID     string `config:"device_id"`
+	UserAgent    string `config:"user_agent"`
 	RootFolderID string `config:"root_folder_id"`
 	UseTrash     bool   `config:"use_trash"`
 	TrashedOnly  bool   `config:"trashed_only"`
@@ -271,11 +300,10 @@ type Fs struct {
 	root         string             // the path we are working on
 	opt          Options            // parsed options
 	features     *fs.Features       // optional features
-	rst          *rest.Client       // the connection to the server
+	rst          *pikpakClient      // the connection to the server
 	dirCache     *dircache.DirCache // Map of directory path to directory id
 	pacer        *fs.Pacer          // pacer for API calls
 	rootFolderID string             // the id of the root folder
-	deviceID     string             // device id used for api requests
 	client       *http.Client       // authorized client
 	m            configmap.Mapper
 	tokenMu      *sync.Mutex // when renewing tokens
@@ -429,6 +457,12 @@ func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (b
 		} else if apiErr.Reason == "file_space_not_enough" {
 			// "file_space_not_enough" (8): Storage space is not enough
 			return false, fserrors.FatalError(err)
+		} else if apiErr.Reason == "captcha_invalid" && apiErr.Code == 9 {
+			// "captcha_invalid" (9): Verification code is invalid
+			// This error occurred on the POST:/drive/v1/files endpoint
+			// when a zero-byte file was uploaded with an invalid captcha token
+			f.rst.captcha.Invalidate()
+			return true, err
 		}
 	}
 
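The new `captcha_invalid` branch drops the cached captcha token and lets the pacer retry, so the next attempt fetches a fresh token. A small runnable sketch of that invalidate-and-retry-once pattern, with stand-in types:

```go
package main

import (
	"errors"
	"fmt"
)

// tokenCache is a stand-in for the backend's captcha tokener: it caches a
// token and can be invalidated so the next use fetches a fresh one.
type tokenCache struct{ token string }

func (c *tokenCache) Invalidate() { c.token = "" }

var errCaptchaInvalid = errors.New("captcha_invalid")

// callWithCaptchaRetry shows the shape of the retry above: on a
// captcha_invalid error, drop the cached token and try exactly once more.
func callWithCaptchaRetry(c *tokenCache, call func() error) error {
	err := call()
	if errors.Is(err, errCaptchaInvalid) {
		c.Invalidate()
		err = call()
	}
	return err
}

func main() {
	c := &tokenCache{token: "stale"}
	attempts := 0
	err := callWithCaptchaRetry(c, func() error {
		attempts++
		if c.token == "stale" {
			return errCaptchaInvalid
		}
		return nil
	})
	fmt.Println(attempts, err) // 2 <nil>
}
```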
@@ -452,13 +486,36 @@ func errorHandler(resp *http.Response) error {
 	return errResponse
 }
 
+// getClient makes an http client according to the options
+func getClient(ctx context.Context, opt *Options) *http.Client {
+	// Override few config settings and create a client
+	newCtx, ci := fs.AddConfig(ctx)
+	ci.UserAgent = opt.UserAgent
+	return fshttp.NewClient(newCtx)
+}
+
 // newClientWithPacer sets a new http/rest client with a pacer to Fs
 func (f *Fs) newClientWithPacer(ctx context.Context) (err error) {
-	f.client, _, err = oauthutil.NewClient(ctx, f.name, f.m, oauthConfig)
+	var ts *oauthutil.TokenSource
+	f.client, ts, err = oauthutil.NewClientWithBaseClient(ctx, f.name, f.m, oauthConfig, getClient(ctx, &f.opt))
 	if err != nil {
 		return fmt.Errorf("failed to create oauth client: %w", err)
 	}
-	f.rst = rest.NewClient(f.client).SetRoot(rootURL).SetErrorHandler(errorHandler)
+	token, err := ts.Token()
+	if err != nil {
+		return err
+	}
+	// parse user_id from oauth access token for later use
+	if parts := strings.Split(token.AccessToken, "."); len(parts) > 1 {
+		jsonStr, _ := base64.URLEncoding.DecodeString(parts[1] + "===")
+		info := struct {
+			UserID string `json:"sub,omitempty"`
+		}{}
+		if jsonErr := json.Unmarshal(jsonStr, &info); jsonErr == nil {
+			f.opt.UserID = info.UserID
+		}
+	}
+	f.rst = newPikpakClient(f.client, &f.opt).SetCaptchaTokener(ctx, f.m)
 	f.pacer = fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
 	return nil
 }
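`newClientWithPacer` now mines the user id out of the JWT access token: the middle segment is unpadded base64url JSON, and the code above pads with `"==="` and deliberately ignores the decode error so the already-decoded prefix survives. A runnable sketch of the same extraction using `RawURLEncoding`, which avoids the padding trick entirely:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// subClaim extracts the "sub" claim from a JWT without verifying it,
// which is all the backend needs to learn its user id.
func subClaim(jwt string) (string, error) {
	parts := strings.Split(jwt, ".")
	if len(parts) < 2 {
		return "", fmt.Errorf("not a JWT")
	}
	// JWT payloads are unpadded base64url; RawURLEncoding handles that
	// directly, without the pad-and-ignore-the-error trick used above.
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return "", err
	}
	var claims struct {
		Sub string `json:"sub"`
	}
	if err := json.Unmarshal(payload, &claims); err != nil {
		return "", err
	}
	return claims.Sub, nil
}

func main() {
	// header.payload.signature with payload {"sub":"12345"}
	tok := "e30." + base64.RawURLEncoding.EncodeToString([]byte(`{"sub":"12345"}`)) + ".sig"
	sub, err := subClaim(tok)
	fmt.Println(sub, err) // 12345 <nil>
}
```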
@@ -491,10 +548,19 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
 		CanHaveEmptyDirectories: true, // can have empty directories
 		NoMultiThreading:        true, // can't have multiple threads downloading
 	}).Fill(ctx, f)
-	f.deviceID = genDeviceID()
+
+	// new device id if necessary
+	if len(f.opt.DeviceID) != 32 {
+		f.opt.DeviceID = genDeviceID()
+		m.Set("device_id", f.opt.DeviceID)
+		fs.Infof(nil, "Using new device id %q", f.opt.DeviceID)
+	}
 
 	if err := f.newClientWithPacer(ctx); err != nil {
-		return nil, err
+		// re-authorize if necessary
+		if strings.Contains(err.Error(), "invalid_grant") {
+			return f, f.reAuthorize(ctx)
+		}
 	}
 
 	return f, nil
@@ -1707,7 +1773,7 @@ func (o *Object) upload(ctx context.Context, in io.Reader, src fs.ObjectInfo, wi
 	gcid, err := o.fs.getGcid(ctx, src)
 	if err != nil || gcid == "" {
 		fs.Debugf(o, "calculating gcid: %v", err)
-		if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
+		if srcObj := unWrapObjectInfo(src); srcObj != nil && srcObj.Fs().Features().IsLocal {
 			// No buffering; directly calculate gcid from source
 			rc, err := srcObj.Open(ctx)
 			if err != nil {
|
@ -449,7 +449,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
|||||||
// No root so return old f
|
// No root so return old f
|
||||||
return f, nil
|
return f, nil
|
||||||
}
|
}
|
||||||
_, err := tempF.newObjectWithLink(ctx, remote, nil)
|
_, err := tempF.newObject(ctx, remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == fs.ErrorObjectNotFound {
|
if err == fs.ErrorObjectNotFound {
|
||||||
// File doesn't exist so return old f
|
// File doesn't exist so return old f
|
||||||
@ -487,7 +487,7 @@ func (f *Fs) CleanUp(ctx context.Context) error {
|
|||||||
// ErrorIsDir if possible without doing any extra work,
|
// ErrorIsDir if possible without doing any extra work,
|
||||||
// otherwise ErrorObjectNotFound.
|
// otherwise ErrorObjectNotFound.
|
||||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
return f.newObjectWithLink(ctx, remote, nil)
|
return f.newObject(ctx, remote)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) {
|
func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, error) {
|
||||||
@ -516,35 +516,27 @@ func (f *Fs) getObjectLink(ctx context.Context, remote string) (*proton.Link, er
|
|||||||
return link, nil
|
return link, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// readMetaDataForRemote reads the metadata from the remote
|
// readMetaDataForLink reads the metadata from the remote
|
||||||
func (f *Fs) readMetaDataForRemote(ctx context.Context, remote string, _link *proton.Link) (*proton.Link, *protonDriveAPI.FileSystemAttrs, error) {
|
func (f *Fs) readMetaDataForLink(ctx context.Context, link *proton.Link) (*protonDriveAPI.FileSystemAttrs, error) {
|
||||||
link, err := f.getObjectLink(ctx, remote)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileSystemAttrs *protonDriveAPI.FileSystemAttrs
|
var fileSystemAttrs *protonDriveAPI.FileSystemAttrs
|
||||||
|
var err error
|
||||||
if err = f.pacer.Call(func() (bool, error) {
|
if err = f.pacer.Call(func() (bool, error) {
|
||||||
fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link)
|
fileSystemAttrs, err = f.protonDrive.GetActiveRevisionAttrs(ctx, link)
|
||||||
return shouldRetry(ctx, err)
|
return shouldRetry(ctx, err)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return link, fileSystemAttrs, nil
|
return fileSystemAttrs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// readMetaData gets the metadata if it hasn't already been fetched
|
// Return an Object from a path and link
|
||||||
//
|
//
|
||||||
// it also sets the info
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||||
func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error) {
|
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
|
||||||
if o.link != nil {
|
o := &Object{
|
||||||
return nil
|
fs: f,
|
||||||
}
|
remote: remote,
|
||||||
|
|
||||||
link, fileSystemAttrs, err := o.fs.readMetaDataForRemote(ctx, o.remote, link)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
o.id = link.LinkID
|
o.id = link.LinkID
|
||||||
@ -554,6 +546,10 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
|
|||||||
o.mimetype = link.MIMEType
|
o.mimetype = link.MIMEType
|
||||||
o.link = link
|
o.link = link
|
||||||
|
|
||||||
|
fileSystemAttrs, err := o.fs.readMetaDataForLink(ctx, link)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
if fileSystemAttrs != nil {
|
if fileSystemAttrs != nil {
|
||||||
o.modTime = fileSystemAttrs.ModificationTime
|
o.modTime = fileSystemAttrs.ModificationTime
|
||||||
o.originalSize = &fileSystemAttrs.Size
|
o.originalSize = &fileSystemAttrs.Size
|
||||||
@ -561,23 +557,18 @@ func (o *Object) readMetaData(ctx context.Context, link *proton.Link) (err error
|
|||||||
o.digests = &fileSystemAttrs.Digests
|
o.digests = &fileSystemAttrs.Digests
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return o, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return an Object from a path
|
// Return an Object from a path only
|
||||||
//
|
//
|
||||||
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
||||||
func (f *Fs) newObjectWithLink(ctx context.Context, remote string, link *proton.Link) (fs.Object, error) {
|
func (f *Fs) newObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||||
o := &Object{
|
link, err := f.getObjectLink(ctx, remote)
|
||||||
fs: f,
|
|
||||||
remote: remote,
|
|
||||||
}
|
|
||||||
|
|
||||||
err := o.readMetaData(ctx, link)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return o, nil
|
return f.newObjectWithLink(ctx, remote, link)
|
||||||
}
|
}
|
||||||
|
|
||||||
// List the objects and directories in dir into entries. The
|
// List the objects and directories in dir into entries. The
|
||||||
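The protondrive refactor above splits path resolution (`getObjectLink`) from object construction (`newObjectWithLink`), so callers that already hold a link — listings in particular — no longer trigger a second metadata lookup. A stand-in sketch of the pattern; the types here are illustrative, not the proton library's:

```go
package main

import "fmt"

// link is a stand-in for proton.Link: the resolved identity of a remote file.
type link struct{ id, name string }

// resolve is a stand-in for getObjectLink: the expensive path lookup.
func resolve(path string) (*link, error) {
	return &link{id: "42", name: path}, nil
}

// objectFromLink mirrors newObjectWithLink: build the object from a link we
// already have, without touching the network again.
func objectFromLink(l *link) string {
	return fmt.Sprintf("object{id:%s, remote:%s}", l.id, l.name)
}

// objectFromPath mirrors newObject: resolve once, then reuse objectFromLink.
// Listings that already hold a link call objectFromLink directly, which is
// what removes the duplicated API calls.
func objectFromPath(path string) (string, error) {
	l, err := resolve(path)
	if err != nil {
		return "", err
	}
	return objectFromLink(l), nil
}

func main() {
	o, _ := objectFromPath("dir/file.txt")
	fmt.Println(o)
}
```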
157 backend/s3/s3.go
@@ -136,6 +136,9 @@ var providerOption = fs.Option{
 	}, {
 		Value: "Netease",
 		Help:  "Netease Object Storage (NOS)",
+	}, {
+		Value: "Outscale",
+		Help:  "OUTSCALE Object Storage (OOS)",
 	}, {
 		Value: "Petabox",
 		Help:  "Petabox Object Storage",
@@ -488,6 +491,26 @@ func init() {
 			Value: "eu-south-2",
 			Help:  "Logrono, Spain",
 		}},
+	}, {
+		Name:     "region",
+		Help:     "Region where your bucket will be created and your data stored.\n",
+		Provider: "Outscale",
+		Examples: []fs.OptionExample{{
+			Value: "eu-west-2",
+			Help:  "Paris, France",
+		}, {
+			Value: "us-east-2",
+			Help:  "New Jersey, USA",
+		}, {
+			Value: "us-west-1",
+			Help:  "California, USA",
+		}, {
+			Value: "cloudgouv-eu-west-1",
+			Help:  "SecNumCloud, Paris, France",
+		}, {
+			Value: "ap-northeast-1",
+			Help:  "Tokyo, Japan",
+		}},
 	}, {
 		Name: "region",
 		Help: "Region where your bucket will be created and your data stored.\n",
@@ -1344,6 +1367,26 @@ func init() {
 			Value:    "s3.ap-southeast-1.lyvecloud.seagate.com",
 			Help:     "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
 			Provider: "LyveCloud",
+		}, {
+			Value:    "oos.eu-west-2.outscale.com",
+			Help:     "Outscale EU West 2 (Paris)",
+			Provider: "Outscale",
+		}, {
+			Value:    "oos.us-east-2.outscale.com",
+			Help:     "Outscale US east 2 (New Jersey)",
+			Provider: "Outscale",
+		}, {
+			Value:    "oos.us-west-1.outscale.com",
+			Help:     "Outscale EU West 1 (California)",
+			Provider: "Outscale",
+		}, {
+			Value:    "oos.cloudgouv-eu-west-1.outscale.com",
+			Help:     "Outscale SecNumCloud (Paris)",
+			Provider: "Outscale",
+		}, {
+			Value:    "oos.ap-northeast-1.outscale.com",
+			Help:     "Outscale AP Northeast 1 (Japan)",
+			Provider: "Outscale",
 		}, {
 			Value: "s3.wasabisys.com",
 			Help:  "Wasabi US East 1 (N. Virginia)",
@@ -1798,7 +1841,7 @@ func init() {
 	}, {
 		Name: "location_constraint",
 		Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
-		Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
+		Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
 	}, {
 		Name: "acl",
 		Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -2606,6 +2649,35 @@ knows about - please make a bug report if not.
 `,
 		Default:  fs.Tristate{},
 		Advanced: true,
+	}, {
+		Name: "directory_bucket",
+		Help: strings.ReplaceAll(`Set to use AWS Directory Buckets
+
+If you are using an AWS Directory Bucket then set this flag.
+
+This will ensure no |Content-Md5| headers are sent and ensure |ETag|
+headers are not interpreted as MD5 sums. |X-Amz-Meta-Md5chksum| will
+be set on all objects whether single or multipart uploaded.
+
+This also sets |no_check_bucket = true|.
+
+Note that Directory Buckets do not support:
+
+- Versioning
+- |Content-Encoding: gzip|
+
+Rclone limitations with Directory Buckets:
+
+- rclone does not support creating Directory Buckets with |rclone mkdir|
+- ... or removing them with |rclone rmdir| yet
+- Directory Buckets do not appear when doing |rclone lsf| at the top level.
+- Rclone can't remove auto created directories yet. In theory this should
+  work with |directory_markers = true| but it doesn't.
+- Directories don't seem to appear in recursive (ListR) listings.
+`, "|", "`"),
+		Default:  false,
+		Advanced: true,
+		Provider: "AWS",
 	}, {
 		Name: "sdk_log_mode",
 		Help: strings.ReplaceAll(`Set to debug the SDK
@@ -2780,6 +2852,7 @@ type Options struct {
 	UseMultipartUploads fs.Tristate `config:"use_multipart_uploads"`
 	UseUnsignedPayload  fs.Tristate `config:"use_unsigned_payload"`
 	SDKLogMode          sdkLogMode  `config:"sdk_log_mode"`
+	DirectoryBucket     bool        `config:"directory_bucket"`
 }
 
 // Fs represents a remote s3 server
@@ -3052,9 +3125,16 @@ func (s3logger) Logf(classification logging.Classification, format string, v ...
 func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Client *s3.Client, err error) {
 	ci := fs.GetConfig(ctx)
 	var awsConfig aws.Config
+	// Make the default static auth
+	v := aws.Credentials{
+		AccessKeyID:     opt.AccessKeyID,
+		SecretAccessKey: opt.SecretAccessKey,
+		SessionToken:    opt.SessionToken,
+	}
+	awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
 
 	// Try to fill in the config from the environment if env_auth=true
-	if opt.EnvAuth {
+	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
 		configOpts := []func(*awsconfig.LoadOptions) error{}
 		// Set the name of the profile if supplied
 		if opt.Profile != "" {
@@ -3079,13 +3159,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (s3Cli
 		case opt.SecretAccessKey == "":
 			return nil, errors.New("secret_access_key not found")
 		default:
-			// Make the static auth
-			v := aws.Credentials{
-				AccessKeyID:     opt.AccessKeyID,
-				SecretAccessKey: opt.SecretAccessKey,
-				SessionToken:    opt.SessionToken,
-			}
-			awsConfig.Credentials = &credentials.StaticCredentialsProvider{Value: v}
+			// static credentials are already set
 		}
 	}
 
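Hoisting the static-credentials provider ahead of the `env_auth` branch means explicit keys now always win, and the environment/default chain is only consulted when no keys were configured. A sketch of the same precedence with aws-sdk-go-v2 — simplified, since rclone also wires in profiles, custom endpoints and its own HTTP client:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	awsconfig "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
)

// loadConfig mirrors the precedence above: explicit keys always win;
// the environment/shared config chain is only consulted without keys.
func loadConfig(ctx context.Context, accessKey, secretKey, sessionToken string, envAuth bool) (aws.Config, error) {
	if envAuth && accessKey == "" && secretKey == "" {
		// Defer entirely to the default credential chain
		// (env vars, shared config, IAM role, ...).
		return awsconfig.LoadDefaultConfig(ctx)
	}
	var cfg aws.Config
	cfg.Credentials = credentials.NewStaticCredentialsProvider(accessKey, secretKey, sessionToken)
	return cfg, nil
}

func main() {
	cfg, err := loadConfig(context.Background(), "AKIAEXAMPLE", "secret", "", true)
	if err != nil {
		fmt.Println(err)
		return
	}
	creds, _ := cfg.Credentials.Retrieve(context.Background())
	fmt.Println(creds.AccessKeyID) // AKIAEXAMPLE: static keys win even with envAuth
}
```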
@@ -3215,7 +3289,7 @@ func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
 	// API to get user region endpoint against the Access Key details: https://www.idrive.com/e2/guides/get_region_endpoint
 	resp, err := client.Post("https://api.idrivee2.com/api/service/get_region_end_point",
 		"application/json",
-		strings.NewReader(`{"access_key": "`+value+`"}`))
+		strings.NewReader(`{"access_key": `+strconv.Quote(value)+`}`))
 	if err != nil {
 		return
 	}
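The `strconv.Quote` change fixes request bodies that broke whenever the access key contained a quote or backslash: naive concatenation yields invalid (or injectable) JSON, while `strconv.Quote` emits an escaped string literal that is valid JSON for ASCII input (`json.Marshal` being the fully general route). A runnable comparison:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	value := `ke"y\with-specials`

	// Naive concatenation produces invalid (or injectable) JSON:
	naive := `{"access_key": "` + value + `"}`
	fmt.Println(json.Valid([]byte(naive))) // false

	// strconv.Quote escapes quotes and backslashes, fixing the body:
	quoted := `{"access_key": ` + strconv.Quote(value) + `}`
	fmt.Println(json.Valid([]byte(quoted))) // true

	// json.Marshal of a map/struct is the fully general alternative:
	b, _ := json.Marshal(map[string]string{"access_key": value})
	fmt.Println(string(b))
}
```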
@@ -3328,6 +3402,8 @@ func setQuirks(opt *Options) {
 		urlEncodeListings = false
 		useMultipartEtag = false // untested
 		useAlreadyExists = false // untested
+	case "Outscale":
+		virtualHostStyle = false
 	case "RackCorp":
 		// No quirks
 		useMultipartEtag = false // untested
@@ -3547,6 +3623,14 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		// MD5 digest of their object data.
 		f.etagIsNotMD5 = true
 	}
+	if opt.DirectoryBucket {
+		// Objects uploaded to directory buckets appear to have random ETags
+		//
+		// This doesn't appear to be documented
+		f.etagIsNotMD5 = true
+		// The normal API doesn't work for creating directory buckets, so don't try
+		f.opt.NoCheckBucket = true
+	}
 	f.setRoot(root)
 	f.features = (&fs.Features{
 		ReadMimeType: true,
@@ -4811,15 +4895,16 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 
 var commandHelp = []fs.CommandHelp{{
 	Name:  "restore",
-	Short: "Restore objects from GLACIER to normal storage",
-	Long: `This command can be used to restore one or more objects from GLACIER
-to normal storage.
+	Short: "Restore objects from GLACIER or INTELLIGENT-TIERING archive tier",
+	Long: `This command can be used to restore one or more objects from GLACIER to normal storage
+or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier.
 
 Usage Examples:
 
     rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
     rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
     rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
+    rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY
 
 This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
 
@@ -4847,14 +4932,14 @@ if not.
 `,
 	Opts: map[string]string{
 		"priority":    "Priority of restore: Standard|Expedited|Bulk",
-		"lifetime":    "Lifetime of the active copy in days",
+		"lifetime":    "Lifetime of the active copy in days, ignored for INTELLIGENT-TIERING storage",
 		"description": "The optional description for the job.",
 	},
 }, {
 	Name:  "restore-status",
-	Short: "Show the restore status for objects being restored from GLACIER to normal storage",
-	Long: `This command can be used to show the status for objects being restored from GLACIER
-to normal storage.
+	Short: "Show the restore status for objects being restored from GLACIER or INTELLIGENT-TIERING storage",
+	Long: `This command can be used to show the status for objects being restored from GLACIER to normal storage
+or from INTELLIGENT-TIERING Archive Access / Deep Archive Access tier to the Frequent Access tier.
 
 Usage Examples:
 
@@ -4884,6 +4969,15 @@ It returns a list of status dictionaries.
             "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
         },
         "StorageClass": "DEEP_ARCHIVE"
+    },
+    {
+        "Remote": "test.gz",
+        "VersionID": null,
+        "RestoreStatus": {
+            "IsRestoreInProgress": true,
+            "RestoreExpiryDate": "null"
+        },
+        "StorageClass": "INTELLIGENT_TIERING"
     }
 ]
 `,
@@ -5007,11 +5101,11 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 			RestoreRequest: &types.RestoreRequest{},
 		}
 		if lifetime := opt["lifetime"]; lifetime != "" {
-			ilifetime, err := strconv.ParseInt(lifetime, 10, 64)
-			ilifetime32 := int32(ilifetime)
+			ilifetime, err := strconv.ParseInt(lifetime, 10, 32)
 			if err != nil {
 				return nil, fmt.Errorf("bad lifetime: %w", err)
 			}
+			ilifetime32 := int32(ilifetime)
 			req.RestoreRequest.Days = &ilifetime32
 		}
 		if priority := opt["priority"]; priority != "" {
@@ -5046,12 +5140,15 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 				st.Status = "Not an S3 object"
 				return
 			}
-			if o.storageClass == nil || (*o.storageClass != "GLACIER" && *o.storageClass != "DEEP_ARCHIVE") {
-				st.Status = "Not GLACIER or DEEP_ARCHIVE storage class"
+			if o.storageClass == nil || (*o.storageClass != "GLACIER" && *o.storageClass != "DEEP_ARCHIVE" && *o.storageClass != "INTELLIGENT_TIERING") {
+				st.Status = "Not GLACIER or DEEP_ARCHIVE or INTELLIGENT_TIERING storage class"
 				return
 			}
 			bucket, bucketPath := o.split()
 			reqCopy := req
+			if *o.storageClass == "INTELLIGENT_TIERING" {
+				reqCopy.RestoreRequest.Days = nil
+			}
 			reqCopy.Bucket = &bucket
 			reqCopy.Key = &bucketPath
 			reqCopy.VersionId = o.versionID
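For INTELLIGENT_TIERING the restore request must omit `Days` — S3 rejects a lifetime for that storage class — which is why `reqCopy.RestoreRequest.Days` is nilled above (and why the earlier parse now uses a 32-bit bound so overflow surfaces as an error instead of a silent truncation). A sketch of building such a request; the priority/tier handling is simplified relative to the real command:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// buildRestoreInput mirrors the rule above: GLACIER/DEEP_ARCHIVE restores
// carry a lifetime in days; INTELLIGENT_TIERING restores must omit Days.
func buildRestoreInput(bucket, key, storageClass string, days int32) *s3.RestoreObjectInput {
	req := &types.RestoreRequest{
		GlacierJobParameters: &types.GlacierJobParameters{Tier: types.TierStandard},
	}
	if storageClass != "INTELLIGENT_TIERING" {
		req.Days = aws.Int32(days)
	}
	return &s3.RestoreObjectInput{
		Bucket:         aws.String(bucket),
		Key:            aws.String(key),
		RestoreRequest: req,
	}
}

func main() {
	in := buildRestoreInput("bucket", "archive/file.bin", "INTELLIGENT_TIERING", 7)
	fmt.Println(in.RestoreRequest.Days == nil) // true
}
```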
@@ -5732,7 +5829,7 @@ func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options
 		ContentEncoding: header("Content-Encoding"),
 		ContentLanguage: header("Content-Language"),
 		ContentType:     header("Content-Type"),
-		StorageClass:    types.StorageClass(*header("X-Amz-Storage-Class")),
+		StorageClass:    types.StorageClass(deref(header("X-Amz-Storage-Class"))),
 	}
 	o.setMetaData(&head)
 	return resp.Body, err
@@ -5975,7 +6072,13 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 	if do, ok := reader.(pool.DelayAccountinger); ok {
 		// To figure out this number, do a transfer and if the accounted size is 0 or a
 		// multiple of what it should be, increase or decrease this number.
-		do.DelayAccounting(3)
+		//
+		// For transfers over https the SDK does not sign the body whereas over http it does
+		if len(w.f.opt.Endpoint) >= 5 && strings.EqualFold(w.f.opt.Endpoint[:5], "http:") {
+			do.DelayAccounting(3)
+		} else {
+			do.DelayAccounting(2)
+		}
 	}
 
 	// create checksum of buffer for integrity checking
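The `deref` call replaces an unconditional `*header(...)` that panicked whenever the response lacked the `X-Amz-Storage-Class` header; a missing optional header now decays to the zero value. A generic nil-safe dereference helper of the kind used here — a sketch, since rclone's actual helper is defined elsewhere in the file:

```go
package main

import "fmt"

// deref returns the value p points at, or the zero value when p is nil.
// This turns a missing optional header into "" instead of a panic.
func deref[T any](p *T) T {
	if p == nil {
		var zero T
		return zero
	}
	return *p
}

func main() {
	var missing *string
	present := "STANDARD"
	fmt.Printf("%q\n", deref(missing))  // ""
	fmt.Printf("%q\n", deref(&present)) // "STANDARD"
}
```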
@@ -6009,6 +6112,10 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
 		SSECustomerKey:    w.multiPartUploadInput.SSECustomerKey,
 		SSECustomerKeyMD5: w.multiPartUploadInput.SSECustomerKeyMD5,
 	}
+	if w.f.opt.DirectoryBucket {
+		// Directory buckets do not support "Content-Md5" header
+		uploadPartReq.ContentMD5 = nil
+	}
 	var uout *s3.UploadPartOutput
 	err = w.f.pacer.Call(func() (bool, error) {
 		// rewind the reader on retry and after reading md5
@@ -6285,7 +6392,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 	if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
 		// Set the md5sum as metadata on the object if
 		// - a multipart upload
-		// - the Etag is not an MD5, eg when using SSE/SSE-C
+		// - the Etag is not an MD5, eg when using SSE/SSE-C or directory buckets
 		// provided checksums aren't disabled
 		ui.req.Metadata[metaMD5Hash] = md5sumBase64
 	}
@@ -6300,7 +6407,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
 	if size >= 0 {
 		ui.req.ContentLength = &size
 	}
-	if md5sumBase64 != "" {
+	if md5sumBase64 != "" && !o.fs.opt.DirectoryBucket {
 		ui.req.ContentMD5 = &md5sumBase64
 	}
 	if o.fs.opt.RequesterPays {
@@ -883,7 +883,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 
 // About gets quota information
 func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
-	var total, objects int64
+	var used, objects, total int64
 	if f.rootContainer != "" {
 		var container swift.Container
 		err = f.pacer.Call(func() (bool, error) {
@@ -893,8 +893,9 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 		if err != nil {
 			return nil, fmt.Errorf("container info failed: %w", err)
 		}
-		total = container.Bytes
+		used = container.Bytes
 		objects = container.Count
+		total = container.QuotaBytes
 	} else {
 		var containers []swift.Container
 		err = f.pacer.Call(func() (bool, error) {
@@ -905,14 +906,19 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 			return nil, fmt.Errorf("container listing failed: %w", err)
 		}
 		for _, c := range containers {
-			total += c.Bytes
+			used += c.Bytes
 			objects += c.Count
+			total += c.QuotaBytes
 		}
 	}
 	usage = &fs.Usage{
-		Used:    fs.NewUsageValue(total),   // bytes in use
+		Used:    fs.NewUsageValue(used),    // bytes in use
 		Objects: fs.NewUsageValue(objects), // objects in use
 	}
+	if total > 0 {
+		usage.Total = fs.NewUsageValue(total)
+		usage.Free = fs.NewUsageValue(total - used)
+	}
 	return usage, nil
 }
 
@@ -1410,14 +1416,6 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
 	return
 }
 
-// min returns the smallest of x, y
-func min(x, y int64) int64 {
-	if x < y {
-		return x
-	}
-	return y
-}
-
 // Get the segments for a large object
 //
 // It returns the names of the segments and the container that they live in
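`About` now separates bytes used from quota: `Used` comes from container byte counts, and `Total`/`Free` are only reported when the server exposes `QuotaBytes`. (The deleted local `min` is redundant since Go 1.21 made `min` a builtin.) The arithmetic in isolation, with stand-in types rather than the swift library's:

```go
package main

import "fmt"

// container is a stand-in for swift.Container with the two fields used above.
type container struct {
	Bytes      int64 // bytes stored in the container
	QuotaBytes int64 // quota, 0 when the server reports none
}

// usage mirrors fs.Usage: nil means "unknown", as for quota-less accounts.
type usage struct {
	Used, Total, Free *int64
}

func about(containers []container) usage {
	var used, total int64
	for _, c := range containers {
		used += c.Bytes
		total += c.QuotaBytes
	}
	u := usage{Used: &used}
	if total > 0 {
		free := total - used
		u.Total, u.Free = &total, &free
	}
	return u
}

func main() {
	u := about([]container{{Bytes: 100, QuotaBytes: 1000}, {Bytes: 50, QuotaBytes: 500}})
	fmt.Println(*u.Used, *u.Total, *u.Free) // 150 1500 1350
}
```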
@@ -14,21 +14,30 @@ import (
 	"io"
 	"net/http"
 	"path"
+	"time"
 
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 )
 
-func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
+func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error, sleepTime *time.Duration, wasLocked *bool) (bool, error) {
 	// Not found. Can be returned by NextCloud when merging chunks of an upload.
 	if resp != nil && resp.StatusCode == 404 {
+		if *wasLocked {
+			// Assume a 404 error after we've received a 423 error is actually a success
+			return false, nil
+		}
 		return true, err
 	}
 
 	// 423 LOCKED
 	if resp != nil && resp.StatusCode == 423 {
-		return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
+		*wasLocked = true
+		fs.Logf(f, "Sleeping for %v to wait for chunks to be merged after 423 error", *sleepTime)
+		time.Sleep(*sleepTime)
+		*sleepTime *= 2
+		return true, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunks merging is still in progress on NextCloud, but it may also indicate a failed transfer: %w", err)
 	}
 
 	return f.shouldRetry(ctx, resp, err)
@@ -180,9 +189,11 @@ func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs
 	}
 	opts.ExtraHeaders = o.extraHeaders(ctx, src)
 	opts.ExtraHeaders["Destination"] = destinationURL.String()
+	sleepTime := 5 * time.Second
+	wasLocked := false
 	err = o.fs.pacer.Call(func() (bool, error) {
 		resp, err = o.fs.srv.Call(ctx, &opts)
-		return o.fs.shouldRetryChunkMerge(ctx, resp, err)
+		return o.fs.shouldRetryChunkMerge(ctx, resp, err, &sleepTime, &wasLocked)
 	})
 	if err != nil {
 		return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
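The chunk-merge retry now treats 423 LOCKED as "merge still running": sleep, double the delay, retry — and a later 404 is taken as success, because the finished merge removed the upload directory behind our back. The state machine in isolation:

```go
package main

import (
	"fmt"
	"time"
)

// retryMerge models the 423/404 handling above: 423 means "still merging"
// (back off and retry with a doubled sleep); a 404 after a 423 means the
// merge finished and cleaned up, so it is treated as success.
func retryMerge(call func() int) error {
	sleepTime := 5 * time.Millisecond // 5s in the real backend
	wasLocked := false
	for attempt := 0; attempt < 10; attempt++ {
		switch status := call(); status {
		case 200:
			return nil
		case 423:
			wasLocked = true
			time.Sleep(sleepTime)
			sleepTime *= 2
		case 404:
			if wasLocked {
				return nil // merge completed behind our back
			}
			return fmt.Errorf("not found")
		default:
			return fmt.Errorf("unexpected status %d", status)
		}
	}
	return fmt.Errorf("merge did not complete")
}

func main() {
	responses := []int{423, 423, 404}
	i := 0
	err := retryMerge(func() int { s := responses[i]; i++; return s })
	fmt.Println(err) // <nil>
}
```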
@@ -7,7 +7,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"net/url"
 	"path"
@@ -26,6 +25,7 @@ import (
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/oauthutil"
 	"github.com/rclone/rclone/lib/pacer"
+	"github.com/rclone/rclone/lib/random"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/rclone/rclone/lib/rest"
 	"golang.org/x/oauth2"
@@ -39,6 +39,8 @@ const (
 	minSleep      = 10 * time.Millisecond
 	maxSleep      = 2 * time.Second // may needs to be increased, testing needed
 	decayConstant = 2               // bigger for slower decay, exponential
+
+	userAgentTemplae = `Yandex.Disk {"os":"windows","dtype":"ydisk3","vsn":"3.2.37.4977","id":"6BD01244C7A94456BBCEE7EEC990AEAD","id2":"0F370CD40C594A4783BC839C846B999C","session_id":"%s"}`
 )
 
 // Globals
@@ -79,15 +81,22 @@ func init() {
 			// it doesn't seem worth making an exception for this
 			Default: (encoder.Display |
 				encoder.EncodeInvalidUtf8),
+		}, {
+			Name:     "spoof_ua",
+			Help:     "Set the user agent to match an official version of the yandex disk client. May help with upload performance.",
+			Default:  true,
+			Advanced: true,
+			Hide:     fs.OptionHideConfigurator,
 		}}...),
 	})
 }
 
 // Options defines the configuration for this backend
 type Options struct {
 	Token          string               `config:"token"`
 	HardDelete     bool                 `config:"hard_delete"`
 	Enc            encoder.MultiEncoder `config:"encoding"`
+	SpoofUserAgent bool                 `config:"spoof_ua"`
 }
 
 // Fs represents a remote yandex
@@ -254,6 +263,12 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		return nil, err
 	}
 
+	ctx, ci := fs.AddConfig(ctx)
+	if fs.ConfigOptionsInfo.Get("user_agent").IsDefault() && opt.SpoofUserAgent {
+		randomSessionID, _ := random.Password(128)
+		ci.UserAgent = fmt.Sprintf(userAgentTemplae, randomSessionID)
+	}
+
 	token, err := oauthutil.GetToken(name, m)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't read OAuth token: %w", err)
@@ -267,14 +282,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		if err != nil {
 			return nil, fmt.Errorf("couldn't save OAuth token: %w", err)
 		}
-		log.Printf("Automatically upgraded OAuth config.")
+		fs.Logf(nil, "Automatically upgraded OAuth config.")
 	}
 	oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 	if err != nil {
 		return nil, fmt.Errorf("failed to configure Yandex: %w", err)
 	}
 
-	ci := fs.GetConfig(ctx)
 	f := &Fs{
 		name: name,
 		opt:  *opt,
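The spoofed user agent is only applied when the user left `--user-agent` at its default, and each `Fs` bakes a fresh random `session_id` into the template. A sketch of the template fill with a stand-in random generator in place of rclone's `lib/random.Password`:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

const userAgentTemplate = `Yandex.Disk {"os":"windows","dtype":"ydisk3","vsn":"3.2.37.4977","id":"6BD01244C7A94456BBCEE7EEC990AEAD","id2":"0F370CD40C594A4783BC839C846B999C","session_id":"%s"}`

// sessionID generates a random session token; rclone uses its own
// random.Password helper, this hex generator is just a stand-in.
func sessionID(nBytes int) (string, error) {
	b := make([]byte, nBytes)
	if _, err := rand.Read(b); err != nil {
		return "", err
	}
	return hex.EncodeToString(b), nil
}

func main() {
	id, err := sessionID(16)
	if err != nil {
		panic(err)
	}
	// Each Fs instance gets a fresh session_id baked into the user agent.
	fmt.Println(fmt.Sprintf(userAgentTemplate, id))
}
```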
@@ -2,6 +2,8 @@
 package api
 
 import (
+	"encoding/json"
+	"fmt"
 	"strconv"
 	"time"
 )
@@ -12,7 +14,12 @@ type Time time.Time
 
 // UnmarshalJSON turns JSON into a Time
 func (t *Time) UnmarshalJSON(data []byte) error {
-	millis, err := strconv.ParseInt(string(data), 10, 64)
+	s := string(data)
+	// If the time is a quoted string, strip quotes
+	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
+		s = s[1 : len(s)-1]
+	}
+	millis, err := strconv.ParseInt(s, 10, 64)
 	if err != nil {
 		return err
 	}
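The tolerant `UnmarshalJSON` above accepts epoch milliseconds whether Zoho sends them bare or quoted. A runnable version of the same decoder:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"
)

// Time decodes epoch milliseconds that the API sends either bare
// (1700000000000) or quoted ("1700000000000"), the tolerance added above.
type Time time.Time

func (t *Time) UnmarshalJSON(data []byte) error {
	s := string(data)
	if len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"' {
		s = s[1 : len(s)-1]
	}
	millis, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return err
	}
	*t = Time(time.UnixMilli(millis))
	return nil
}

func main() {
	var a, b Time
	_ = json.Unmarshal([]byte(`1700000000000`), &a)
	_ = json.Unmarshal([]byte(`"1700000000000"`), &b)
	fmt.Println(time.Time(a).Equal(time.Time(b))) // true
}
```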
@@ -20,8 +27,8 @@ func (t *Time) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
-// User is a Zoho user we are only interested in the ZUID here
-type User struct {
+// OAuthUser is a Zoho user we are only interested in the ZUID here
+type OAuthUser struct {
 	FirstName string `json:"First_Name"`
 	Email     string `json:"Email"`
 	LastName  string `json:"Last_Name"`
@@ -29,12 +36,41 @@ type User struct {
 	ZUID int64 `json:"ZUID"`
 }
 
-// TeamWorkspace represents a Zoho Team or workspace
+// UserInfoResponse is returned by the user info API.
+type UserInfoResponse struct {
+	Data struct {
+		ID         string `json:"id"`
+		Type       string `json:"users"`
+		Attributes struct {
+			EmailID string `json:"email_id"`
+			Edition string `json:"edition"`
+		} `json:"attributes"`
+	} `json:"data"`
+}
+
+// PrivateSpaceInfo gives basic information about a users private folder.
+type PrivateSpaceInfo struct {
+	Data struct {
+		ID   string `json:"id"`
+		Type string `json:"string"`
+	} `json:"data"`
+}
+
+// CurrentTeamInfo gives information about the current user in a team.
+type CurrentTeamInfo struct {
+	Data struct {
+		ID   string `json:"id"`
+		Type string `json:"string"`
+	}
+}
+
+// TeamWorkspace represents a Zoho Team, Workspace or Private Space
 // It's actually a VERY large json object that differs between
-// Team and Workspace but we are only interested in some fields
-// that both of them have so we can use the same struct for both
+// Team and Workspace and Private Space but we are only interested in some fields
+// that all of them have so we can use the same struct.
 type TeamWorkspace struct {
 	ID         string `json:"id"`
+	Type       string `json:"type"`
 	Attributes struct {
 		Name    string `json:"name"`
 		Created Time   `json:"created_time_in_millisecond"`
@@ -42,7 +78,8 @@ type TeamWorkspace struct {
 	} `json:"attributes"`
 }
 
-// TeamWorkspaceResponse is the response by the list teams api
+// TeamWorkspaceResponse is the response by the list teams API, list workspace API
+// or list team private spaces API.
 type TeamWorkspaceResponse struct {
 	TeamWorkspace []TeamWorkspace `json:"data"`
 }
@@ -84,6 +121,73 @@ type ItemList struct {
 	Items []Item `json:"data"`
 }
 
+// UploadFileInfo is what the FileInfo field in the UnloadInfo struct decodes to
+type UploadFileInfo struct {
+	OrgID           string `json:"ORG_ID"`
+	ResourceID      string `json:"RESOURCE_ID"`
+	LibraryID       string `json:"LIBRARY_ID"`
+	Md5Checksum     string `json:"MD5_CHECKSUM"`
+	ParentModelID   string `json:"PARENT_MODEL_ID"`
+	ParentID        string `json:"PARENT_ID"`
+	ResourceType    int    `json:"RESOURCE_TYPE"`
+	WmsSentTime     string `json:"WMS_SENT_TIME"`
+	TabID           string `json:"TAB_ID"`
+	Owner           string `json:"OWNER"`
+	ResourceGroup   string `json:"RESOURCE_GROUP"`
+	ParentModelName string `json:"PARENT_MODEL_NAME"`
+	Size            int64  `json:"size"`
+	Operation       string `json:"OPERATION"`
+	EventID         string `json:"EVENT_ID"`
+	AuditInfo       struct {
+		VersionInfo struct {
+			VersionAuthors    []string `json:"versionAuthors"`
+			VersionID         string   `json:"versionId"`
+			IsMinorVersion    bool     `json:"isMinorVersion"`
+			VersionTime       Time     `json:"versionTime"`
+			VersionAuthorZuid []string `json:"versionAuthorZuid"`
+			VersionNotes      string   `json:"versionNotes"`
+			VersionNumber     string   `json:"versionNumber"`
+		} `json:"versionInfo"`
+		Resource struct {
+			Owner            string `json:"owner"`
+			CreatedTime      Time   `json:"created_time"`
+			Creator          string `json:"creator"`
+			ServiceType      int    `json:"service_type"`
+			Extension        string `json:"extension"`
+			StatusChangeTime Time   `json:"status_change_time"`
+			ResourceType     int    `json:"resource_type"`
+			Name             string `json:"name"`
+		} `json:"resource"`
+		ParentInfo struct {
+			ParentName string `json:"parentName"`
+			ParentID   string `json:"parentId"`
+			ParentType int    `json:"parentType"`
+		} `json:"parentInfo"`
+		LibraryInfo struct {
+			LibraryName string `json:"libraryName"`
+			LibraryID   string `json:"libraryId"`
+			LibraryType int    `json:"libraryType"`
+		} `json:"libraryInfo"`
+		UpdateType string `json:"updateType"`
+		StatusCode string `json:"statusCode"`
+	} `json:"AUDIT_INFO"`
+	ZUID   int64  `json:"ZUID"`
+	TeamID string `json:"TEAM_ID"`
+}
+
+// GetModTime fetches the modification time of the upload
+//
+// This tries a few places and if all fails returns the current time
+func (ufi *UploadFileInfo) GetModTime() Time {
+	if t := ufi.AuditInfo.Resource.CreatedTime; !time.Time(t).IsZero() {
+		return t
+	}
+	if t := ufi.AuditInfo.Resource.StatusChangeTime; !time.Time(t).IsZero() {
+		return t
+	}
+	return Time(time.Now())
+}
+
 // UploadInfo is a simplified and slightly different version of
 // the Item struct only used in the response to uploads
 type UploadInfo struct {
@@ -91,14 +195,53 @@ type UploadInfo struct {
 		ParentID    string `json:"parent_id"`
 		FileName    string `json:"notes.txt"`
 		RessourceID string `json:"resource_id"`
+		Permalink   string `json:"Permalink"`
+		FileInfo    string `json:"File INFO"` // JSON encoded UploadFileInfo
 	} `json:"attributes"`
 }
 
+// GetUploadFileInfo decodes the embedded FileInfo
+func (ui *UploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
+	var ufi UploadFileInfo
+	err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
+	}
+	return &ufi, nil
+}
+
+// LargeUploadInfo is once again a slightly different version of UploadInfo
+// returned as part of an LargeUploadResponse by the large file upload API.
+type LargeUploadInfo struct {
+	Attributes struct {
+		ParentID    string `json:"parent_id"`
+		FileName    string `json:"file_name"`
+		RessourceID string `json:"resource_id"`
+		FileInfo    string `json:"file_info"`
+	} `json:"attributes"`
+}
+
+// GetUploadFileInfo decodes the embedded FileInfo
+func (ui *LargeUploadInfo) GetUploadFileInfo() (*UploadFileInfo, error) {
+	var ufi UploadFileInfo
+	err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &ufi)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode FileInfo: %w", err)
+	}
+	return &ufi, nil
+}
+
 // UploadResponse is the response to a file Upload
 type UploadResponse struct {
 	Uploads []UploadInfo `json:"data"`
 }
 
+// LargeUploadResponse is the response returned by large file upload API.
+type LargeUploadResponse struct {
+	Uploads []LargeUploadInfo `json:"data"`
+	Status  string            `json:"status"`
+}
+
 // WriteMetadataRequest is used to write metadata for a
 // single item
 type WriteMetadataRequest struct {
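`GetUploadFileInfo` deals with double-encoded JSON: the upload response carries a complete JSON document inside a string field, so decoding takes two passes — once for the envelope, once for the payload. A self-contained sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// uploadInfo models the envelope: FileInfo is a JSON document *as a string*.
type uploadInfo struct {
	Attributes struct {
		FileInfo string `json:"File INFO"` // JSON encoded payload
	} `json:"attributes"`
}

// fileInfo models a slice of the embedded payload.
type fileInfo struct {
	ResourceID string `json:"RESOURCE_ID"`
	Size       int64  `json:"size"`
}

func main() {
	raw := `{"attributes":{"File INFO":"{\"RESOURCE_ID\":\"abc123\",\"size\":42}"}}`

	var ui uploadInfo
	if err := json.Unmarshal([]byte(raw), &ui); err != nil {
		panic(err)
	}
	var fi fileInfo
	if err := json.Unmarshal([]byte(ui.Attributes.FileInfo), &fi); err != nil {
		panic(err)
	}
	fmt.Println(fi.ResourceID, fi.Size) // abc123 42
}
```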
|
@@ -14,6 +14,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/google/uuid"
 	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/pacer"
 	"github.com/rclone/rclone/lib/random"
@@ -36,9 +37,11 @@ const (
 	rcloneClientID              = "1000.46MXF275FM2XV7QCHX5A7K3LGME66B"
 	rcloneEncryptedClientSecret = "U-2gxclZQBcOG9NPhjiXAhj-f0uQ137D0zar8YyNHXHkQZlTeSpIOQfmCb4oSpvosJp_SJLXmLLeUA"
 	minSleep                    = 10 * time.Millisecond
-	maxSleep                    = 2 * time.Second
+	maxSleep                    = 60 * time.Second
 	decayConstant               = 2 // bigger for slower decay, exponential
 	configRootID                = "root_folder_id"
+
+	defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB
 )
 
 // Globals
@@ -50,6 +53,7 @@ var (
 		"WorkDrive.team.READ",
 		"WorkDrive.workspace.READ",
 		"WorkDrive.files.ALL",
+		"ZohoFiles.files.ALL",
 	},
 	Endpoint: oauth2.Endpoint{
 		AuthURL: "https://accounts.zoho.eu/oauth/v2/auth",
@@ -61,6 +65,8 @@ var (
 		RedirectURL: oauthutil.RedirectLocalhostURL,
 	}
 	rootURL     = "https://workdrive.zoho.eu/api/v1"
+	downloadURL = "https://download.zoho.eu/v1/workdrive"
+	uploadURL   = "http://upload.zoho.eu/workdrive-api/v1/"
 	accountsURL = "https://accounts.zoho.eu"
 )
 
@@ -79,7 +85,7 @@ func init() {
 	getSrvs := func() (authSrv, apiSrv *rest.Client, err error) {
 		oAuthClient, _, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
 		if err != nil {
-			return nil, nil, fmt.Errorf("failed to load oAuthClient: %w", err)
+			return nil, nil, fmt.Errorf("failed to load OAuth client: %w", err)
 		}
 		authSrv = rest.NewClient(oAuthClient).SetRoot(accountsURL)
 		apiSrv = rest.NewClient(oAuthClient).SetRoot(rootURL)
@@ -88,12 +94,12 @@ func init() {
 
 	switch config.State {
 	case "":
-		return oauthutil.ConfigOut("teams", &oauthutil.Options{
+		return oauthutil.ConfigOut("type", &oauthutil.Options{
 			OAuth2Config: oauthConfig,
 			// No refresh token unless ApprovalForce is set
 			OAuth2Opts: []oauth2.AuthCodeOption{oauth2.ApprovalForce},
 		})
-	case "teams":
+	case "type":
 		// We need to rewrite the token type to "Zoho-oauthtoken" because Zoho wants
 		// it's own custom type
 		token, err := oauthutil.GetToken(name, m)
@@ -108,24 +114,43 @@ func init() {
 			}
 		}
 
-		authSrv, apiSrv, err := getSrvs()
+		_, apiSrv, err := getSrvs()
 		if err != nil {
 			return nil, err
 		}
 
-		// Get the user Info
-		opts := rest.Opts{
-			Method: "GET",
-			Path:   "/oauth/user/info",
-		}
-		var user api.User
-		_, err = authSrv.CallJSON(ctx, &opts, nil, &user)
+		userInfo, err := getUserInfo(ctx, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		// If personal Edition only one private Space is available. Directly configure that.
+		if userInfo.Data.Attributes.Edition == "PERSONAL" {
+			return fs.ConfigResult("private_space", userInfo.Data.ID)
+		}
+		// Otherwise go to team selection
+		return fs.ConfigResult("team", userInfo.Data.ID)
+	case "private_space":
+		_, apiSrv, err := getSrvs()
+		if err != nil {
+			return nil, err
+		}
+
+		workspaces, err := getPrivateSpaces(ctx, config.Result, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
+			workspace := workspaces[i]
+			return workspace.ID, workspace.Name
+		})
+	case "team":
+		_, apiSrv, err := getSrvs()
 		if err != nil {
 			return nil, err
 		}
 
 		// Get the teams
-		teams, err := listTeams(ctx, user.ZUID, apiSrv)
+		teams, err := listTeams(ctx, config.Result, apiSrv)
 		if err != nil {
 			return nil, err
 		}
@@ -143,9 +168,19 @@ func init() {
 		if err != nil {
 			return nil, err
 		}
+		currentTeamInfo, err := getCurrentTeamInfo(ctx, teamID, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		privateSpaces, err := getPrivateSpaces(ctx, currentTeamInfo.Data.ID, apiSrv)
+		if err != nil {
+			return nil, err
+		}
+		workspaces = append(workspaces, privateSpaces...)
+
 		return fs.ConfigChoose("workspace_end", "config_workspace", "Workspace ID", len(workspaces), func(i int) (string, string) {
 			workspace := workspaces[i]
-			return workspace.ID, workspace.Attributes.Name
+			return workspace.ID, workspace.Name
 		})
 	case "workspace_end":
 		workspaceID := config.Result
@@ -179,7 +214,13 @@ browser.`,
 		}, {
 			Value: "com.au",
 			Help:  "Australia",
-		}}}, {
+		}},
+	}, {
+		Name:     "upload_cutoff",
+		Help:     "Cutoff for switching to large file upload api (>= 10 MiB).",
+		Default:  fs.SizeSuffix(defaultUploadCutoff),
+		Advanced: true,
+	}, {
 		Name:     config.ConfigEncoding,
 		Help:     config.ConfigEncodingHelp,
 		Advanced: true,
@@ -193,6 +234,7 @@ browser.`,
 
 // Options defines the configuration for this backend
 type Options struct {
+	UploadCutoff fs.SizeSuffix        `config:"upload_cutoff"`
 	RootFolderID string               `config:"root_folder_id"`
 	Region       string               `config:"region"`
 	Enc          encoder.MultiEncoder `config:"encoding"`
@@ -200,13 +242,15 @@ type Options struct {
 
 // Fs represents a remote workdrive
 type Fs struct {
 	name        string             // name of this remote
 	root        string             // the path we are working on
 	opt         Options            // parsed options
 	features    *fs.Features       // optional features
 	srv         *rest.Client       // the connection to the server
-	dirCache    *dircache.DirCache // Map of directory path to directory id
-	pacer       *fs.Pacer          // pacer for API calls
+	downloadsrv *rest.Client       // the connection to the download server
+	uploadsrv   *rest.Client       // the connection to the upload server
+	dirCache    *dircache.DirCache // Map of directory path to directory id
+	pacer       *fs.Pacer          // pacer for API calls
 }
 
 // Object describes a Zoho WorkDrive object
@@ -229,6 +273,8 @@ func setupRegion(m configmap.Mapper) error {
 		return errors.New("no region set")
 	}
 	rootURL = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
+	downloadURL = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
+	uploadURL = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
 	accountsURL = fmt.Sprintf("https://accounts.zoho.%s", region)
 	oauthConfig.Endpoint.AuthURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/auth", region)
 	oauthConfig.Endpoint.TokenURL = fmt.Sprintf("https://accounts.zoho.%s/oauth/v2/token", region)
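To make the region wiring concrete, a small sketch of the URL derivation that setupRegion now performs, mirroring the three Sprintf patterns above; the helper name and the sample region value are illustrative only:

package main

import "fmt"

// regionURLs mirrors the three Sprintf patterns in setupRegion.
func regionURLs(region string) (root, download, upload string) {
	root = fmt.Sprintf("https://workdrive.zoho.%s/api/v1", region)
	download = fmt.Sprintf("https://download.zoho.%s/v1/workdrive", region)
	upload = fmt.Sprintf("https://upload.zoho.%s/workdrive-api/v1", region)
	return root, download, upload
}

func main() {
	fmt.Println(regionURLs("com"))
	// https://workdrive.zoho.com/api/v1 https://download.zoho.com/v1/workdrive https://upload.zoho.com/workdrive-api/v1
}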
@@ -237,11 +283,63 @@ func setupRegion(m configmap.Mapper) error {
 
 // ------------------------------------------------------------
 
-func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWorkspace, error) {
+type workspaceInfo struct {
+	ID   string
+	Name string
+}
+
+func getUserInfo(ctx context.Context, srv *rest.Client) (*api.UserInfoResponse, error) {
+	var userInfo api.UserInfoResponse
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/users/me",
+		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
+	}
+	_, err := srv.CallJSON(ctx, &opts, nil, &userInfo)
+	if err != nil {
+		return nil, err
+	}
+	return &userInfo, nil
+}
+
+func getCurrentTeamInfo(ctx context.Context, teamID string, srv *rest.Client) (*api.CurrentTeamInfo, error) {
+	var currentTeamInfo api.CurrentTeamInfo
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/teams/" + teamID + "/currentuser",
+		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
+	}
+	_, err := srv.CallJSON(ctx, &opts, nil, &currentTeamInfo)
+	if err != nil {
+		return nil, err
+	}
+	return &currentTeamInfo, err
+}
+
+func getPrivateSpaces(ctx context.Context, teamUserID string, srv *rest.Client) ([]workspaceInfo, error) {
+	var privateSpaceListResponse api.TeamWorkspaceResponse
+	opts := rest.Opts{
+		Method:       "GET",
+		Path:         "/users/" + teamUserID + "/privatespace",
+		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
+	}
+	_, err := srv.CallJSON(ctx, &opts, nil, &privateSpaceListResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	workspaceList := make([]workspaceInfo, 0, len(privateSpaceListResponse.TeamWorkspace))
+	for _, workspace := range privateSpaceListResponse.TeamWorkspace {
+		workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: "My Space"})
+	}
+	return workspaceList, err
+}
+
+func listTeams(ctx context.Context, zuid string, srv *rest.Client) ([]api.TeamWorkspace, error) {
 	var teamList api.TeamWorkspaceResponse
 	opts := rest.Opts{
 		Method:       "GET",
-		Path:         "/users/" + strconv.FormatInt(uid, 10) + "/teams",
+		Path:         "/users/" + zuid + "/teams",
 		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
 	}
 	_, err := srv.CallJSON(ctx, &opts, nil, &teamList)
@@ -251,18 +349,24 @@ func listTeams(ctx context.Context, uid int64, srv *rest.Client) ([]api.TeamWorkspace, error) {
 	return teamList.TeamWorkspace, nil
 }
 
-func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]api.TeamWorkspace, error) {
-	var workspaceList api.TeamWorkspaceResponse
+func listWorkspaces(ctx context.Context, teamID string, srv *rest.Client) ([]workspaceInfo, error) {
+	var workspaceListResponse api.TeamWorkspaceResponse
 	opts := rest.Opts{
 		Method:       "GET",
 		Path:         "/teams/" + teamID + "/workspaces",
 		ExtraHeaders: map[string]string{"Accept": "application/vnd.api+json"},
 	}
-	_, err := srv.CallJSON(ctx, &opts, nil, &workspaceList)
+	_, err := srv.CallJSON(ctx, &opts, nil, &workspaceListResponse)
 	if err != nil {
 		return nil, err
 	}
-	return workspaceList.TeamWorkspace, nil
+
+	workspaceList := make([]workspaceInfo, 0, len(workspaceListResponse.TeamWorkspace))
+	for _, workspace := range workspaceListResponse.TeamWorkspace {
+		workspaceList = append(workspaceList, workspaceInfo{ID: workspace.ID, Name: workspace.Attributes.Name})
+	}
+
+	return workspaceList, nil
 }
 
 // --------------------------------------------------------------
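The new workspaceInfo type exists so that team workspaces and private spaces, which come back from the API with different attribute shapes, can be merged into one list for the workspace chooser. A minimal sketch of that merge, with invented IDs and names:

package main

import "fmt"

type workspaceInfo struct {
	ID   string
	Name string
}

func main() {
	// Invented example data; real values come from the API calls above.
	team := []workspaceInfo{{ID: "ws-1", Name: "Marketing"}}
	private := []workspaceInfo{{ID: "pv-1", Name: "My Space"}}
	workspaces := append(team, private...)
	for i, w := range workspaces {
		fmt.Printf("%d: %s (%s)\n", i, w.Name, w.ID)
	}
}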
@@ -285,13 +389,20 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
 	}
 	authRetry := false
 
+	// Bail out early if we are missing OAuth Scopes.
+	if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Status, "INVALID_OAUTHSCOPE") {
+		fs.Errorf(nil, "zoho: missing OAuth Scope. Run rclone config reconnect to fix this issue.")
+		return false, err
+	}
+
 	if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
 		authRetry = true
 		fs.Debugf(nil, "Should retry: %v", err)
 	}
 	if resp != nil && resp.StatusCode == 429 {
-		fs.Errorf(nil, "zoho: rate limit error received, sleeping for 60s: %v", err)
-		time.Sleep(60 * time.Second)
+		err = pacer.RetryAfterError(err, 60*time.Second)
+		fs.Debugf(nil, "Too many requests. Trying again in %d seconds.", 60)
+		return true, err
 	}
 	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }
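The 429 branch no longer blocks the calling goroutine with time.Sleep; it hands the wait to the pacer instead. A minimal sketch of the new shape, assuming the lib/pacer API as used in this diff:

package zohosketch

import (
	"time"

	"github.com/rclone/rclone/lib/pacer"
)

// handle429 sketches the new rate limit handling: annotate the error
// with a retry delay and report it as retryable, so the pacer schedules
// the retry rather than sleeping inline.
func handle429(err error) (bool, error) {
	err = pacer.RetryAfterError(err, 60*time.Second)
	return true, err
}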
@@ -389,6 +500,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err := configstruct.Set(m, opt); err != nil {
 		return nil, err
 	}
+
+	if opt.UploadCutoff < defaultUploadCutoff {
+		return nil, fmt.Errorf("zoho: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(defaultUploadCutoff))
+	}
+
 	err := setupRegion(m)
 	if err != nil {
 		return nil, err
@@ -401,11 +517,13 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	}
 
 	f := &Fs{
 		name:        name,
 		root:        root,
 		opt:         *opt,
 		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
-		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		downloadsrv: rest.NewClient(oAuthClient).SetRoot(downloadURL),
+		uploadsrv:   rest.NewClient(oAuthClient).SetRoot(uploadURL),
+		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
@@ -643,9 +761,61 @@ func (f *Fs) createObject(ctx context.Context, remote string, size int64, modTime time.Time) (o *Object, leaf string, directoryID string, err error) {
 	return
 }
 
+func (f *Fs) uploadLargeFile(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
+	opts := rest.Opts{
+		Method:        "POST",
+		Path:          "/stream/upload",
+		Body:          in,
+		ContentLength: &size,
+		ContentType:   "application/octet-stream",
+		Options:       options,
+		ExtraHeaders: map[string]string{
+			"x-filename":          url.QueryEscape(name),
+			"x-parent_id":         parent,
+			"override-name-exist": "true",
+			"upload-id":           uuid.New().String(),
+			"x-streammode":        "1",
+		},
+	}
+
+	var err error
+	var resp *http.Response
+	var uploadResponse *api.LargeUploadResponse
+	err = f.pacer.CallNoRetry(func() (bool, error) {
+		resp, err = f.uploadsrv.CallJSON(ctx, &opts, nil, &uploadResponse)
+		return shouldRetry(ctx, resp, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("upload large error: %v", err)
+	}
+	if len(uploadResponse.Uploads) != 1 {
+		return nil, errors.New("upload: invalid response")
+	}
+	upload := uploadResponse.Uploads[0]
+	uploadInfo, err := upload.GetUploadFileInfo()
+	if err != nil {
+		return nil, fmt.Errorf("upload error: %w", err)
+	}
+
+	// Fill in the api.Item from the api.UploadFileInfo
+	var info api.Item
+	info.ID = upload.Attributes.RessourceID
+	info.Attributes.Name = upload.Attributes.FileName
+	// info.Attributes.Type = not used
+	info.Attributes.IsFolder = false
+	// info.Attributes.CreatedTime = not used
+	info.Attributes.ModifiedTime = uploadInfo.GetModTime()
+	// info.Attributes.UploadedTime = 0 not used
+	info.Attributes.StorageInfo.Size = uploadInfo.Size
+	info.Attributes.StorageInfo.FileCount = 0
+	info.Attributes.StorageInfo.FolderCount = 0
+
+	return &info, nil
+}
+
 func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
 	params := url.Values{}
-	params.Set("filename", name)
+	params.Set("filename", url.QueryEscape(name))
 	params.Set("parent_id", parent)
 	params.Set("override-name-exist", strconv.FormatBool(true))
 	formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, nil, "content", name)
@@ -677,25 +847,26 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
 	if len(uploadResponse.Uploads) != 1 {
 		return nil, errors.New("upload: invalid response")
 	}
-	// Received meta data is missing size so we have to read it again.
-	// It doesn't always appear on first read so try again if necessary
-	var info *api.Item
-	const maxTries = 10
-	sleepTime := 100 * time.Millisecond
-	for i := 0; i < maxTries; i++ {
-		info, err = f.readMetaDataForID(ctx, uploadResponse.Uploads[0].Attributes.RessourceID)
-		if err != nil {
-			return nil, err
-		}
-		if info.Attributes.StorageInfo.Size != 0 || size == 0 {
-			break
-		}
-		fs.Debugf(f, "Size not available yet for %q - try again in %v (try %d/%d)", name, sleepTime, i+1, maxTries)
-		time.Sleep(sleepTime)
-		sleepTime *= 2
+	upload := uploadResponse.Uploads[0]
+	uploadInfo, err := upload.GetUploadFileInfo()
+	if err != nil {
+		return nil, fmt.Errorf("upload error: %w", err)
 	}
 
-	return info, nil
+	// Fill in the api.Item from the api.UploadFileInfo
+	var info api.Item
+	info.ID = upload.Attributes.RessourceID
+	info.Attributes.Name = upload.Attributes.FileName
+	// info.Attributes.Type = not used
+	info.Attributes.IsFolder = false
+	// info.Attributes.CreatedTime = not used
+	info.Attributes.ModifiedTime = uploadInfo.GetModTime()
+	// info.Attributes.UploadedTime = 0 not used
+	info.Attributes.StorageInfo.Size = uploadInfo.Size
+	info.Attributes.StorageInfo.FileCount = 0
+	info.Attributes.StorageInfo.FolderCount = 0
+
+	return &info, nil
 }
 
 // Put the object into the container
@@ -704,21 +875,40 @@ func (f *Fs) upload(ctx context.Context, name string, parent string, size int64, in io.Reader, options ...fs.OpenOption) (*api.Item, error) {
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	size := src.Size()
-	remote := src.Remote()
+	existingObj, err := f.NewObject(ctx, src.Remote())
+	switch err {
+	case nil:
+		return existingObj, existingObj.Update(ctx, in, src, options...)
+	case fs.ErrorObjectNotFound:
+		size := src.Size()
+		remote := src.Remote()
 
-	// Create the directory for the object if it doesn't exist
-	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
-	if err != nil {
-		return nil, err
-	}
+		// Create the directory for the object if it doesn't exist
+		leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
+		if err != nil {
+			return nil, err
+		}
 
-	// Upload the file
-	info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
-	if err != nil {
-		return nil, err
+		// use normal upload API for small sizes (<10MiB)
+		if size < int64(f.opt.UploadCutoff) {
+			info, err := f.upload(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+			if err != nil {
+				return nil, err
+			}
+
+			return f.newObjectWithInfo(ctx, remote, info)
+		}
+
+		// large file API otherwise
+		info, err := f.uploadLargeFile(ctx, f.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+		if err != nil {
+			return nil, err
+		}
+
+		return f.newObjectWithInfo(ctx, remote, info)
+	default:
+		return nil, err
 	}
-	return f.newObjectWithInfo(ctx, remote, info)
 }
 
 // Mkdir creates the container if it doesn't exist
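Put and Update now share the same size-based dispatch between the two upload endpoints. A minimal standalone sketch of that decision, using the 10 MiB default from the constants hunk; the helper name is invented for illustration:

package main

import "fmt"

const defaultUploadCutoff = 10 * 1024 * 1024 // 10 MiB, as in the diff

// pickUploadAPI mirrors the dispatch in Put/Update: the multipart
// endpoint below the cutoff, the streaming large file API otherwise.
func pickUploadAPI(size, cutoff int64) string {
	if size < cutoff {
		return "upload (multipart)"
	}
	return "uploadLargeFile (stream/upload)"
}

func main() {
	fmt.Println(pickUploadAPI(1<<20, defaultUploadCutoff))  // upload (multipart)
	fmt.Println(pickUploadAPI(64<<20, defaultUploadCutoff)) // uploadLargeFile (stream/upload)
}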
@@ -1158,7 +1348,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 		Options: options,
 	}
 	err = o.fs.pacer.Call(func() (bool, error) {
-		resp, err = o.fs.srv.Call(ctx, &opts)
+		resp, err = o.fs.downloadsrv.Call(ctx, &opts)
 		return shouldRetry(ctx, resp, err)
 	})
 	if err != nil {
@@ -1182,11 +1372,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 		return err
 	}
 
-	// Overwrite the old file
-	info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+	// use normal upload API for small sizes (<10MiB)
+	if size < int64(o.fs.opt.UploadCutoff) {
+		info, err := o.fs.upload(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
+		if err != nil {
+			return err
+		}
+
+		return o.setMetaData(info)
+	}
+
+	// large file API otherwise
+	info, err := o.fs.uploadLargeFile(ctx, o.fs.opt.Enc.FromStandardName(leaf), directoryID, size, in, options...)
 	if err != nil {
 		return err
 	}
 
 	return o.setMetaData(info)
 }
@@ -11,7 +11,8 @@ import (
 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName: "TestZoho:",
-		NilObject:  (*zoho.Object)(nil),
+		SkipInvalidUTF8: true,
+		NilObject:       (*zoho.Object)(nil),
 	})
 }
@@ -21,17 +21,20 @@ def find_backends():
 def output_docs(backend, out, cwd):
     """Output documentation for backend options to out"""
     out.flush()
-    subprocess.check_call(["./rclone", "help", "backend", backend], stdout=out)
+    subprocess.check_call(["./rclone", "--config=/notfound", "help", "backend", backend], stdout=out)
 
 def output_backend_tool_docs(backend, out, cwd):
     """Output documentation for backend tool to out"""
     out.flush()
-    subprocess.call(["./rclone", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
+    subprocess.call(["./rclone", "--config=/notfound", "backend", "help", backend], stdout=out, stderr=subprocess.DEVNULL)
 
 def alter_doc(backend):
     """Alter the documentation for backend"""
     rclone_bin_dir = Path(sys.path[0]).parent.absolute()
     doc_file = "docs/content/"+backend+".md"
+    doc_file2 = "docs/content/"+backend+"/_index.md"
+    if not os.path.exists(doc_file) and os.path.exists(doc_file2):
+        doc_file = doc_file2
     if not os.path.exists(doc_file):
         raise ValueError("Didn't find doc file %s" % (doc_file,))
     new_file = doc_file+"~new~"
@@ -52,6 +52,7 @@ docs = [
     "hidrive.md",
     "http.md",
     "imagekit.md",
+    "iclouddrive.md",
     "internetarchive.md",
     "jottacloud.md",
     "koofr.md",
@@ -64,7 +65,7 @@ docs = [
     "azurefiles.md",
     "onedrive.md",
     "opendrive.md",
-    "oracleobjectstorage.md",
+    "oracleobjectstorage/_index.md",
     "qingstor.md",
     "quatrix.md",
     "sia.md",
@@ -81,7 +82,6 @@ docs = [
     "smb.md",
     "storj.md",
     "sugarsync.md",
-    "tardigrade.md", # stub only to redirect to storj.md
     "ulozto.md",
     "uptobox.md",
     "union.md",
@@ -159,6 +159,7 @@ def read_doc(doc):
 def check_docs(docpath):
     """Check all the docs are in docpath"""
     files = set(f for f in os.listdir(docpath) if f.endswith(".md"))
+    files.update(f for f in docs if os.path.exists(os.path.join(docpath,f)))
     files -= set(ignore_docs)
     docs_set = set(docs)
     if files == docs_set:
@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
 	cmd := exec.Command("git", "log", "--oneline", from+".."+to)
 	out, err := cmd.Output()
 	if err != nil {
-		log.Fatalf("failed to run git log %s: %v", from+".."+to, err)
+		log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
 	}
 	logMap = map[string]string{}
 	logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
 	}
 	match := logRe.FindSubmatch(line)
 	if match == nil {
-		log.Fatalf("failed to parse line: %q", line)
+		log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
 	}
 	var hash, logMessage = string(match[1]), string(match[2])
 	logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
 	flag.Parse()
 	args := flag.Args()
 	if len(args) != 0 {
-		log.Fatalf("Syntax: %s", os.Args[0])
+		log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
 	}
 	// v1.54.0
 	versionBytes, err := os.ReadFile("VERSION")
 	if err != nil {
-		log.Fatalf("Failed to read version: %v", err)
+		log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
 	}
 	if versionBytes[0] == 'v' {
 		versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
 	versionBytes = bytes.TrimSpace(versionBytes)
 	semver := semver.New(string(versionBytes))
 	stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
-	log.Printf("Finding commits in %v not in stable %s", semver, stable)
+	log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
 	masterMap, masterLogs := readCommits(stable+".0", "master")
 	stableMap, _ := readCommits(stable+".0", stable+"-stable")
 	for _, logMessage := range masterLogs {
51 bin/rules.go Normal file
@@ -0,0 +1,51 @@
+// Ruleguard file implementing custom linting rules.
+//
+// Note that when used from golangci-lint (using the gocritic linter configured
+// with the ruleguard check), because rule files are not handled by
+// golangci-lint itself, changes will not invalidate the golangci-lint cache,
+// and you must manually clean to cache (golangci-lint cache clean) for them to
+// be considered, as explained here:
+// https://www.quasilyte.dev/blog/post/ruleguard/#using-from-the-golangci-lint
+//
+// Note that this file is ignored from build with a build constraint, but using
+// a different than "ignore" to avoid go mod tidy making dsl an indirect
+// dependency, as explained here:
+// https://github.com/quasilyte/go-ruleguard?tab=readme-ov-file#troubleshooting
+
+//go:build ruleguard
+// +build ruleguard
+
+// Package gorules implementing custom linting rules using ruleguard
+package gorules
+
+import "github.com/quasilyte/go-ruleguard/dsl"
+
+// Suggest rewriting "log.(Print|Fatal|Panic)(f|ln)?" to
+// "fs.(Printf|Fatalf|Panicf)", and do it if running golangci-lint with
+// argument --fix. The suggestion wraps a single non-string single argument or
+// variadic arguments in fmt.Sprint to be compatible with format string
+// argument of fs functions.
+//
+// Caveats:
+// - After applying the suggestions, imports may have to be fixed manually,
+//   removing unused "log", adding "github.com/rclone/rclone/fs" and "fmt",
+//   and if there was a variable named "fs" or "fmt" in the scope the name
+//   clash must be fixed.
+// - Suggested code is incorrect when within fs package itself, due to the
+//   "fs." prefix. Could handle it using condition
+//   ".Where(m.File().PkgPath.Matches(`github.com/rclone/rclone/fs`))"
+//   but not sure how to avoid duplicating all checks with and without this
+//   condition so haven't bothered yet.
+func useFsLog(m dsl.Matcher) {
+	m.Match(`log.Print($x)`, `log.Println($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Log(nil, $x)`)
+	m.Match(`log.Print($*args)`, `log.Println($*args)`).Suggest(`fs.Log(nil, fmt.Sprint($args))`)
+	m.Match(`log.Printf($*args)`).Suggest(`fs.Logf(nil, $args)`)
+
+	m.Match(`log.Fatal($x)`, `log.Fatalln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Fatal(nil, $x)`)
+	m.Match(`log.Fatal($*args)`, `log.Fatalln($*args)`).Suggest(`fs.Fatal(nil, fmt.Sprint($args))`)
+	m.Match(`log.Fatalf($*args)`).Suggest(`fs.Fatalf(nil, $args)`)
+
+	m.Match(`log.Panic($x)`, `log.Panicln($x)`).Where(m["x"].Type.Is(`string`)).Suggest(`fs.Panic(nil, $x)`)
+	m.Match(`log.Panic($*args)`, `log.Panicln($*args)`).Suggest(`fs.Panic(nil, fmt.Sprint($args))`)
+	m.Match(`log.Panicf($*args)`).Suggest(`fs.Panicf(nil, $args)`)
+}
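To make the effect of these rules concrete, a hedged before and after sketch of what applying the useFsLog suggestion (for example via golangci-lint run --fix) would produce for one call; the surrounding code is invented for illustration:

package main

import "github.com/rclone/rclone/fs"

func main() {
	remote := "TestZoho:"
	// Before the suggestion: log.Printf("remote: %v", remote)
	// After the suggestion is applied:
	fs.Logf(nil, "remote: %v", remote)
}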
@@ -10,12 +10,12 @@ import (
 	"errors"
 	"flag"
 	"fmt"
-	"log"
 	"os"
 	"path"
 	"path/filepath"
 	"regexp"
 	"runtime"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -208,15 +208,16 @@ type bisyncTest struct {
 	parent1 fs.Fs
 	parent2 fs.Fs
 	// global flags
 	argRemote1 string
 	argRemote2 string
 	noCompare  bool
 	noCleanup  bool
 	golden     bool
 	debug      bool
 	stopAt     int
 	TestFn     bisync.TestFunc
 	ignoreModtime bool // ignore modtimes when comparing final listings, for backends without support
+	ignoreBlankHash bool // ignore blank hashes for backends where we allow them to be blank
 }
 
 var color = bisync.Color
@@ -232,7 +233,7 @@ func TestBisyncRemoteLocal(t *testing.T) {
 		t.Skip("path1 and path2 are the same remote")
 	}
 	_, remote, cleanup, err := fstest.RandomRemote()
-	log.Printf("remote: %v", remote)
+	fs.Logf(nil, "remote: %v", remote)
 	require.NoError(t, err)
 	defer cleanup()
 	testBisync(t, remote, *argRemote2)
@@ -244,7 +245,7 @@ func TestBisyncLocalRemote(t *testing.T) {
 		t.Skip("path1 and path2 are the same remote")
 	}
 	_, remote, cleanup, err := fstest.RandomRemote()
-	log.Printf("remote: %v", remote)
+	fs.Logf(nil, "remote: %v", remote)
 	require.NoError(t, err)
 	defer cleanup()
 	testBisync(t, *argRemote2, remote)
@@ -254,7 +255,7 @@ func TestBisyncLocalRemote(t *testing.T) {
 // (useful for testing server-side copy/move)
 func TestBisyncRemoteRemote(t *testing.T) {
 	_, remote, cleanup, err := fstest.RandomRemote()
-	log.Printf("remote: %v", remote)
+	fs.Logf(nil, "remote: %v", remote)
 	require.NoError(t, err)
 	defer cleanup()
 	testBisync(t, remote, remote)
@@ -450,13 +451,13 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase string) {
 	for _, dir := range srcDirs {
 		dirs = append(dirs, norm.NFC.String(dir.Remote()))
 	}
-	log.Printf("checking initFs %s", initFs)
+	fs.Logf(nil, "checking initFs %s", initFs)
 	fstest.CheckListingWithPrecision(b.t, initFs, items, dirs, initFs.Precision())
 	checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs1, initFs, true), "setting up path1")
-	log.Printf("checking Path1 %s", b.fs1)
+	fs.Logf(nil, "checking Path1 %s", b.fs1)
 	fstest.CheckListingWithPrecision(b.t, b.fs1, items, dirs, b.fs1.Precision())
 	checkError(b.t, sync.CopyDir(ctxNoDsStore, b.fs2, initFs, true), "setting up path2")
-	log.Printf("checking path2 %s", b.fs2)
+	fs.Logf(nil, "checking path2 %s", b.fs2)
 	fstest.CheckListingWithPrecision(b.t, b.fs2, items, dirs, b.fs2.Precision())
 
 	// Create log file
@@ -514,21 +515,21 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase string) {
 	require.NoError(b.t, err, "saving log file %s", savedLog)
 
 	if b.golden && !b.stopped {
-		log.Printf("Store results to golden directory")
+		fs.Logf(nil, "Store results to golden directory")
 		b.storeGolden()
 		return
 	}
 
 	errorCount := 0
 	if b.noCompare {
-		log.Printf("Skip comparing results with golden directory")
+		fs.Logf(nil, "Skip comparing results with golden directory")
 		errorCount = -2
 	} else {
 		errorCount = b.compareResults()
 	}
 
 	if b.noCleanup {
-		log.Printf("Skip cleanup")
+		fs.Logf(nil, "Skip cleanup")
 	} else {
 		b.cleanupCase(ctx)
 	}
@@ -947,6 +948,10 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (context.Context, *bisync.Options) {
 	if (!b.fs1.Features().CanHaveEmptyDirectories || !b.fs2.Features().CanHaveEmptyDirectories) && (b.testCase == "createemptysrcdirs" || b.testCase == "rmdirs") {
 		b.t.Skip("skipping test as remote does not support empty dirs")
 	}
+	ignoreHashBackends := []string{"TestWebdavNextcloud", "TestWebdavOwncloud", "TestAzureFiles"} // backends that support hashes but allow them to be blank
+	if slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs1.Name(), prefix) }) || slices.ContainsFunc(ignoreHashBackends, func(prefix string) bool { return strings.HasPrefix(b.fs2.Name(), prefix) }) {
+		b.ignoreBlankHash = true
+	}
 	if b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported {
 		if b.testCase != "nomodtime" {
 			b.t.Skip("skipping test as at least one remote does not support setting modtime")
@@ -1383,24 +1388,24 @@ func (b *bisyncTest) compareResults() int {
 	const divider = "----------------------------------------------------------"
 
 	if goldenNum != resultNum {
-		log.Print(divider)
-		log.Print(color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:"))
-		log.Printf("  Golden count: %d", goldenNum)
-		log.Printf("  Result count: %d", resultNum)
-		log.Printf("  Golden files: %s", strings.Join(goldenFiles, ", "))
-		log.Printf("  Result files: %s", strings.Join(resultFiles, ", "))
+		fs.Log(nil, divider)
+		fs.Log(nil, color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:"))
+		fs.Logf(nil, "  Golden count: %d", goldenNum)
+		fs.Logf(nil, "  Result count: %d", resultNum)
+		fs.Logf(nil, "  Golden files: %s", strings.Join(goldenFiles, ", "))
+		fs.Logf(nil, "  Result files: %s", strings.Join(resultFiles, ", "))
 	}
 
 	for _, file := range goldenFiles {
 		if !resultSet.Has(file) {
 			errorCount++
-			log.Printf("  File found in Golden but not in Results: %s", file)
+			fs.Logf(nil, "  File found in Golden but not in Results: %s", file)
 		}
 	}
 	for _, file := range resultFiles {
 		if !goldenSet.Has(file) {
 			errorCount++
-			log.Printf("  File found in Results but not in Golden: %s", file)
+			fs.Logf(nil, "  File found in Results but not in Golden: %s", file)
 		}
 	}
 
@@ -1433,15 +1438,15 @@ func (b *bisyncTest) compareResults() int {
 	text, err := difflib.GetUnifiedDiffString(diff)
 	require.NoError(b.t, err, "diff failed")
 
-	log.Print(divider)
-	log.Printf(color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
+	fs.Log(nil, divider)
+	fs.Logf(nil, color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
 	for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
-		log.Printf("| %s", strings.TrimSpace(line))
+		fs.Logf(nil, "| %s", strings.TrimSpace(line))
 	}
 	}
 
 	if errorCount > 0 {
-		log.Print(divider)
+		fs.Log(nil, divider)
 	}
 	if errorCount == 0 && goldenNum != resultNum {
 		return -1
@@ -1464,7 +1469,7 @@ func (b *bisyncTest) storeGolden() {
 			continue
 		}
 		if fileName == "backupdirs" {
-			log.Printf("skipping: %v", fileName)
+			fs.Logf(nil, "skipping: %v", fileName)
 			continue
 		}
 		goldName := b.toGolden(fileName)
@@ -1489,7 +1494,7 @@ func (b *bisyncTest) storeGolden() {
 			continue
 		}
 		if fileName == "backupdirs" {
-			log.Printf("skipping: %v", fileName)
+			fs.Logf(nil, "skipping: %v", fileName)
 			continue
 		}
 		text := b.mangleResult(b.goldenDir, fileName, true)
@@ -1552,6 +1557,12 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
 	if b.fs1.Hashes() == hash.Set(hash.None) || b.fs2.Hashes() == hash.Set(hash.None) {
 		logReplacements = append(logReplacements, `^.*{hashtype} differ.*$`, dropMe)
 	}
+	if b.ignoreBlankHash {
+		logReplacements = append(logReplacements,
+			`^.*hash is missing.*$`, dropMe,
+			`^.*not equal on recheck.*$`, dropMe,
+		)
+	}
 	rep := logReplacements
 	if b.testCase == "dry_run" {
 		rep = append(rep, dryrunReplacements...)
@@ -1849,7 +1860,7 @@ func fileType(fileName string) string {
 // logPrintf prints a message to stdout and to the test log
 func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
 	line := fmt.Sprintf(text, args...)
-	log.Print(line)
+	fs.Log(nil, line)
 	if b.logFile != nil {
 		_, err := fmt.Fprintln(b.logFile, line)
 		require.NoError(b.t, err, "writing log file")
@@ -20,6 +20,7 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/filter"
+	"github.com/rclone/rclone/fs/fserrors"
 	"github.com/rclone/rclone/fs/hash"
 
 	"github.com/spf13/cobra"
@@ -193,7 +194,7 @@ var commandDefinition = &cobra.Command{
 	cmd.Run(false, true, command, func() error {
 		err := Bisync(ctx, fs1, fs2, &opt)
 		if err == ErrBisyncAborted {
-			os.Exit(2)
+			return fserrors.FatalError(err)
 		}
 		return err
 	})
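Returning fserrors.FatalError instead of calling os.Exit(2) lets deferred cleanup run and leaves the exit status to the command runner, which maps a fatal error to a non-zero exit code. A minimal sketch of the wrapping, assuming the fserrors API as used in this diff:

package bisyncsketch

import (
	"errors"

	"github.com/rclone/rclone/fs/fserrors"
)

var errBisyncAborted = errors.New("bisync aborted")

// wrapAborted mirrors the new flow: mark the abort as fatal rather than
// exiting the process directly.
func wrapAborted(err error) error {
	if errors.Is(err, errBisyncAborted) {
		return fserrors.FatalError(err)
	}
	return err
}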
@@ -10,6 +10,7 @@ import (
 	"io"
 	"os"
 	"regexp"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -21,7 +22,6 @@ import (
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/hash"
 	"github.com/rclone/rclone/fs/operations"
-	"golang.org/x/exp/slices"
 )
 
 // ListingHeader defines first line of a listing
@@ -43,8 +43,10 @@ var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT
 const timeFormat = "2006-01-02T15:04:05.000000000-0700"
 
 // TZ defines time zone used in listings
-var TZ = time.UTC
-var tzLocal = false
+var (
+	TZ      = time.UTC
+	tzLocal = false
+)
 
 // fileInfo describes a file
 type fileInfo struct {
@@ -83,7 +85,7 @@ func (ls *fileList) has(file string) bool {
 	}
 	_, found := ls.info[file]
 	if !found {
-		//try unquoting
+		// try unquoting
 		file, _ = strconv.Unquote(`"` + file + `"`)
 		_, found = ls.info[file]
 	}
@@ -93,7 +95,7 @@ func (ls *fileList) has(file string) bool {
 func (ls *fileList) get(file string) *fileInfo {
 	info, found := ls.info[file]
 	if !found {
-		//try unquoting
+		// try unquoting
 		file, _ = strconv.Unquote(`"` + file + `"`)
 		info = ls.info[fmt.Sprint(file)]
 	}
@@ -420,7 +422,7 @@ func (b *bisyncRun) loadListingNum(listingNum int) (*fileList, error) {
 
 func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
 	var fulllisting *fileList
-	var dirsonly = newFileList()
+	dirsonly := newFileList()
 	var err error
 
 	if !b.opt.CreateEmptySrcDirs {
@@ -450,24 +452,6 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
 	return dirsonly, err
 }
 
-// ConvertPrecision returns the Modtime rounded to Dest's precision if lower, otherwise unchanged
-// Need to use the other fs's precision (if lower) when copying
-// Note: we need to use Truncate rather than Round so that After() is reliable.
-// (2023-11-02 20:22:45.552679442 +0000 < UTC 2023-11-02 20:22:45.553 +0000 UTC)
-func ConvertPrecision(Modtime time.Time, dst fs.Fs) time.Time {
-	DestPrecision := dst.Precision()
-
-	// In case it's wrapping an Fs with lower precision, try unwrapping and use the lowest.
-	if Modtime.Truncate(DestPrecision).After(Modtime.Truncate(fs.UnWrapFs(dst).Precision())) {
-		DestPrecision = fs.UnWrapFs(dst).Precision()
-	}
-
-	if Modtime.After(Modtime.Truncate(DestPrecision)) {
-		return Modtime.Truncate(DestPrecision)
-	}
-	return Modtime
-}
-
 // modifyListing will modify the listing based on the results of the sync
 func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) {
 	queue := queues.copy2to1
@@ -533,13 +517,13 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) {
 
 	// build src winners list
 	if result.IsSrc && result.Src != "" && (result.Winner.Err == nil || result.Flags == "d") {
-		srcWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, src), result.Hash, "-", result.Flags)
+		srcWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags)
 		prettyprint(result, "winner: copy to src", fs.LogLevelDebug)
 	}
 
 	// build dst winners list
 	if result.IsWinner && result.Winner.Side != "none" && (result.Winner.Err == nil || result.Flags == "d") {
-		dstWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, dst), result.Hash, "-", result.Flags)
+		dstWinners.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags)
 		prettyprint(result, "winner: copy to dst", fs.LogLevelDebug)
 	}
 
@@ -623,7 +607,7 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) {
 	}
 	if srcNewName != "" { // if it was renamed and not deleted
 		srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
-		dstList.put(srcNewName, new.size, ConvertPrecision(new.time, src), new.hash, new.id, new.flags)
+		dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
 	}
 	if srcNewName != srcOldName {
 		srcList.remove(srcOldName)
@@ -23,7 +23,7 @@ import (
 	"github.com/rclone/rclone/lib/terminal"
 )
 
-// ErrBisyncAborted signals that bisync is aborted and forces exit code 2
+// ErrBisyncAborted signals that bisync is aborted and forces non-zero exit code
 var ErrBisyncAborted = errors.New("bisync aborted")
 
 // bisyncRun keeps bisync runtime state
@@ -4,11 +4,11 @@ package cat
 import (
 	"context"
 	"io"
-	"log"
 	"os"
 	"strings"
 
 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
@@ -79,7 +79,7 @@ files, use:
 	usedHead := head > 0
 	usedTail := tail > 0
 	if usedHead && usedTail || usedHead && usedOffset || usedTail && usedOffset {
-		log.Fatalf("Can only use one of --head, --tail or --offset with --count")
+		fs.Fatalf(nil, "Can only use one of --head, --tail or --offset with --count")
 	}
 	if head > 0 {
 		offset = 0
108
cmd/cmd.go
108
cmd/cmd.go
@@ -10,7 +10,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"log"
 	"os"
 	"os/exec"
 	"path"
|
|||||||
version bool
|
version bool
|
||||||
// Errors
|
// Errors
|
||||||
errorCommandNotFound = errors.New("command not found")
|
errorCommandNotFound = errors.New("command not found")
|
||||||
errorUncategorized = errors.New("uncategorized error")
|
|
||||||
errorNotEnoughArguments = errors.New("not enough arguments")
|
errorNotEnoughArguments = errors.New("not enough arguments")
|
||||||
errorTooManyArguments = errors.New("too many arguments")
|
errorTooManyArguments = errors.New("too many arguments")
|
||||||
)
|
)
|
||||||
@@ -85,12 +83,13 @@ func ShowVersion() {
 // It returns a string with the file name if points to a file
 // otherwise "".
 func NewFsFile(remote string) (fs.Fs, string) {
+	ctx := context.Background()
 	_, fsPath, err := fspath.SplitFs(remote)
 	if err != nil {
-		err = fs.CountError(err)
-		log.Fatalf("Failed to create file system for %q: %v", remote, err)
+		err = fs.CountError(ctx, err)
+		fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
 	}
-	f, err := cache.Get(context.Background(), remote)
+	f, err := cache.Get(ctx, remote)
 	switch err {
 	case fs.ErrorIsFile:
 		cache.Pin(f) // pin indefinitely since it was on the CLI
@@ -99,8 +98,8 @@ func NewFsFile(remote string) (fs.Fs, string) {
 		cache.Pin(f) // pin indefinitely since it was on the CLI
 		return f, ""
 	default:
-		err = fs.CountError(err)
-		log.Fatalf("Failed to create file system for %q: %v", remote, err)
+		err = fs.CountError(ctx, err)
+		fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
 	}
 	return nil, ""
 }
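The two-line substitution in this hunk repeats through the rest of the file and much of the commit. A condensed before/after sketch, assuming the new signature is fs.CountError(ctx context.Context, err error) error and that fs.Fatalf is rclone's own fatal logger taking an optional object as its first argument:

    // before:
    //   err = fs.CountError(err)             // counted against global stats
    //   log.Fatalf("failed: %v", err)        // standard library logger
    // after:
    ctx := context.Background()
    if err != nil {
        err = fs.CountError(ctx, err)        // counted against the stats attached to ctx
        fs.Fatalf(nil, "failed: %v", err)    // rclone logger; nil = no associated object
    }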
@@ -110,19 +109,20 @@ func NewFsFile(remote string) (fs.Fs, string) {
 // This works the same as NewFsFile however it adds filters to the Fs
 // to limit it to a single file if the remote pointed to a file.
 func newFsFileAddFilter(remote string) (fs.Fs, string) {
-	fi := filter.GetConfig(context.Background())
+	ctx := context.Background()
+	fi := filter.GetConfig(ctx)
 	f, fileName := NewFsFile(remote)
 	if fileName != "" {
 		if !fi.InActive() {
 			err := fmt.Errorf("can't limit to single files when using filters: %v", remote)
-			err = fs.CountError(err)
-			log.Fatal(err.Error())
+			err = fs.CountError(ctx, err)
+			fs.Fatal(nil, err.Error())
 		}
 		// Limit transfers to this file
 		err := fi.AddFile(fileName)
 		if err != nil {
-			err = fs.CountError(err)
-			log.Fatalf("Failed to limit to single file %q: %v", remote, err)
+			err = fs.CountError(ctx, err)
+			fs.Fatalf(nil, "Failed to limit to single file %q: %v", remote, err)
 		}
 	}
 	return f, fileName
@@ -141,10 +141,11 @@ func NewFsSrc(args []string) fs.Fs {
 //
 // This must point to a directory
 func newFsDir(remote string) fs.Fs {
-	f, err := cache.Get(context.Background(), remote)
+	ctx := context.Background()
+	f, err := cache.Get(ctx, remote)
 	if err != nil {
-		err = fs.CountError(err)
-		log.Fatalf("Failed to create file system for %q: %v", remote, err)
+		err = fs.CountError(ctx, err)
+		fs.Fatalf(nil, "Failed to create file system for %q: %v", remote, err)
 	}
 	cache.Pin(f) // pin indefinitely since it was on the CLI
 	return f
@@ -177,6 +178,7 @@ func NewFsSrcFileDst(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs)
 // NewFsSrcDstFiles creates a new src and dst fs from the arguments
 // If src is a file then srcFileName and dstFileName will be non-empty
 func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs, dstFileName string) {
+	ctx := context.Background()
 	fsrc, srcFileName = newFsFileAddFilter(args[0])
 	// If copying a file...
 	dstRemote := args[1]
@@ -186,24 +188,24 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
 		var err error
 		dstRemote, dstFileName, err = fspath.Split(dstRemote)
 		if err != nil {
-			log.Fatalf("Parsing %q failed: %v", args[1], err)
+			fs.Fatalf(nil, "Parsing %q failed: %v", args[1], err)
 		}
 		if dstRemote == "" {
 			dstRemote = "."
 		}
 		if dstFileName == "" {
-			log.Fatalf("%q is a directory", args[1])
+			fs.Fatalf(nil, "%q is a directory", args[1])
 		}
 	}
-	fdst, err := cache.Get(context.Background(), dstRemote)
+	fdst, err := cache.Get(ctx, dstRemote)
 	switch err {
 	case fs.ErrorIsFile:
-		_ = fs.CountError(err)
-		log.Fatalf("Source doesn't exist or is a directory and destination is a file")
+		_ = fs.CountError(ctx, err)
+		fs.Fatalf(nil, "Source doesn't exist or is a directory and destination is a file")
 	case nil:
 	default:
-		_ = fs.CountError(err)
-		log.Fatalf("Failed to create file system for destination %q: %v", dstRemote, err)
+		_ = fs.CountError(ctx, err)
+		fs.Fatalf(nil, "Failed to create file system for destination %q: %v", dstRemote, err)
 	}
 	cache.Pin(fdst) // pin indefinitely since it was on the CLI
 	return
@@ -213,13 +215,13 @@ func NewFsSrcDstFiles(args []string) (fsrc fs.Fs, srcFileName string, fdst fs.Fs
 func NewFsDstFile(args []string) (fdst fs.Fs, dstFileName string) {
 	dstRemote, dstFileName, err := fspath.Split(args[0])
 	if err != nil {
-		log.Fatalf("Parsing %q failed: %v", args[0], err)
+		fs.Fatalf(nil, "Parsing %q failed: %v", args[0], err)
 	}
 	if dstRemote == "" {
 		dstRemote = "."
 	}
 	if dstFileName == "" {
-		log.Fatalf("%q is a directory", args[0])
+		fs.Fatalf(nil, "%q is a directory", args[0])
 	}
 	fdst = newFsDir(dstRemote)
 	return
@@ -236,7 +238,8 @@ func ShowStats() bool {

 // Run the function with stats and retries if required
 func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
-	ci := fs.GetConfig(context.Background())
+	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	var cmdErr error
 	stopStats := func() {}
 	if !showStats && ShowStats() {
@@ -250,7 +253,7 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
 	SigInfoHandler()
 	for try := 1; try <= ci.Retries; try++ {
 		cmdErr = f()
-		cmdErr = fs.CountError(cmdErr)
+		cmdErr = fs.CountError(ctx, cmdErr)
 		lastErr := accounting.GlobalStats().GetLastError()
 		if cmdErr == nil {
 			cmdErr = lastErr
@@ -328,9 +331,9 @@ func Run(Retry bool, showStats bool, cmd *cobra.Command, f func() error) {
 	if cmdErr != nil {
 		nerrs := accounting.GlobalStats().GetErrors()
 		if nerrs <= 1 {
-			log.Printf("Failed to %s: %v", cmd.Name(), cmdErr)
+			fs.Logf(nil, "Failed to %s: %v", cmd.Name(), cmdErr)
 		} else {
-			log.Printf("Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, cmdErr)
+			fs.Logf(nil, "Failed to %s with %d errors: last error was: %v", cmd.Name(), nerrs, cmdErr)
 		}
 	}
 	resolveExitCode(cmdErr)
@@ -383,7 +386,7 @@ func initConfig() {
 	// Set the global options from the flags
 	err := fs.GlobalOptionsInit()
 	if err != nil {
-		log.Fatalf("Failed to initialise global options: %v", err)
+		fs.Fatalf(nil, "Failed to initialise global options: %v", err)
 	}

 	ctx := context.Background()
@@ -421,9 +424,16 @@ func initConfig() {
 	}

 	// Start the remote control server if configured
-	_, err = rcserver.Start(context.Background(), &rc.Opt)
+	_, err = rcserver.Start(ctx, &rc.Opt)
 	if err != nil {
-		log.Fatalf("Failed to start remote control: %v", err)
+		fs.Fatalf(nil, "Failed to start remote control: %v", err)
+	}
+
+	// Start the metrics server if configured
+	_, err = rcserver.MetricsStart(ctx, &rc.Opt)
+	if err != nil {
+		fs.Fatalf(nil, "Failed to start metrics server: %v", err)
 	}

 	// Setup CPU profiling if desired
@@ -431,20 +441,20 @@ func initConfig() {
 		fs.Infof(nil, "Creating CPU profile %q\n", *cpuProfile)
 		f, err := os.Create(*cpuProfile)
 		if err != nil {
-			err = fs.CountError(err)
-			log.Fatal(err)
+			err = fs.CountError(ctx, err)
+			fs.Fatal(nil, fmt.Sprint(err))
 		}
 		err = pprof.StartCPUProfile(f)
 		if err != nil {
-			err = fs.CountError(err)
-			log.Fatal(err)
+			err = fs.CountError(ctx, err)
+			fs.Fatal(nil, fmt.Sprint(err))
 		}
 		atexit.Register(func() {
 			pprof.StopCPUProfile()
 			err := f.Close()
 			if err != nil {
-				err = fs.CountError(err)
-				log.Fatal(err)
+				err = fs.CountError(ctx, err)
+				fs.Fatal(nil, fmt.Sprint(err))
 			}
 		})
 	}
@@ -455,25 +465,26 @@ func initConfig() {
 			fs.Infof(nil, "Saving Memory profile %q\n", *memProfile)
 			f, err := os.Create(*memProfile)
 			if err != nil {
-				err = fs.CountError(err)
-				log.Fatal(err)
+				err = fs.CountError(ctx, err)
+				fs.Fatal(nil, fmt.Sprint(err))
 			}
 			err = pprof.WriteHeapProfile(f)
 			if err != nil {
-				err = fs.CountError(err)
-				log.Fatal(err)
+				err = fs.CountError(ctx, err)
+				fs.Fatal(nil, fmt.Sprint(err))
 			}
 			err = f.Close()
 			if err != nil {
-				err = fs.CountError(err)
-				log.Fatal(err)
+				err = fs.CountError(ctx, err)
+				fs.Fatal(nil, fmt.Sprint(err))
 			}
 		})
 	}
 }

 func resolveExitCode(err error) {
-	ci := fs.GetConfig(context.Background())
+	ctx := context.Background()
+	ci := fs.GetConfig(ctx)
 	atexit.Run()
 	if err == nil {
 		if ci.ErrorOnNoTransfer {
@@ -489,8 +500,6 @@ func resolveExitCode(err error) {
 		os.Exit(exitcode.DirNotFound)
 	case errors.Is(err, fs.ErrorObjectNotFound):
 		os.Exit(exitcode.FileNotFound)
-	case errors.Is(err, errorUncategorized):
-		os.Exit(exitcode.UncategorizedError)
 	case errors.Is(err, accounting.ErrorMaxTransferLimitReached):
 		os.Exit(exitcode.TransferExceeded)
 	case errors.Is(err, fssync.ErrorMaxDurationReached):
@@ -501,8 +510,10 @@ func resolveExitCode(err error) {
 		os.Exit(exitcode.NoRetryError)
 	case fserrors.IsFatalError(err):
 		os.Exit(exitcode.FatalError)
-	default:
+	case errors.Is(err, errorCommandNotFound), errors.Is(err, errorNotEnoughArguments), errors.Is(err, errorTooManyArguments):
 		os.Exit(exitcode.UsageError)
+	default:
+		os.Exit(exitcode.UncategorizedError)
 	}
 }

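The net effect of the two resolveExitCode hunks: explicit command-line mistakes now exit with the usage-error code, while anything unmatched falls through to the uncategorized code (previously the fallback was the usage error). A hedged sketch of a caller telling the two apart, using the numeric values from rclone's documented exit codes (1 for syntax or usage errors, 2 for errors not otherwise categorised):

    cmd := exec.Command("rclone", "copy", "src:path", "dst:path")
    if err := cmd.Run(); err != nil {
        var exitErr *exec.ExitError
        if errors.As(err, &exitErr) {
            switch exitErr.ExitCode() {
            case 1: // usage/syntax error
                fmt.Println("check the command line")
            case 2: // uncategorized error
                fmt.Println("see the log for details")
            }
        }
    }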
@@ -530,6 +541,7 @@ func Main() {
 		if strings.HasPrefix(err.Error(), "unknown command") && selfupdateEnabled {
 			Root.PrintErrf("You could use '%s selfupdate' to get latest features.\n\n", Root.CommandPath())
 		}
-		log.Fatalf("Fatal error: %v", err)
+		fs.Logf(nil, "Fatal error: %v", err)
+		os.Exit(exitcode.UsageError)
 	}
 }
@@ -36,6 +36,7 @@ func init() {
 	configCommand.AddCommand(configReconnectCommand)
 	configCommand.AddCommand(configDisconnectCommand)
 	configCommand.AddCommand(configUserInfoCommand)
+	configCommand.AddCommand(configEncryptionCommand)
 }

 var configCommand = &cobra.Command{
@@ -518,3 +519,91 @@ system.
 		return nil
 	},
 }
+
+func init() {
+	configEncryptionCommand.AddCommand(configEncryptionSetCommand)
+	configEncryptionCommand.AddCommand(configEncryptionRemoveCommand)
+	configEncryptionCommand.AddCommand(configEncryptionCheckCommand)
+}
+
+var configEncryptionCommand = &cobra.Command{
+	Use:   "encryption",
+	Short: `set, remove and check the encryption for the config file`,
+	Long: `This command sets, clears and checks the encryption for the config file using
+the subcommands below.
+`,
+}
+
+var configEncryptionSetCommand = &cobra.Command{
+	Use:   "set",
+	Short: `Set or change the config file encryption password`,
+	Long: strings.ReplaceAll(`This command sets or changes the config file encryption password.
+
+If there was no config password set then it sets a new one, otherwise
+it changes the existing config password.
+
+Note that if you are changing an encryption password using
+|--password-command| then this will be called once to decrypt the
+config using the old password and then again to read the new
+password to re-encrypt the config.
+
+When |--password-command| is called to change the password then the
+environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
+changing passwords programmatically you can use the environment
+variable to distinguish which password you must supply.
+
+Alternatively you can remove the password first (with |rclone config
+encryption remove|), then set it again with this command which may be
+easier if you don't mind the unencrypted config file being on the disk
+briefly.
+`, "|", "`"),
+	RunE: func(command *cobra.Command, args []string) error {
+		cmd.CheckArgs(0, 0, command, args)
+		config.LoadedData()
+		config.ChangeConfigPasswordAndSave()
+		return nil
+	},
+}
+
+var configEncryptionRemoveCommand = &cobra.Command{
+	Use:   "remove",
+	Short: `Remove the config file encryption password`,
+	Long: strings.ReplaceAll(`Remove the config file encryption password
+
+This removes the config file encryption, returning it to un-encrypted.
+
+If |--password-command| is in use, this will be called to supply the old config
+password.
+
+If the config was not encrypted then no error will be returned and
+this command will do nothing.
+`, "|", "`"),
+	RunE: func(command *cobra.Command, args []string) error {
+		cmd.CheckArgs(0, 0, command, args)
+		config.LoadedData()
+		config.RemoveConfigPasswordAndSave()
+		return nil
+	},
+}
+
+var configEncryptionCheckCommand = &cobra.Command{
+	Use:   "check",
+	Short: `Check that the config file is encrypted`,
+	Long: strings.ReplaceAll(`This checks the config file is encrypted and that you can decrypt it.
+
+It will attempt to decrypt the config using the password you supply.
+
+If decryption fails it will return a non-zero exit code if using
+|--password-command|, otherwise it will prompt again for the password.
+
+If the config file is not encrypted it will return a non-zero exit code.
+`, "|", "`"),
+	RunE: func(command *cobra.Command, args []string) error {
+		cmd.CheckArgs(0, 0, command, args)
+		config.LoadedData()
+		if !config.IsEncrypted() {
+			return errors.New("config file is NOT encrypted")
+		}
+		return nil
+	},
+}
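As a usage sketch of the RCLONE_PASSWORD_CHANGE contract described in the help text above, here is a hypothetical --password-command helper. The assumption (taken from the help text) is that the variable marks the invocation that must supply the new password; the MY_OLD/MY_NEW environment variable names are inventions for the example:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // Assumption: rclone sets RCLONE_PASSWORD_CHANGE=1 on the call that
        // must return the *new* password when changing the config password.
        if os.Getenv("RCLONE_PASSWORD_CHANGE") == "1" {
            fmt.Println(os.Getenv("MY_NEW_RCLONE_PASSWORD"))
            return
        }
        fmt.Println(os.Getenv("MY_OLD_RCLONE_PASSWORD"))
    }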
@@ -3,7 +3,7 @@ package dedupe

 import (
 	"context"
-	"log"
+	"fmt"

 	"github.com/rclone/rclone/cmd"
 	"github.com/rclone/rclone/fs"
@@ -142,7 +142,7 @@ Or
 		if len(args) > 1 {
 			err := dedupeMode.Set(args[0])
 			if err != nil {
-				log.Fatal(err)
+				fs.Fatal(nil, fmt.Sprint(err))
 			}
 			args = args[1:]
 		}
@@ -1,10 +1,11 @@
 package genautocomplete

 import (
-	"log"
+	"fmt"
 	"os"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/spf13/cobra"
 )

@@ -50,7 +51,7 @@ current shell.
 		if args[0] == "-" {
 			err := cmd.Root.GenBashCompletionV2(os.Stdout, false)
 			if err != nil {
-				log.Fatal(err)
+				fs.Fatal(nil, fmt.Sprint(err))
 			}
 			return
 		}
@@ -58,7 +59,7 @@ current shell.
 		}
 		err := cmd.Root.GenBashCompletionFileV2(out, false)
 		if err != nil {
-			log.Fatal(err)
+			fs.Fatal(nil, fmt.Sprint(err))
 		}
 	},
 }
@@ -1,10 +1,11 @@
 package genautocomplete

 import (
-	"log"
+	"fmt"
 	"os"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/spf13/cobra"
 )

@@ -39,7 +40,7 @@ If output_file is "-", then the output will be written to stdout.
 		if args[0] == "-" {
 			err := cmd.Root.GenFishCompletion(os.Stdout, true)
 			if err != nil {
-				log.Fatal(err)
+				fs.Fatal(nil, fmt.Sprint(err))
 			}
 			return
 		}
@@ -47,7 +48,7 @@ If output_file is "-", then the output will be written to stdout.
 		}
 		err := cmd.Root.GenFishCompletionFile(out, true)
 		if err != nil {
-			log.Fatal(err)
+			fs.Fatal(nil, fmt.Sprint(err))
 		}
 	},
 }
@@ -1,10 +1,11 @@
 package genautocomplete

 import (
-	"log"
+	"fmt"
 	"os"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/spf13/cobra"
 )

@@ -31,13 +32,13 @@ If output_file is "-" or missing, then the output will be written to stdout.
 		if len(args) == 0 || (len(args) > 0 && args[0] == "-") {
 			err := cmd.Root.GenPowerShellCompletion(os.Stdout)
 			if err != nil {
-				log.Fatal(err)
+				fs.Fatal(nil, fmt.Sprint(err))
 			}
 			return
 		}
 		err := cmd.Root.GenPowerShellCompletionFile(args[0])
 		if err != nil {
-			log.Fatal(err)
+			fs.Fatal(nil, fmt.Sprint(err))
 		}
 	},
 }
@@ -1,10 +1,11 @@
 package genautocomplete

 import (
-	"log"
+	"fmt"
 	"os"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/spf13/cobra"
 )

@@ -39,7 +40,7 @@ If output_file is "-", then the output will be written to stdout.
 		if args[0] == "-" {
 			err := cmd.Root.GenZshCompletion(os.Stdout)
 			if err != nil {
-				log.Fatal(err)
+				fs.Fatal(nil, fmt.Sprint(err))
 			}
 			return
 		}
@@ -47,12 +48,12 @@ If output_file is "-", then the output will be written to stdout.
 		}
 		outFile, err := os.Create(out)
 		if err != nil {
-			log.Fatal(err)
+			fs.Fatal(nil, fmt.Sprint(err))
 		}
 		defer func() { _ = outFile.Close() }()
 		err = cmd.Root.GenZshCompletion(outFile)
 		if err != nil {
-			log.Fatal(err)
+			fs.Fatal(nil, fmt.Sprint(err))
 		}
 	},
 }
@@ -4,7 +4,6 @@ package gendocs
 import (
 	"bytes"
 	"fmt"
-	"log"
 	"os"
 	"path"
 	"path/filepath"
@@ -14,6 +13,7 @@ import (
 	"time"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/lib/file"
 	"github.com/spf13/cobra"
@@ -144,7 +144,7 @@ rclone.org website.`,
 	var buf bytes.Buffer
 	err := frontmatterTemplate.Execute(&buf, data)
 	if err != nil {
-		log.Fatalf("Failed to render frontmatter template: %v", err)
+		fs.Fatalf(nil, "Failed to render frontmatter template: %v", err)
 	}
 	return buf.String()
 }
@@ -3,7 +3,6 @@ package cmd
 import (
 	"context"
 	"fmt"
-	"log"
 	"os"
 	"regexp"
 	"sort"
@@ -76,7 +75,7 @@ var helpFlags = &cobra.Command{
 		if len(args) > 0 {
 			re, err := filter.GlobStringToRegexp(args[0], false, true)
 			if err != nil {
-				log.Fatalf("Invalid flag filter: %v", err)
+				fs.Fatalf(nil, "Invalid flag filter: %v", err)
 			}
 			fs.Debugf(nil, "Flag filter: %s", re.String())
 			filterFlagsRe = re
@@ -244,6 +243,7 @@ var filterFlagsMultiGroupTemplate = `{{range flagGroups .}}{{if .Flags.HasFlags}
 var docFlagsTemplate = `---
 title: "Global Flags"
 description: "Rclone Global Flags"
+# autogenerated - DO NOT EDIT
 ---

 # Global Flags
@@ -285,7 +285,7 @@ func quoteString(v interface{}) string {
 func showBackend(name string) {
 	backend, err := fs.Find(name)
 	if err != nil {
-		log.Fatal(err)
+		fs.Fatal(nil, fmt.Sprint(err))
 	}
 	var standardOptions, advancedOptions fs.Options
 	done := map[string]struct{}{}
@@ -41,7 +41,7 @@ var commandDefinition = &cobra.Command{
 	Short: `List directories and objects in the path in JSON format.`,
 	Long: `List directories and objects in the path in JSON format.

-The output is an array of Items, where each Item looks like this
+The output is an array of Items, where each Item looks like this:

     {
       "Hashes" : {
@@ -63,44 +63,50 @@ The output is an array of Items, where each Item looks like this
       "Tier" : "hot",
     }

-If ` + "`--hash`" + ` is not specified, the Hashes property will be omitted. The
-types of hash can be specified with the ` + "`--hash-type`" + ` parameter (which
-may be repeated). If ` + "`--hash-type`" + ` is set then it implies ` + "`--hash`" + `.
-
-If ` + "`--no-modtime`" + ` is specified then ModTime will be blank. This can
-speed things up on remotes where reading the ModTime takes an extra
-request (e.g. s3, swift).
-
-If ` + "`--no-mimetype`" + ` is specified then MimeType will be blank. This can
-speed things up on remotes where reading the MimeType takes an extra
-request (e.g. s3, swift).
-
-If ` + "`--encrypted`" + ` is not specified the Encrypted will be omitted.
-
-If ` + "`--dirs-only`" + ` is not specified files in addition to directories are
-returned
-
-If ` + "`--files-only`" + ` is not specified directories in addition to the files
-will be returned.
-
-If ` + "`--metadata`" + ` is set then an additional Metadata key will be returned.
-This will have metadata in rclone standard format as a JSON object.
-
-if ` + "`--stat`" + ` is set then a single JSON blob will be returned about the
-item pointed to. This will return an error if the item isn't found.
-However on bucket based backends (like s3, gcs, b2, azureblob etc) if
-the item isn't found it will return an empty directory as it isn't
-possible to tell empty directories from missing directories there.
+The exact set of properties included depends on the backend:
+
+- The property IsBucket will only be included for bucket-based remotes, and only
+  for directories that are buckets. It will always be omitted when the value is not true.
+- Properties Encrypted and EncryptedPath will only be included for encrypted
+  remotes, and (as mentioned below) only if the ` + "`--encrypted`" + ` option is set.
+
+Different options may also affect which properties are included:
+
+- If ` + "`--hash`" + ` is not specified, the Hashes property will be omitted. The
+  types of hash can be specified with the ` + "`--hash-type`" + ` parameter (which
+  may be repeated). If ` + "`--hash-type`" + ` is set then it implies ` + "`--hash`" + `.
+- If ` + "`--no-modtime`" + ` is specified then ModTime will be blank. This can
+  speed things up on remotes where reading the ModTime takes an extra
+  request (e.g. s3, swift).
+- If ` + "`--no-mimetype`" + ` is specified then MimeType will be blank. This can
+  speed things up on remotes where reading the MimeType takes an extra
+  request (e.g. s3, swift).
+- If ` + "`--encrypted`" + ` is not specified the Encrypted and EncryptedPath
+  properties will be omitted - even for encrypted remotes.
+- If ` + "`--metadata`" + ` is set then an additional Metadata property will be
+  returned. This will have [metadata](/docs/#metadata) in rclone standard format
+  as a JSON object.
+
+The default is to list directories and files/objects, but this can be changed
+with the following options:
+
+- If ` + "`--dirs-only`" + ` is specified then directories will be returned
+  only, no files/objects.
+- If ` + "`--files-only`" + ` is specified then files will be returned only,
+  no directories.
+
+If ` + "`--stat`" + ` is set then the output is not an array of items,
+but instead a single JSON blob will be returned about the item pointed to.
+This will return an error if the item isn't found, however on bucket based
+backends (like s3, gcs, b2, azureblob etc) if the item isn't found it will
+return an empty directory, as it isn't possible to tell empty directories
+from missing directories there.

 The Path field will only show folders below the remote path being listed.
 If "remote:path" contains the file "subfolder/file.txt", the Path for "file.txt"
 will be "subfolder/file.txt", not "remote:path/subfolder/file.txt".
 When used without ` + "`--recursive`" + ` the Path will always be the same as Name.

-If the directory is a bucket in a bucket-based backend, then
-"IsBucket" will be set to true. This key won't be present unless it is
-"true".
-
 The time is in RFC3339 format with up to nanosecond precision. The
 number of decimal digits in the seconds will depend on the precision
 that the remote can hold the times, so if times are accurate to the
@@ -110,7 +116,8 @@ accurate to the nearest second (Dropbox, Box, WebDav, etc.) no digits
 will be shown ("2017-05-31T16:15:57+01:00").

 The whole output can be processed as a JSON blob, or alternatively it
-can be processed line by line as each item is written one to a line.
+can be processed line by line as each item is written on individual lines
+(except with ` + "`--stat`" + `).
 ` + lshelp.Help,
 	Annotations: map[string]string{
 		"versionIntroduced": "v1.37",
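A minimal consumer of the JSON shape documented above could look like the sketch below. The field names follow the help text; properties the listing omits (Hashes without --hash, Metadata without --metadata, IsBucket off bucket-based remotes) are simply left at their zero values by encoding/json:

    package main

    import (
        "encoding/json"
        "fmt"
        "os"
    )

    // Item mirrors the properties named in the lsjson help text.
    type Item struct {
        Path     string
        Name     string
        Size     int64
        MimeType string
        ModTime  string
        IsDir    bool
        IsBucket bool              // bucket-based remotes only
        Hashes   map[string]string // only with --hash / --hash-type
        Metadata map[string]string // only with --metadata
    }

    func main() {
        // e.g. rclone lsjson remote:path | lsjson-consumer
        var items []Item
        if err := json.NewDecoder(os.Stdin).Decode(&items); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        for _, it := range items {
            fmt.Printf("%s (%d bytes, dir=%v)\n", it.Path, it.Size, it.IsDir)
        }
    }

Note this decodes the default array form; with --stat the output is a single object, so you would decode into Item rather than []Item.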
@@ -5,7 +5,6 @@ package mount2

 import (
 	"fmt"
-	"log"
 	"runtime"
 	"time"

@@ -150,7 +149,7 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou
 		opts = append(opts, "ro")
 	}
 	if fsys.opt.WritebackCache {
-		log.Printf("FIXME --write-back-cache not supported")
+		fs.Printf(nil, "FIXME --write-back-cache not supported")
 		// FIXME opts = append(opts,fuse.WritebackCache())
 	}
 	// Some OS X only options
@@ -5,7 +5,6 @@ import (
 	"context"
 	_ "embed"
 	"fmt"
-	"log"
 	"os"
 	"runtime"
 	"strings"
@@ -312,7 +311,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
 			err = mnt.Wait()
 		}
 		if err != nil {
-			log.Fatalf("Fatal error: %v", err)
+			fs.Fatalf(nil, "Fatal error: %v", err)
 		}
 		return
 	}
@@ -340,7 +339,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
 			atexit.Unregister(handle)
 		}
 		if err != nil {
-			log.Fatalf("Fatal error: %v", err)
+			fs.Fatalf(nil, "Fatal error: %v", err)
 		}
 	},
 }
@@ -42,7 +42,9 @@ When running in background mode the user will have to stop the mount manually:

     # Linux
     fusermount -u /path/to/local/mount
-    # OS X
+    #... or on some systems
+    fusermount3 -u /path/to/local/mount
+    # OS X or Linux when using nfsmount
     umount /path/to/local/mount

 The umount operation can fail, for example when the mountpoint is busy.
@@ -386,9 +388,9 @@ Note that systemd runs mount units without any environment variables including
 `PATH` or `HOME`. This means that tilde (`~`) expansion will not work
 and you should provide `--config` and `--cache-dir` explicitly as absolute
 paths via rclone arguments.
-Since mounting requires the `fusermount` program, rclone will use the fallback
-PATH of `/bin:/usr/bin` in this scenario. Please ensure that `fusermount`
-is present on this PATH.
+Since mounting requires the `fusermount` or `fusermount3` program,
+rclone will use the fallback PATH of `/bin:/usr/bin` in this scenario.
+Please ensure that `fusermount`/`fusermount3` is present on this PATH.

 ### Rclone as Unix mount helper

@@ -3,7 +3,6 @@ package mountlib
 import (
 	"context"
 	"errors"
-	"log"
 	"sort"
 	"sync"
 	"time"
@@ -123,12 +122,12 @@ func mountRc(ctx context.Context, in rc.Params) (out rc.Params, err error) {
 	mnt := NewMountPoint(mountFn, mountPoint, fdst, &mountOpt, &vfsOpt)
 	_, err = mnt.Mount()
 	if err != nil {
-		log.Printf("mount FAILED: %v", err)
+		fs.Logf(nil, "mount FAILED: %v", err)
 		return nil, err
 	}
 	go func() {
 		if err = mnt.Wait(); err != nil {
-			log.Printf("unmount FAILED: %v", err)
+			fs.Logf(nil, "unmount FAILED: %v", err)
 			return
 		}
 		mountMu.Lock()
@@ -929,23 +929,23 @@ func (u *UI) Run() error {
 		return fmt.Errorf("screen init: %w", err)
 	}

-	// Hijack fs.LogPrint so that it doesn't corrupt the screen.
-	if logPrint := fs.LogPrint; !log.Redirected() {
+	// Hijack fs.LogOutput so that it doesn't corrupt the screen.
+	if logOutput := fs.LogOutput; !log.Redirected() {
 		type log struct {
 			text  string
 			level fs.LogLevel
 		}
 		var logs []log
-		fs.LogPrint = func(level fs.LogLevel, text string) {
+		fs.LogOutput = func(level fs.LogLevel, text string) {
 			if len(logs) > 100 {
 				logs = logs[len(logs)-100:]
 			}
 			logs = append(logs, log{level: level, text: text})
 		}
 		defer func() {
-			fs.LogPrint = logPrint
+			fs.LogOutput = logOutput
 			for i := range logs {
-				logPrint(logs[i].level, logs[i].text)
+				logOutput(logs[i].level, logs[i].text)
 			}
 		}()
 	}
@@ -21,8 +21,7 @@ import (
 )

 var (
 	sudo = false
-	nfsServerOpt nfs.Options
 )

 func init() {
@@ -33,11 +32,11 @@ func init() {
 	mountlib.AddRc(name, mount)
 	cmdFlags := cmd.Flags()
 	flags.BoolVarP(cmdFlags, &sudo, "sudo", "", sudo, "Use sudo to run the mount/umount commands as root.", "")
-	nfs.AddFlags(cmdFlags, &nfsServerOpt)
+	nfs.AddFlags(cmdFlags)
 }

 func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (asyncerrors <-chan error, unmount func() error, err error) {
-	s, err := nfs.NewServer(context.Background(), VFS, &nfsServerOpt)
+	s, err := nfs.NewServer(context.Background(), VFS, &nfs.Opt)
 	if err != nil {
 		return
 	}
@@ -7,6 +7,7 @@ import (
 	"runtime"
 	"testing"

+	"github.com/rclone/rclone/cmd/serve/nfs"
 	"github.com/rclone/rclone/vfs/vfscommon"
 	"github.com/rclone/rclone/vfs/vfstest"
 	"github.com/stretchr/testify/require"
@@ -29,7 +30,7 @@ func TestMount(t *testing.T) {
 		}
 		sudo = true
 	}
-	nfsServerOpt.HandleCacheDir = t.TempDir()
-	require.NoError(t, nfsServerOpt.HandleCache.Set("disk"))
+	nfs.Opt.HandleCacheDir = t.TempDir()
+	require.NoError(t, nfs.Opt.HandleCache.Set("disk"))
 	vfstest.RunTests(t, false, vfscommon.CacheModeWrites, false, mount)
 }
@@ -28,12 +28,12 @@ const (
 // It returns a func which should be called to stop the stats.
 func startProgress() func() {
 	stopStats := make(chan struct{})
-	oldLogPrint := fs.LogPrint
+	oldLogOutput := fs.LogOutput
 	oldSyncPrint := operations.SyncPrintf

 	if !log.Redirected() {
 		// Intercept the log calls if not logging to file or syslog
-		fs.LogPrint = func(level fs.LogLevel, text string) {
+		fs.LogOutput = func(level fs.LogLevel, text string) {
 			printProgress(fmt.Sprintf("%s %-6s: %s", time.Now().Format(logTimeFormat), level, text))
 		}
@@ -60,7 +60,7 @@ func startProgress() func() {
 		case <-stopStats:
 			ticker.Stop()
 			printProgress("")
-			fs.LogPrint = oldLogPrint
+			fs.LogOutput = oldLogOutput
 			operations.SyncPrintf = oldSyncPrint
 			fmt.Println("")
 			return
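This hunk and the ncdu one above use the same discipline when hijacking the renamed fs.LogOutput hook: save the old value, install an interceptor, restore it on the way out. A stripped-down sketch of the pattern (the hook name and signature come from the diff, the rest is illustrative):

    func withInterceptedLogs(display func(string), run func()) {
        oldLogOutput := fs.LogOutput
        fs.LogOutput = func(level fs.LogLevel, text string) {
            display(text) // divert log lines so they don't corrupt the screen
        }
        defer func() { fs.LogOutput = oldLogOutput }()
        run()
    }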
@@ -3,11 +3,11 @@ package rcat

 import (
 	"context"
-	"log"
 	"os"
 	"time"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/flags"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
@@ -64,7 +64,7 @@ destination which can use retries.`,

 		stat, _ := os.Stdin.Stat()
 		if (stat.Mode() & os.ModeCharDevice) != 0 {
-			log.Fatalf("nothing to read from standard input (stdin).")
+			fs.Fatalf(nil, "nothing to read from standard input (stdin).")
 		}

 		fdst, dstFileName := cmd.NewFsDstFile(args)
@@ -3,9 +3,9 @@ package rcd

 import (
 	"context"
-	"log"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/rc"
 	"github.com/rclone/rclone/fs/rc/rcflags"
 	"github.com/rclone/rclone/fs/rc/rcserver"
@@ -39,7 +39,7 @@ See the [rc documentation](/rc/) for more info on the rc flags.
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(0, 1, command, args)
 		if rc.Opt.Enabled {
-			log.Fatalf("Don't supply --rc flag when using rcd")
+			fs.Fatalf(nil, "Don't supply --rc flag when using rcd")
 		}

 		// Start the rc
@@ -50,10 +50,10 @@ See the [rc documentation](/rc/) for more info on the rc flags.

 		s, err := rcserver.Start(context.Background(), &rc.Opt)
 		if err != nil {
-			log.Fatalf("Failed to start remote control: %v", err)
+			fs.Fatalf(nil, "Failed to start remote control: %v", err)
 		}
 		if s == nil {
-			log.Fatal("rc server not configured")
+			fs.Fatal(nil, "rc server not configured")
 		}

 		// Notify stopping on exit
@@ -14,7 +14,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"os"
 	"os/exec"
@@ -83,19 +82,19 @@ var cmdSelfUpdate = &cobra.Command{
 		}
 		if Opt.Package != "zip" {
 			if Opt.Package != "deb" && Opt.Package != "rpm" {
-				log.Fatalf("--package should be one of zip|deb|rpm")
+				fs.Fatalf(nil, "--package should be one of zip|deb|rpm")
 			}
 			if runtime.GOOS != "linux" {
-				log.Fatalf(".deb and .rpm packages are supported only on Linux")
+				fs.Fatalf(nil, ".deb and .rpm packages are supported only on Linux")
 			} else if os.Geteuid() != 0 && !Opt.Check {
-				log.Fatalf(".deb and .rpm must be installed by root")
+				fs.Fatalf(nil, ".deb and .rpm must be installed by root")
 			}
 			if Opt.Output != "" && !Opt.Check {
 				fmt.Println("Warning: --output is ignored with --package deb|rpm")
 			}
 		}
 		if err := InstallUpdate(context.Background(), &Opt); err != nil {
-			log.Fatalf("Error: %v", err)
+			fs.Fatalf(nil, "Error: %v", err)
 		}
 	},
 }
@@ -5,7 +5,6 @@ import (
 	"encoding/xml"
 	"errors"
 	"fmt"
-	"log"
 	"net/http"
 	"net/url"
 	"os"
@@ -360,7 +359,7 @@ func (o *object) FilePath() string {
 // Returns the ObjectID for the object. This is used in various ContentDirectory actions.
 func (o object) ID() string {
 	if !path.IsAbs(o.Path) {
-		log.Panicf("Relative object path: %s", o.Path)
+		fs.Panicf(nil, "Relative object path: %s", o.Path)
 	}
 	if len(o.Path) == 1 {
 		return "0"
@@ -190,16 +190,17 @@ func (s *server) ModelNumber() string {

 // Renders the root device descriptor.
 func (s *server) rootDescHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 	tmpl, err := data.GetTemplate()
 	if err != nil {
-		serveError(s, w, "Failed to load root descriptor template", err)
+		serveError(ctx, s, w, "Failed to load root descriptor template", err)
 		return
 	}

 	buffer := new(bytes.Buffer)
 	err = tmpl.Execute(buffer, s)
 	if err != nil {
-		serveError(s, w, "Failed to render root descriptor XML", err)
+		serveError(ctx, s, w, "Failed to render root descriptor XML", err)
 		return
 	}

@@ -215,15 +216,16 @@ func (s *server) rootDescHandler(w http.ResponseWriter, r *http.Request) {

 // Handle a service control HTTP request.
 func (s *server) serviceControlHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 	soapActionString := r.Header.Get("SOAPACTION")
 	soapAction, err := upnp.ParseActionHTTPHeader(soapActionString)
 	if err != nil {
-		serveError(s, w, "Could not parse SOAPACTION header", err)
+		serveError(ctx, s, w, "Could not parse SOAPACTION header", err)
 		return
 	}
 	var env soap.Envelope
 	if err := xml.NewDecoder(r.Body).Decode(&env); err != nil {
-		serveError(s, w, "Could not parse SOAP request body", err)
+		serveError(ctx, s, w, "Could not parse SOAP request body", err)
 		return
 	}

@@ -257,6 +259,7 @@ func (s *server) soapActionResponse(sa upnp.SoapAction, actionRequestXML []byte,

 // Serves actual resources (media files).
 func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
 	remotePath := r.URL.Path
 	node, err := s.vfs.Stat(r.URL.Path)
 	if err != nil {
@@ -277,7 +280,7 @@ func (s *server) resourceHandler(w http.ResponseWriter, r *http.Request) {
 	file := node.(*vfs.File)
 	in, err := file.Open(os.O_RDONLY)
 	if err != nil {
-		serveError(node, w, "Could not open resource", err)
+		serveError(ctx, node, w, "Could not open resource", err)
 		return
 	}
 	defer fs.CheckClose(in, &err)
@@ -1,11 +1,11 @@
 package dlna

 import (
+	"context"
 	"crypto/md5"
 	"encoding/xml"
 	"fmt"
 	"io"
-	"log"
 	"net"
 	"net/http"
 	"net/http/httptest"
@@ -31,7 +31,7 @@ func makeDefaultFriendlyName() string {
 func makeDeviceUUID(unique string) string {
 	h := md5.New()
 	if _, err := io.WriteString(h, unique); err != nil {
-		log.Panicf("makeDeviceUUID write failed: %s", err)
+		fs.Panicf(nil, "makeDeviceUUID write failed: %s", err)
 	}
 	buf := h.Sum(nil)
 	return upnp.FormatUUID(buf)
@@ -41,7 +41,7 @@ func makeDeviceUUID(unique string) string {
 func listInterfaces() []net.Interface {
 	ifs, err := net.Interfaces()
 	if err != nil {
-		log.Printf("list network interfaces: %v", err)
+		fs.Logf(nil, "list network interfaces: %v", err)
 		return []net.Interface{}
 	}

@@ -71,7 +71,7 @@ func didlLite(chardata string) string {
 func mustMarshalXML(value interface{}) []byte {
 	ret, err := xml.MarshalIndent(value, "", "  ")
 	if err != nil {
-		log.Panicf("mustMarshalXML failed to marshal %v: %s", value, err)
+		fs.Panicf(nil, "mustMarshalXML failed to marshal %v: %s", value, err)
 	}
 	return ret
 }
@@ -143,9 +143,10 @@ func logging(next http.Handler) http.Handler {
 // Error recovery and general request logging are left to logging().
 func traceLogging(next http.Handler) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		ctx := r.Context()
 		dump, err := httputil.DumpRequest(r, true)
 		if err != nil {
-			serveError(nil, w, "error dumping request", err)
+			serveError(ctx, nil, w, "error dumping request", err)
 			return
 		}
 		fs.Debugf(nil, "%s", dump)
@@ -183,8 +184,8 @@ func withHeader(name string, value string, next http.Handler) http.Handler {
 }

 // serveError returns an http.StatusInternalServerError and logs the error
-func serveError(what interface{}, w http.ResponseWriter, text string, err error) {
-	err = fs.CountError(err)
+func serveError(ctx context.Context, what interface{}, w http.ResponseWriter, text string, err error) {
+	err = fs.CountError(ctx, err)
 	fs.Errorf(what, "%s: %v", text, err)
 	http.Error(w, text+".", http.StatusInternalServerError)
 }
@ -2,6 +2,7 @@ package docker
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/rclone/rclone/cmd/mountlib"
|
"github.com/rclone/rclone/cmd/mountlib"
|
||||||
@@ -270,7 +271,13 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
 		err = getFVarP(&vfsOpt.DiskSpaceTotalSize, opt, key)
 	case "vfs-read-chunk-streams":
 		intVal, err = opt.GetInt64(key)
-		vfsOpt.ChunkStreams = int(intVal)
+		if err == nil {
+			if intVal >= 0 && intVal <= math.MaxInt {
+				vfsOpt.ChunkStreams = int(intVal)
+			} else {
+				err = fmt.Errorf("key %q (%v) overflows int", key, intVal)
+			}
+		}
 
 	// unprefixed vfs options
 	case "no-modtime":
@@ -295,10 +302,22 @@ func getVFSOption(vfsOpt *vfscommon.Options, opt rc.Params, key string) (ok bool
 		err = getFVarP(&vfsOpt.Umask, opt, key)
 	case "uid":
 		intVal, err = opt.GetInt64(key)
-		vfsOpt.UID = uint32(intVal)
+		if err == nil {
+			if intVal >= 0 && intVal <= math.MaxUint32 {
+				vfsOpt.UID = uint32(intVal)
+			} else {
+				err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal)
+			}
+		}
 	case "gid":
 		intVal, err = opt.GetInt64(key)
-		vfsOpt.GID = uint32(intVal)
+		if err == nil {
+			if intVal >= 0 && intVal <= math.MaxUint32 {
+				vfsOpt.GID = uint32(intVal)
+			} else {
+				err = fmt.Errorf("key %q (%v) overflows uint32", key, intVal)
+			}
+		}
 
 	// non-vfs options
 	default:
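Both cases guard the narrowing conversion the same way: reject anything outside the destination type's range instead of silently truncating (note the gid branch must assign vfsOpt.GID, not vfsOpt.UID). The guard pulled out into a standalone helper, as a sketch (toUint32 is a hypothetical name, not part of the commit):

    package main

    import (
    	"fmt"
    	"math"
    )

    // toUint32 narrows an int64 to uint32, failing loudly on overflow
    // rather than truncating, the same check the diff adds for "uid"/"gid".
    func toUint32(key string, v int64) (uint32, error) {
    	if v < 0 || v > math.MaxUint32 {
    		return 0, fmt.Errorf("key %q (%v) overflows uint32", key, v)
    	}
    	return uint32(v), nil
    }

    func main() {
    	if _, err := toUint32("uid", -1); err != nil {
    		fmt.Println(err) // key "uid" (-1) overflows uint32
    	}
    	uid, _ := toUint32("uid", 1000)
    	fmt.Println(uid) // 1000
    }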
@@ -6,7 +6,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"log"
 	"net/http"
 	"os"
 	"path"
@@ -92,7 +91,7 @@ control the stats printing.
 	cmd.Run(false, true, command, func() error {
 		s, err := run(context.Background(), f, Opt)
 		if err != nil {
-			log.Fatal(err)
+			fs.Fatal(nil, fmt.Sprint(err))
 		}
 
 		defer systemd.Notify()()
@@ -187,6 +186,7 @@ func (s *HTTP) handler(w http.ResponseWriter, r *http.Request) {
 
 // serveDir serves a directory index at dirRemote
 func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string) {
+	ctx := r.Context()
 	VFS, err := s.getVFS(r.Context())
 	if err != nil {
 		http.Error(w, "Root directory not found", http.StatusNotFound)
@@ -199,7 +199,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
 		http.Error(w, "Directory not found", http.StatusNotFound)
 		return
 	} else if err != nil {
-		serve.Error(dirRemote, w, "Failed to list directory", err)
+		serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
 		return
 	}
 	if !node.IsDir() {
@@ -209,7 +209,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
 	dir := node.(*vfs.Dir)
 	dirEntries, err := dir.ReadDirAll()
 	if err != nil {
-		serve.Error(dirRemote, w, "Failed to list directory", err)
+		serve.Error(ctx, dirRemote, w, "Failed to list directory", err)
 		return
 	}
 
@@ -235,6 +235,7 @@ func (s *HTTP) serveDir(w http.ResponseWriter, r *http.Request, dirRemote string
 
 // serveFile serves a file object at remote
 func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string) {
+	ctx := r.Context()
 	VFS, err := s.getVFS(r.Context())
 	if err != nil {
 		http.Error(w, "File not found", http.StatusNotFound)
@@ -248,7 +249,7 @@ func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string)
 		http.Error(w, "File not found", http.StatusNotFound)
 		return
 	} else if err != nil {
-		serve.Error(remote, w, "Failed to find file", err)
+		serve.Error(ctx, remote, w, "Failed to find file", err)
 		return
 	}
 	if !node.IsFile() {
@@ -288,7 +289,7 @@ func (s *HTTP) serveFile(w http.ResponseWriter, r *http.Request, remote string)
 	// open the object
 	in, err := file.Open(os.O_RDONLY)
 	if err != nil {
-		serve.Error(remote, w, "Failed to open file", err)
+		serve.Error(ctx, remote, w, "Failed to open file", err)
 		return
 	}
 	defer func() {
@@ -3,6 +3,7 @@
 package nfs
 
 import (
+	"math"
 	"os"
 	"path"
 	"strings"
@@ -13,8 +14,34 @@ import (
 	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/vfs"
 	"github.com/rclone/rclone/vfs/vfscommon"
+	"github.com/willscott/go-nfs/file"
 )
 
+// setSys sets the Sys() call up for the vfs.Node passed in
+//
+// The billy abstraction layer does not extend to exposing `uid` and `gid`
+// ownership of files. If ownership is important to your file system, you
+// will need to ensure that the `os.FileInfo` meets additional constraints.
+// In particular, the `Sys()` escape hatch is queried by this library, and
+// if your file system populates a [`syscall.Stat_t`](https://golang.org/pkg/syscall/#Stat_t)
+// concrete struct, the ownership specified in that object will be used.
+// It can also return a file.FileInfo which is easier to manage cross platform
+func setSys(fi os.FileInfo) {
+	node, ok := fi.(vfs.Node)
+	if !ok {
+		fs.Errorf(fi, "internal error: %T is not a vfs.Node", fi)
+		return
+	}
+	vfs := node.VFS()
+	// Set the UID and GID for the node passed in from the VFS defaults.
+	stat := file.FileInfo{
+		Nlink:  1,
+		UID:    vfs.Opt.UID,
+		GID:    vfs.Opt.GID,
+		Fileid: math.MaxUint64, // without this mounting doesn't work on Linux
+	}
+	node.SetSys(&stat)
+}
+
 // FS is our wrapper around the VFS to properly support billy.Filesystem interface
 type FS struct {
 	vfs *vfs.VFS
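Why store a *file.FileInfo in Sys(): as the doc comment above explains, go-nfs consults the os.FileInfo Sys() escape hatch when it needs ownership, and file.FileInfo is its cross-platform carrier for that data. A sketch of the consuming side, under the assumption that Sys() returns exactly what setSys stored via node.SetSys:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/willscott/go-nfs/file"
    )

    // ownership reads back what setSys stored: if Sys() carries a
    // *file.FileInfo, the UID/GID recorded there are used (field names
    // taken from the diff above).
    func ownership(fi os.FileInfo) (uid, gid uint32, ok bool) {
    	stat, isNFS := fi.Sys().(*file.FileInfo)
    	if !isNFS {
    		return 0, 0, false
    	}
    	return stat.UID, stat.GID, true
    }

    func main() {
    	fmt.Println("ownership() above mirrors the go-nfs consumer side")
    }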
@@ -23,7 +50,14 @@ type FS struct {
 // ReadDir implements read dir
 func (f *FS) ReadDir(path string) (dir []os.FileInfo, err error) {
 	defer log.Trace(path, "")("items=%d, err=%v", &dir, &err)
-	return f.vfs.ReadDir(path)
+	dir, err = f.vfs.ReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+	for _, fi := range dir {
+		setSys(fi)
+	}
+	return dir, nil
 }
 
 // Create implements creating new files
@@ -47,7 +81,12 @@ func (f *FS) OpenFile(filename string, flag int, perm os.FileMode) (node billy.F
 // Stat gets the file stat
 func (f *FS) Stat(filename string) (fi os.FileInfo, err error) {
 	defer log.Trace(filename, "")("fi=%v, err=%v", &fi, &err)
-	return f.vfs.Stat(filename)
+	fi, err = f.vfs.Stat(filename)
+	if err != nil {
+		return nil, err
+	}
+	setSys(fi)
+	return fi, nil
 }
 
 // Rename renames a file
@@ -95,7 +134,12 @@ func (f *FS) MkdirAll(filename string, perm os.FileMode) (err error) {
 // Lstat gets the stats for symlink
 func (f *FS) Lstat(filename string) (fi os.FileInfo, err error) {
 	defer log.Trace(filename, "")("fi=%v, err=%v", &fi, &err)
-	return f.vfs.Stat(filename)
+	fi, err = f.vfs.Stat(filename)
+	if err != nil {
+		return nil, err
+	}
+	setSys(fi)
+	return fi, nil
 }
 
 // Symlink is not supported over NFS
@@ -24,7 +24,8 @@ type Handler struct {
 }
 
 // NewHandler creates a handler for the provided filesystem
-func NewHandler(vfs *vfs.VFS, opt *Options) (handler nfs.Handler, err error) {
+func NewHandler(ctx context.Context, vfs *vfs.VFS, opt *Options) (handler nfs.Handler, err error) {
+	ci := fs.GetConfig(ctx)
 	h := &Handler{
 		vfs: vfs,
 		opt: *opt,
@@ -35,7 +36,20 @@ func NewHandler(vfs *vfs.VFS, opt *Options) (handler nfs.Handler, err error) {
 	if err != nil {
 		return nil, fmt.Errorf("failed to make cache: %w", err)
 	}
-	nfs.SetLogger(&logIntercepter{Level: nfs.DebugLevel})
+	var level nfs.LogLevel
+	switch {
+	case ci.LogLevel >= fs.LogLevelDebug: // Debug level, needs -vv
+		level = nfs.TraceLevel
+	case ci.LogLevel >= fs.LogLevelInfo: // Transfers, needs -v
+		level = nfs.InfoLevel
+	case ci.LogLevel >= fs.LogLevelNotice: // Normal logging, -q suppresses
+		level = nfs.WarnLevel
+	case ci.LogLevel >= fs.LogLevelError: // Error - can't be suppressed
+		level = nfs.ErrorLevel
+	default:
+		level = nfs.WarnLevel
+	}
+	nfs.SetLogger(&logger{level: level})
 	return h, nil
 }
 
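The switch above maps rclone verbosity onto go-nfs verbosity monotonically: -vv turns on full NFS tracing, -v shows info, the default shows warnings, and -q leaves only errors. The same mapping as a standalone function, for reference (a sketch; the commit inlines it in NewHandler):

    package main

    import (
    	"fmt"

    	"github.com/rclone/rclone/fs"
    	nfs "github.com/willscott/go-nfs"
    )

    // mapLevel mirrors the switch in NewHandler: higher rclone verbosity
    // selects a chattier go-nfs level.
    func mapLevel(level fs.LogLevel) nfs.LogLevel {
    	switch {
    	case level >= fs.LogLevelDebug: // -vv
    		return nfs.TraceLevel
    	case level >= fs.LogLevelInfo: // -v
    		return nfs.InfoLevel
    	case level >= fs.LogLevelNotice: // default; -q drops below this
    		return nfs.WarnLevel
    	case level >= fs.LogLevelError:
    		return nfs.ErrorLevel
    	default:
    		return nfs.WarnLevel
    	}
    }

    func main() {
    	fmt.Println(mapLevel(fs.LogLevelDebug) == nfs.TraceLevel) // true
    }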
@@ -108,120 +122,167 @@ func onUnmount() {
 	}
 }
 
-// logIntercepter intercepts noisy go-nfs logs and reroutes them to DEBUG
-type logIntercepter struct {
-	Level nfs.LogLevel
+// logger handles go-nfs logs and reroutes them to rclone's logging system
+type logger struct {
+	level nfs.LogLevel
 }
 
-// Intercept intercepts go-nfs logs and calls fs.Debugf instead
-func (l *logIntercepter) Intercept(args ...interface{}) {
-	args = append([]interface{}{"[NFS DEBUG] "}, args...)
-	argsS := fmt.Sprint(args...)
-	fs.Debugf(nil, "%v", argsS)
+// logPrint intercepts go-nfs logs and calls rclone's log system instead
+func (l *logger) logPrint(level fs.LogLevel, args ...interface{}) {
+	fs.LogPrintf(level, "nfs", "%s", fmt.Sprint(args...))
 }
 
-// Interceptf intercepts go-nfs logs and calls fs.Debugf instead
-func (l *logIntercepter) Interceptf(format string, args ...interface{}) {
-	argsS := fmt.Sprint(args...)
-	// bit of a workaround... the real fix is probably https://github.com/willscott/go-nfs/pull/28
-	if strings.Contains(argsS, "mount.Umnt") {
-		onUnmount()
-	}
-
-	fs.Debugf(nil, "[NFS DEBUG] "+format, args...)
+// logPrintf intercepts go-nfs logs and calls rclone's log system instead
+func (l *logger) logPrintf(level fs.LogLevel, format string, args ...interface{}) {
+	fs.LogPrintf(level, "nfs", format, args...)
 }
 
 // Debug reroutes go-nfs Debug messages to Intercept
-func (l *logIntercepter) Debug(args ...interface{}) {
-	l.Intercept(args...)
+func (l *logger) Debug(args ...interface{}) {
+	if l.level < nfs.DebugLevel {
+		return
+	}
+	l.logPrint(fs.LogLevelDebug, args...)
 }
 
-// Debugf reroutes go-nfs Debugf messages to Interceptf
-func (l *logIntercepter) Debugf(format string, args ...interface{}) {
-	l.Interceptf(format, args...)
+// Debugf reroutes go-nfs Debugf messages to logPrintf
+func (l *logger) Debugf(format string, args ...interface{}) {
+	if l.level < nfs.DebugLevel {
+		return
+	}
+	l.logPrintf(fs.LogLevelDebug, format, args...)
 }
 
 // Error reroutes go-nfs Error messages to Intercept
-func (l *logIntercepter) Error(args ...interface{}) {
-	l.Intercept(args...)
+func (l *logger) Error(args ...interface{}) {
+	if l.level < nfs.ErrorLevel {
+		return
+	}
+	l.logPrint(fs.LogLevelError, args...)
 }
 
-// Errorf reroutes go-nfs Errorf messages to Interceptf
-func (l *logIntercepter) Errorf(format string, args ...interface{}) {
-	l.Interceptf(format, args...)
+// Errorf reroutes go-nfs Errorf messages to logPrintf
+func (l *logger) Errorf(format string, args ...interface{}) {
+	if l.level < nfs.ErrorLevel {
+		return
+	}
+	l.logPrintf(fs.LogLevelError, format, args...)
 }
 
 // Fatal reroutes go-nfs Fatal messages to Intercept
-func (l *logIntercepter) Fatal(args ...interface{}) {
-	l.Intercept(args...)
+func (l *logger) Fatal(args ...interface{}) {
+	if l.level < nfs.FatalLevel {
+		return
+	}
+	l.logPrint(fs.LogLevelError, args...)
 }
 
-// Fatalf reroutes go-nfs Fatalf messages to Interceptf
-func (l *logIntercepter) Fatalf(format string, args ...interface{}) {
-	l.Interceptf(format, args...)
+// Fatalf reroutes go-nfs Fatalf messages to logPrintf
+func (l *logger) Fatalf(format string, args ...interface{}) {
+	if l.level < nfs.FatalLevel {
+		return
+	}
+	l.logPrintf(fs.LogLevelError, format, args...)
 }
 
 // GetLevel returns the nfs.LogLevel
-func (l *logIntercepter) GetLevel() nfs.LogLevel {
-	return l.Level
+func (l *logger) GetLevel() nfs.LogLevel {
+	return l.level
 }
 
 // Info reroutes go-nfs Info messages to Intercept
-func (l *logIntercepter) Info(args ...interface{}) {
-	l.Intercept(args...)
+func (l *logger) Info(args ...interface{}) {
+	if l.level < nfs.InfoLevel {
+		return
+	}
+	l.logPrint(fs.LogLevelInfo, args...)
 }
 
-// Infof reroutes go-nfs Infof messages to Interceptf
-func (l *logIntercepter) Infof(format string, args ...interface{}) {
-	l.Interceptf(format, args...)
+// Infof reroutes go-nfs Infof messages to logPrintf
+func (l *logger) Infof(format string, args ...interface{}) {
+	if l.level < nfs.InfoLevel {
+		return
+	}
+	l.logPrintf(fs.LogLevelInfo, format, args...)
 }
 
 // Panic reroutes go-nfs Panic messages to Intercept
-func (l *logIntercepter) Panic(args ...interface{}) {
-	l.Intercept(args...)
+func (l *logger) Panic(args ...interface{}) {
+	if l.level < nfs.PanicLevel {
+		return
+	}
+	l.logPrint(fs.LogLevelError, args...)
 }
 
-// Panicf reroutes go-nfs Panicf messages to Interceptf
-func (l *logIntercepter) Panicf(format string, args ...interface{}) {
-	l.Interceptf(format, args...)
+// Panicf reroutes go-nfs Panicf messages to logPrintf
+func (l *logger) Panicf(format string, args ...interface{}) {
+	if l.level < nfs.PanicLevel {
+		return
+	}
+	l.logPrintf(fs.LogLevelError, format, args...)
 }
 
 // ParseLevel parses the nfs.LogLevel
-func (l *logIntercepter) ParseLevel(level string) (nfs.LogLevel, error) {
+func (l *logger) ParseLevel(level string) (nfs.LogLevel, error) {
 	return nfs.Log.ParseLevel(level)
 }
 
 // Print reroutes go-nfs Print messages to Intercept
-func (l *logIntercepter) Print(args ...interface{}) {
-	l.Intercept(args...)
+func (l *logger) Print(args ...interface{}) {
+	if l.level < nfs.InfoLevel {
+		return
+	}
+	l.logPrint(fs.LogLevelInfo, args...)
 }
 
 // Printf reroutes go-nfs Printf messages to Intercept
-func (l *logIntercepter) Printf(format string, args ...interface{}) {
-	l.Interceptf(format, args...)
+func (l *logger) Printf(format string, args ...interface{}) {
+	if l.level < nfs.InfoLevel {
+		return
+	}
+	l.logPrintf(fs.LogLevelInfo, format, args...)
 }
 
 // SetLevel sets the nfs.LogLevel
-func (l *logIntercepter) SetLevel(level nfs.LogLevel) {
-	l.Level = level
+func (l *logger) SetLevel(level nfs.LogLevel) {
+	l.level = level
 }
 
 // Trace reroutes go-nfs Trace messages to Intercept
-func (l *logIntercepter) Trace(args ...interface{}) {
-	l.Intercept(args...)
+func (l *logger) Trace(args ...interface{}) {
+	if l.level < nfs.DebugLevel {
+		return
+	}
+	l.logPrint(fs.LogLevelDebug, args...)
 }
 
-// Tracef reroutes go-nfs Tracef messages to Interceptf
-func (l *logIntercepter) Tracef(format string, args ...interface{}) {
-	l.Interceptf(format, args...)
+// Tracef reroutes go-nfs Tracef messages to logPrintf
+func (l *logger) Tracef(format string, args ...interface{}) {
+	// FIXME BODGE ... the real fix is probably https://github.com/willscott/go-nfs/pull/28
+	// This comes from `Log.Tracef("request: %v", w.req)` in conn.go
+	// DEBUG : nfs: request: RPC #3285799202 (mount.Umnt)
+	argsS := fmt.Sprint(args...)
+	if strings.Contains(argsS, "mount.Umnt") {
+		onUnmount()
+	}
+	if l.level < nfs.DebugLevel {
+		return
+	}
+	l.logPrintf(fs.LogLevelDebug, format, args...)
 }
 
 // Warn reroutes go-nfs Warn messages to Intercept
-func (l *logIntercepter) Warn(args ...interface{}) {
-	l.Intercept(args...)
+func (l *logger) Warn(args ...interface{}) {
+	if l.level < nfs.WarnLevel {
+		return
+	}
+	l.logPrint(fs.LogLevelNotice, args...)
 }
 
-// Warnf reroutes go-nfs Warnf messages to Interceptf
-func (l *logIntercepter) Warnf(format string, args ...interface{}) {
-	l.Interceptf(format, args...)
+// Warnf reroutes go-nfs Warnf messages to logPrintf
+func (l *logger) Warnf(format string, args ...interface{}) {
+	if l.level < nfs.WarnLevel {
+		return
+	}
+	l.logPrintf(fs.LogLevelNotice, format, args...)
 }
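Each method now applies the same two-step gate: drop the message if the configured go-nfs level is quieter than the method's own level, otherwise forward it to rclone with the "nfs" prefix. A dependency-free sketch of that gating pattern, including the mount.Umnt sniff that stands in for the missing unmount callback (the "FIXME BODGE" above; level names and ordering mirror go-nfs for illustration):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // Level mirrors go-nfs's ordering: higher values are chattier.
    type Level int

    const (
    	ErrorLevel Level = iota
    	WarnLevel
    	InfoLevel
    	DebugLevel
    	TraceLevel
    )

    // adapter mirrors the logger above: messages below the configured level
    // are dropped, and trace output is sniffed for the unmount RPC because
    // go-nfs offers no unmount callback.
    type adapter struct {
    	level     Level
    	onUnmount func()
    }

    func (a *adapter) Tracef(format string, args ...interface{}) {
    	// the real code sniffs fmt.Sprint(args...); the formatted string works too
    	if strings.Contains(fmt.Sprintf(format, args...), "mount.Umnt") {
    		a.onUnmount()
    	}
    	if a.level < TraceLevel {
    		return // gated out, but the unmount side effect already ran
    	}
    	fmt.Printf("TRACE: "+format+"\n", args...)
    }

    func main() {
    	a := &adapter{
    		level:     InfoLevel,
    		onUnmount: func() { fmt.Println("unmount detected") },
    	}
    	a.Tracef("request: RPC #42 (%s)", "mount.Umnt") // prints only "unmount detected"
    }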
@@ -43,7 +43,7 @@ var OptionsInfo = fs.Options{{
 }}
 
 func init() {
-	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "nfs", Opt: &opt, Options: OptionsInfo})
+	fs.RegisterGlobalOptions(fs.OptionsInfo{Name: "nfs", Opt: &Opt, Options: OptionsInfo})
 }
 
 type handleCache = fs.Enum[handleCacheChoices]
@@ -72,16 +72,17 @@ type Options struct {
 	HandleCacheDir string `config:"nfs_cache_dir"` // where the handle cache should be stored
 }
 
-var opt Options
+// Opt is the default set of serve nfs options
+var Opt Options
 
 // AddFlags adds flags for serve nfs (and nfsmount)
-func AddFlags(flagSet *pflag.FlagSet, Opt *Options) {
+func AddFlags(flagSet *pflag.FlagSet) {
 	flags.AddFlagsFromOptions(flagSet, "", OptionsInfo)
 }
 
 func init() {
 	vfsflags.AddFlags(Command.Flags())
-	AddFlags(Command.Flags(), &opt)
+	AddFlags(Command.Flags())
 }
 
 // Run the command
|
|||||||
cmd.CheckArgs(1, 1, command, args)
|
cmd.CheckArgs(1, 1, command, args)
|
||||||
f = cmd.NewFsSrc(args)
|
f = cmd.NewFsSrc(args)
|
||||||
cmd.Run(false, true, command, func() error {
|
cmd.Run(false, true, command, func() error {
|
||||||
s, err := NewServer(context.Background(), vfs.New(f, &vfscommon.Opt), &opt)
|
s, err := NewServer(context.Background(), vfs.New(f, &vfscommon.Opt), &Opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@@ -37,7 +37,7 @@ func NewServer(ctx context.Context, vfs *vfs.VFS, opt *Options) (s *Server, err
 		ctx: ctx,
 		opt: *opt,
 	}
-	s.handler, err = NewHandler(vfs, opt)
+	s.handler, err = NewHandler(ctx, vfs, opt)
 	if err != nil {
 		return nil, fmt.Errorf("failed to make NFS handler: %w", err)
 	}
@@ -6,11 +6,11 @@ import (
 	"crypto/rsa"
 	"crypto/sha256"
 	"encoding/base64"
-	"log"
 	"strings"
 	"testing"
 
 	_ "github.com/rclone/rclone/backend/local"
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/config/obscure"
 	"github.com/stretchr/testify/assert"
@@ -149,11 +149,11 @@ func TestRun(t *testing.T) {
 
 	privateKey, privateKeyErr := rsa.GenerateKey(rand.Reader, 2048)
 	if privateKeyErr != nil {
-		log.Fatal("error generating test private key " + privateKeyErr.Error())
+		fs.Fatal(nil, "error generating test private key "+privateKeyErr.Error())
 	}
 	publicKey, publicKeyError := ssh.NewPublicKey(&privateKey.PublicKey)
 	if privateKeyErr != nil {
-		log.Fatal("error generating test public key " + publicKeyError.Error())
+		fs.Fatal(nil, "error generating test public key "+publicKeyError.Error())
 	}
 
 	publicKeyString := base64.StdEncoding.EncodeToString(publicKey.Marshal())
@@ -27,6 +27,7 @@ import (
 	"github.com/rclone/rclone/fs/config"
 	"github.com/rclone/rclone/lib/env"
 	"github.com/rclone/rclone/lib/file"
+	sdActivation "github.com/rclone/rclone/lib/sdactivation"
 	"github.com/rclone/rclone/vfs"
 	"github.com/rclone/rclone/vfs/vfscommon"
 	"golang.org/x/crypto/ssh"
|
|||||||
|
|
||||||
// Once a ServerConfig has been configured, connections can be
|
// Once a ServerConfig has been configured, connections can be
|
||||||
// accepted.
|
// accepted.
|
||||||
s.listener, err = net.Listen("tcp", s.opt.ListenAddr)
|
var listener net.Listener
|
||||||
|
|
||||||
|
// In case we run in a socket-activated environment, listen on (the first)
|
||||||
|
// passed FD.
|
||||||
|
sdListeners, err := sdActivation.Listeners()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to listen for connection: %w", err)
|
return fmt.Errorf("unable to acquire listeners: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(sdListeners) > 0 {
|
||||||
|
if len(sdListeners) > 1 {
|
||||||
|
fs.LogPrintf(fs.LogLevelWarning, nil, "more than one listener passed, ignoring all but the first.\n")
|
||||||
|
}
|
||||||
|
listener = sdListeners[0]
|
||||||
|
} else {
|
||||||
|
listener, err = net.Listen("tcp", s.opt.ListenAddr)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to listen for connection: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.listener = listener
|
||||||
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
|
fs.Logf(nil, "SFTP server listening on %v\n", s.listener.Addr())
|
||||||
|
|
||||||
go s.acceptConnections()
|
go s.acceptConnections()
|
||||||
|
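lib/sdactivation wraps the systemd socket-activation protocol: systemd sets LISTEN_PID/LISTEN_FDS in the environment and passes activated sockets starting at file descriptor 3. A hand-rolled sketch of the fallback pattern serve() now uses (listenerFromEnv is an illustrative name; production code should also validate LISTEN_PID and the advertised FD count, which the library handles):

    package main

    import (
    	"fmt"
    	"net"
    	"os"
    )

    // listenerFromEnv returns a listener inherited via systemd socket
    // activation if one was passed, else nil.
    func listenerFromEnv() (net.Listener, error) {
    	if os.Getenv("LISTEN_FDS") == "" {
    		return nil, nil
    	}
    	// systemd passes the first activated socket as FD 3.
    	return net.FileListener(os.NewFile(3, "LISTEN_FD_3"))
    }

    func main() {
    	ln, err := listenerFromEnv()
    	if err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	if ln == nil { // no socket activation: fall back, as serve() does
    		ln, err = net.Listen("tcp", "localhost:2022")
    		if err != nil {
    			fmt.Fprintln(os.Stderr, err)
    			os.Exit(1)
    		}
    	}
    	fmt.Println("listening on", ln.Addr())
    	_ = ln.Close()
    }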
@@ -115,6 +115,17 @@ directory.
 By default the server binds to localhost:2022 - if you want it to be
 reachable externally then supply ` + "`--addr :2022`" + ` for example.
 
+This also supports being run with socket activation, in which case it will
+listen on the first passed FD.
+It can be configured with .socket and .service unit files as described in
+https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
+
+Socket activation can be tested ad-hoc with the ` + "`systemd-socket-activate`" + ` command:
+
+	systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/
+
+This will socket-activate rclone on the first connection to port 2222 over TCP.
+
 Note that the default of ` + "`--vfs-cache-mode off`" + ` is fine for the rclone
 sftp backend, but it may not be with other SFTP clients.
@@ -349,6 +349,7 @@ func (w *WebDAV) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
 // serveDir serves a directory index at dirRemote
 // This is similar to serveDir in serve http.
 func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote string) {
+	ctx := r.Context()
 	VFS, err := w.getVFS(r.Context())
 	if err != nil {
 		http.Error(rw, "Root directory not found", http.StatusNotFound)
@@ -361,7 +362,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
 		http.Error(rw, "Directory not found", http.StatusNotFound)
 		return
 	} else if err != nil {
-		serve.Error(dirRemote, rw, "Failed to list directory", err)
+		serve.Error(ctx, dirRemote, rw, "Failed to list directory", err)
 		return
 	}
 	if !node.IsDir() {
@@ -372,7 +373,7 @@ func (w *WebDAV) serveDir(rw http.ResponseWriter, r *http.Request, dirRemote str
 	dirEntries, err := dir.ReadDirAll()
 
 	if err != nil {
-		serve.Error(dirRemote, rw, "Failed to list directory", err)
+		serve.Error(ctx, dirRemote, rw, "Failed to list directory", err)
 		return
 	}
 
@@ -3,11 +3,11 @@
 package cmd
 
 import (
-	"log"
 	"os"
 	"os/signal"
 	"syscall"
 
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 )
 
@@ -17,7 +17,7 @@ func SigInfoHandler() {
 	signal.Notify(signals, syscall.SIGINFO)
 	go func() {
 		for range signals {
-			log.Printf("%v\n", accounting.GlobalStats())
+			fs.Printf(nil, "%v\n", accounting.GlobalStats())
 		}
 	}()
 }
@@ -5,7 +5,6 @@ package info
 import (
 	"context"
 	"fmt"
-	"log"
 	"os"
 	"path/filepath"
 	"strings"
@@ -25,7 +24,7 @@ func (r *results) checkBase32768() {
 	n := 0
 	dir, err := os.MkdirTemp("", "rclone-base32768-files")
 	if err != nil {
-		log.Printf("Failed to make temp dir: %v", err)
+		fs.Logf(nil, "Failed to make temp dir: %v", err)
 		return
 	}
 	defer func() {
@@ -41,7 +40,7 @@ func (r *results) checkBase32768() {
 	fileName := filepath.Join(dir, fmt.Sprintf("%04d-%s.txt", n, out.String()))
 	err = os.WriteFile(fileName, []byte(fileName), 0666)
 	if err != nil {
-		log.Printf("write %q failed: %v", fileName, err)
+		fs.Logf(nil, "write %q failed: %v", fileName, err)
 		return
 	}
 	n++
@@ -50,7 +49,7 @@ func (r *results) checkBase32768() {
 	// Make a local fs
 	fLocal, err := fs.NewFs(ctx, dir)
 	if err != nil {
-		log.Printf("Failed to make local fs: %v", err)
+		fs.Logf(nil, "Failed to make local fs: %v", err)
 		return
 	}
 
@@ -61,14 +60,14 @@ func (r *results) checkBase32768() {
 	s = fspath.JoinRootPath(s, testDir)
 	fRemote, err := fs.NewFs(ctx, s)
 	if err != nil {
-		log.Printf("Failed to make remote fs: %v", err)
+		fs.Logf(nil, "Failed to make remote fs: %v", err)
 		return
 	}
 
 	defer func() {
 		err := operations.Purge(ctx, r.f, testDir)
 		if err != nil {
-			log.Printf("Failed to purge test directory: %v", err)
+			fs.Logf(nil, "Failed to purge test directory: %v", err)
 			return
 		}
 	}()
@@ -76,7 +75,7 @@ func (r *results) checkBase32768() {
 	// Sync local to remote
 	err = sync.Sync(ctx, fRemote, fLocal, false)
 	if err != nil {
-		log.Printf("Failed to sync remote fs: %v", err)
+		fs.Logf(nil, "Failed to sync remote fs: %v", err)
 		return
 	}
 
@@ -86,7 +85,7 @@ func (r *results) checkBase32768() {
 		Fsrc: fLocal,
 	})
 	if err != nil {
-		log.Printf("Failed to check remote fs: %v", err)
+		fs.Logf(nil, "Failed to check remote fs: %v", err)
 		return
 	}
 
@@ -10,7 +10,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"log"
 	"os"
 	"path"
 	"regexp"
@@ -77,7 +76,7 @@ code for each one.
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 1e6, command, args)
 		if !checkNormalization && !checkControl && !checkLength && !checkStreaming && !checkBase32768 && !all {
-			log.Fatalf("no tests selected - select a test or use --all")
+			fs.Fatalf(nil, "no tests selected - select a test or use --all")
 		}
 		if all {
 			checkNormalization = true
@@ -93,7 +92,7 @@ code for each one.
 		fs.Infof(f, "Created temporary directory for test files: %s", tempDirPath)
 		err := f.Mkdir(context.Background(), "")
 		if err != nil {
-			log.Fatalf("couldn't create temporary directory: %v", err)
+			fs.Fatalf(nil, "couldn't create temporary directory: %v", err)
 		}
 
 		cmd.Run(false, false, command, func() error {