Mirror of https://github.com/rclone/rclone.git (synced 2024-11-25 01:44:41 +01:00)
Merge branch 'rclone:master' into googlephotos-writedescription
Commit: d101fb2a5d
.github/workflows/build.yml (vendored): 36 lines changed
@ -27,12 +27,12 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.19', 'go1.20']
|
||||
job_name: ['linux', 'linux_386', 'mac_amd64', 'mac_arm64', 'windows', 'other_os', 'go1.20', 'go1.21']
|
||||
|
||||
include:
|
||||
- job_name: linux
|
||||
os: ubuntu-latest
|
||||
go: '1.21'
|
||||
go: '>=1.22.0-rc.1'
|
||||
gotags: cmount
|
||||
build_flags: '-include "^linux/"'
|
||||
check: true
|
||||
@ -43,14 +43,14 @@ jobs:
|
||||
|
||||
- job_name: linux_386
|
||||
os: ubuntu-latest
|
||||
go: '1.21'
|
||||
go: '>=1.22.0-rc.1'
|
||||
goarch: 386
|
||||
gotags: cmount
|
||||
quicktest: true
|
||||
|
||||
- job_name: mac_amd64
|
||||
os: macos-11
|
||||
go: '1.21'
|
||||
go: '>=1.22.0-rc.1'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/amd64" -cgo'
|
||||
quicktest: true
|
||||
@ -59,14 +59,14 @@ jobs:
|
||||
|
||||
- job_name: mac_arm64
|
||||
os: macos-11
|
||||
go: '1.21'
|
||||
go: '>=1.22.0-rc.1'
|
||||
gotags: 'cmount'
|
||||
build_flags: '-include "^darwin/arm64" -cgo -macos-arch arm64 -cgo-cflags=-I/usr/local/include -cgo-ldflags=-L/usr/local/lib'
|
||||
deploy: true
|
||||
|
||||
- job_name: windows
|
||||
os: windows-latest
|
||||
go: '1.21'
|
||||
go: '>=1.22.0-rc.1'
|
||||
gotags: cmount
|
||||
cgo: '0'
|
||||
build_flags: '-include "^windows/"'
|
||||
@ -76,23 +76,23 @@ jobs:
|
||||
|
||||
- job_name: other_os
|
||||
os: ubuntu-latest
|
||||
go: '1.21'
|
||||
go: '>=1.22.0-rc.1'
|
||||
build_flags: '-exclude "^(windows/|darwin/|linux/)"'
|
||||
compile_all: true
|
||||
deploy: true
|
||||
|
||||
- job_name: go1.19
|
||||
os: ubuntu-latest
|
||||
go: '1.19'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.20
|
||||
os: ubuntu-latest
|
||||
go: '1.20'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
- job_name: go1.21
|
||||
os: ubuntu-latest
|
||||
go: '1.21'
|
||||
quicktest: true
|
||||
racequicktest: true
|
||||
|
||||
name: ${{ matrix.job_name }}
|
||||
|
||||
runs-on: ${{ matrix.os }}
|
||||
@ -168,7 +168,7 @@ jobs:
|
||||
env
|
||||
|
||||
- name: Go module cache
|
||||
uses: actions/cache@v3
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
@ -234,7 +234,7 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Code quality test
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
uses: golangci/golangci-lint-action@v4
|
||||
with:
|
||||
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
|
||||
version: latest
|
||||
@ -243,7 +243,7 @@ jobs:
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.21'
|
||||
go-version: '>=1.22.0-rc.1'
|
||||
check-latest: true
|
||||
|
||||
- name: Install govulncheck
|
||||
@ -268,10 +268,10 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.21'
|
||||
go-version: '>=1.22.0-rc.1'
|
||||
|
||||
- name: Go module cache
|
||||
uses: actions/cache@v3
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
|
@ -1,8 +1,9 @@
|
||||
FROM golang AS builder
|
||||
FROM golang:alpine AS builder
|
||||
|
||||
COPY . /go/src/github.com/rclone/rclone/
|
||||
WORKDIR /go/src/github.com/rclone/rclone/
|
||||
|
||||
RUN apk add --no-cache make bash gawk git
|
||||
RUN \
|
||||
CGO_ENABLED=0 \
|
||||
make
|
||||
|
MANUAL.html (generated): 12157 lines changed. File diff suppressed because it is too large.
MANUAL.txt (generated): 11928 lines changed. File diff suppressed because it is too large.
Makefile: 2 lines changed
@ -103,7 +103,7 @@ check: rclone
|
||||
|
||||
# Get the build dependencies
|
||||
build_dep:
|
||||
go run bin/get-github-release.go -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
|
||||
go run bin/get-github-release.go -use-api -extract golangci-lint golangci/golangci-lint 'golangci-lint-.*\.tar\.gz'
|
||||
|
||||
# Get the release dependencies we only install on linux
|
||||
release_dep_linux:
|
||||
|
@ -1,5 +1,7 @@
|
||||
[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-light-mode-only)
|
||||
[<img src="https://rclone.org/img/logo_on_dark__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/#gh-dark-mode-only)
|
||||
[<img src="https://rclone.org/img/logos/warp-github-light.svg" title="Visit warp.dev to learn more." align="right">](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-light-mode-only)
|
||||
[<img src="https://rclone.org/img/logos/warp-github-dark.svg" title="Visit warp.dev to learn more." align="right">](https://www.warp.dev/?utm_source=github&utm_medium=referral&utm_campaign=rclone_20231103#gh-dark-mode-only)
|
||||
|
||||
[Website](https://rclone.org) |
|
||||
[Documentation](https://rclone.org/docs/) |
|
||||
@ -23,7 +25,6 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
|
||||
* Akamai Netstorage [:page_facing_up:](https://rclone.org/netstorage/)
|
||||
* Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
|
||||
* Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
|
||||
* Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
|
||||
* ArvanCloud Object Storage (AOS) [:page_facing_up:](https://rclone.org/s3/#arvan-cloud-object-storage-aos)
|
||||
* Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
|
||||
@ -46,6 +47,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
|
||||
* HiDrive [:page_facing_up:](https://rclone.org/hidrive/)
|
||||
* HTTP [:page_facing_up:](https://rclone.org/http/)
|
||||
* Huawei Cloud Object Storage Service(OBS) [:page_facing_up:](https://rclone.org/s3/#huawei-obs)
|
||||
* ImageKit [:page_facing_up:](https://rclone.org/imagekit/)
|
||||
* Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
|
||||
* Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
|
||||
* IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
|
||||
@ -120,6 +122,7 @@ These backends adapt or modify other storage providers
|
||||
* Partial syncs supported on a whole file basis
|
||||
* [Copy](https://rclone.org/commands/rclone_copy/) mode to just copy new/changed files
|
||||
* [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
|
||||
* [Bisync](https://rclone.org/bisync/) (two way) to keep two directories in sync bidirectionally
|
||||
* [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
|
||||
* Can sync to and from network, e.g. two different cloud accounts
|
||||
* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
|
||||
|
RELEASE.md: 39 lines changed
@ -124,32 +124,21 @@ Cherry pick any changes back to master and the stable branch if it is active.

## Making a manual build of docker

The rclone docker image should autobuild via GitHub Actions. If it doesn't
or needs to be updated then rebuild like this.

See: https://github.com/ilteoood/docker_buildx/issues/19
See: https://github.com/ilteoood/docker_buildx/blob/master/scripts/install_buildx.sh
To do a basic build of rclone's docker image to debug builds locally:

```
docker buildx build --load -t rclone/rclone:testing --progress=plain .
docker run --rm rclone/rclone:testing version
```

To test the multiplatform build

```
docker buildx build -t rclone/rclone:testing --progress=plain --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7,linux/arm/v6 .
```

To make a full build then set the tags correctly and add `--push`

```
git co v1.54.1
docker pull golang
export DOCKER_CLI_EXPERIMENTAL=enabled
docker buildx create --name actions_builder --use
docker run --rm --privileged docker/binfmt:820fdd95a9972a5308930a2bdfb8573dd4447ad3
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
SUPPORTED_PLATFORMS=$(docker buildx inspect --bootstrap | grep 'Platforms:*.*' | cut -d : -f2,3)
echo "Supported platforms: $SUPPORTED_PLATFORMS"
docker buildx build --platform linux/amd64,linux/386,linux/arm64,linux/arm/v7 -t rclone/rclone:1.54.1 -t rclone/rclone:1.54 -t rclone/rclone:1 -t rclone/rclone:latest --push .
docker buildx stop actions_builder
```

### Old build for linux/amd64 only

```
docker pull golang
docker build --rm --ulimit memlock=67108864 -t rclone/rclone:1.52.0 -t rclone/rclone:1.52 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.52.0
docker push rclone/rclone:1.52
docker push rclone/rclone:1
docker push rclone/rclone:latest
```
@ -81,10 +81,12 @@ func TestNewFS(t *testing.T) {
|
||||
for i, gotEntry := range gotEntries {
|
||||
what := fmt.Sprintf("%s, entry=%d", what, i)
|
||||
wantEntry := test.entries[i]
|
||||
_, isDir := gotEntry.(fs.Directory)
|
||||
|
||||
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
|
||||
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
|
||||
_, isDir := gotEntry.(fs.Directory)
|
||||
if !isDir {
|
||||
require.Equal(t, wantEntry.size, gotEntry.Size(), what)
|
||||
}
|
||||
require.Equal(t, wantEntry.isDir, isDir, what)
|
||||
}
|
||||
}
|
||||
|
@ -4,7 +4,6 @@ package all
|
||||
import (
|
||||
// Active file systems
|
||||
_ "github.com/rclone/rclone/backend/alias"
|
||||
_ "github.com/rclone/rclone/backend/amazonclouddrive"
|
||||
_ "github.com/rclone/rclone/backend/azureblob"
|
||||
_ "github.com/rclone/rclone/backend/azurefiles"
|
||||
_ "github.com/rclone/rclone/backend/b2"
|
||||
|
File diff suppressed because it is too large
@ -1,21 +0,0 @@
|
||||
// Test AmazonCloudDrive filesystem interface
|
||||
|
||||
//go:build acd
|
||||
// +build acd
|
||||
|
||||
package amazonclouddrive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/rclone/rclone/backend/amazonclouddrive"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
|
||||
fstests.RemoteName = "TestAmazonCloudDrive:"
|
||||
fstests.Run(t)
|
||||
}
|
@ -8,6 +8,7 @@ import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
@ -401,6 +402,24 @@ rclone does if you know the container exists already.
|
||||
Help: `If set, do not do HEAD before GET when getting objects.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "delete_snapshots",
|
||||
Help: `Set to specify how to deal with snapshots on blob deletion.`,
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "",
|
||||
Help: "By default, the delete operation fails if a blob has snapshots",
|
||||
}, {
|
||||
Value: string(blob.DeleteSnapshotsOptionTypeInclude),
|
||||
Help: "Specify 'include' to remove the root blob and all its snapshots",
|
||||
}, {
|
||||
Value: string(blob.DeleteSnapshotsOptionTypeOnly),
|
||||
Help: "Specify 'only' to remove only the snapshots but keep the root blob.",
|
||||
},
|
||||
},
|
||||
Default: "",
|
||||
Exclusive: true,
|
||||
Advanced: true,
|
||||
}},
|
||||
})
|
||||
}
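As a rough, standalone illustration of what the new `delete_snapshots` option does (not part of the diff, and assuming the same Azure SDK `blob` package this backend already imports), the configured string maps onto the SDK's snapshot-deletion mode like this; the Remove hunk further down does the equivalent wiring inside the backend:

```
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// deleteOptionsFor maps the delete_snapshots config value ("", "include" or
// "only") onto the SDK's DeleteOptions, mirroring the Remove hunk below.
func deleteOptionsFor(deleteSnapshots string) blob.DeleteOptions {
	opt := blob.DeleteOptions{}
	if deleteSnapshots != "" {
		action := blob.DeleteSnapshotsOptionType(deleteSnapshots)
		opt.DeleteSnapshots = &action
	}
	return opt
}

func main() {
	for _, v := range []string{"", "include", "only"} {
		opt := deleteOptionsFor(v)
		fmt.Printf("%q -> DeleteSnapshots set: %v\n", v, opt.DeleteSnapshots != nil)
	}
}
```

With an empty value no DeleteSnapshots setting is passed to the SDK, so deleting a blob that still has snapshots fails, matching the default described in the option help above.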
|
||||
@ -437,6 +456,7 @@ type Options struct {
|
||||
DirectoryMarkers bool `config:"directory_markers"`
|
||||
NoCheckContainer bool `config:"no_check_container"`
|
||||
NoHeadObject bool `config:"no_head_object"`
|
||||
DeleteSnapshots string `config:"delete_snapshots"`
|
||||
}
|
||||
|
||||
// Fs represents a remote azure server
|
||||
@ -1966,34 +1986,21 @@ func (rs *readSeekCloser) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// increment the array as LSB binary
|
||||
func increment(xs *[8]byte) {
|
||||
for i, digit := range xs {
|
||||
newDigit := digit + 1
|
||||
xs[i] = newDigit
|
||||
if newDigit >= digit {
|
||||
// exit if no carry
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// record chunk number and id for Close
|
||||
type azBlock struct {
|
||||
chunkNumber int
|
||||
chunkNumber uint64
|
||||
id string
|
||||
}
|
||||
|
||||
// Implements the fs.ChunkWriter interface
|
||||
type azChunkWriter struct {
|
||||
chunkSize int64
|
||||
size int64
|
||||
f *Fs
|
||||
ui uploadInfo
|
||||
blocksMu sync.Mutex // protects the below
|
||||
blocks []azBlock // list of blocks for finalize
|
||||
binaryBlockID [8]byte // block counter as LSB first 8 bytes
|
||||
o *Object
|
||||
chunkSize int64
|
||||
size int64
|
||||
f *Fs
|
||||
ui uploadInfo
|
||||
blocksMu sync.Mutex // protects the below
|
||||
blocks []azBlock // list of blocks for finalize
|
||||
o *Object
|
||||
}
|
||||
|
||||
// OpenChunkWriter returns the chunk size and a ChunkWriter
|
||||
@ -2081,13 +2088,14 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader
|
||||
transactionalMD5 := md5sum[:]
|
||||
|
||||
// increment the blockID and save the blocks for finalize
|
||||
increment(&w.binaryBlockID)
|
||||
blockID := base64.StdEncoding.EncodeToString(w.binaryBlockID[:])
|
||||
var binaryBlockID [8]byte // block counter as LSB first 8 bytes
|
||||
binary.LittleEndian.PutUint64(binaryBlockID[:], uint64(chunkNumber))
|
||||
blockID := base64.StdEncoding.EncodeToString(binaryBlockID[:])
|
||||
|
||||
// Save the blockID for the commit
|
||||
w.blocksMu.Lock()
|
||||
w.blocks = append(w.blocks, azBlock{
|
||||
chunkNumber: chunkNumber,
|
||||
chunkNumber: uint64(chunkNumber),
|
||||
id: blockID,
|
||||
})
|
||||
w.blocksMu.Unlock()
|
||||
@ -2152,9 +2160,20 @@ func (w *azChunkWriter) Close(ctx context.Context) (err error) {
|
||||
return w.blocks[i].chunkNumber < w.blocks[j].chunkNumber
|
||||
})
|
||||
|
||||
// Create a list of block IDs
|
||||
// Create and check a list of block IDs
|
||||
blockIDs := make([]string, len(w.blocks))
|
||||
for i := range w.blocks {
|
||||
if w.blocks[i].chunkNumber != uint64(i) {
|
||||
return fmt.Errorf("internal error: expecting chunkNumber %d but got %d", i, w.blocks[i].chunkNumber)
|
||||
}
|
||||
chunkBytes, err := base64.StdEncoding.DecodeString(w.blocks[i].id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("internal error: bad block ID: %w", err)
|
||||
}
|
||||
chunkNumber := binary.LittleEndian.Uint64(chunkBytes)
|
||||
if w.blocks[i].chunkNumber != chunkNumber {
|
||||
return fmt.Errorf("internal error: expecting decoded chunkNumber %d but got %d", w.blocks[i].chunkNumber, chunkNumber)
|
||||
}
|
||||
blockIDs[i] = w.blocks[i].id
|
||||
}
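Because each block ID is now just the chunk number encoded as 8 little-endian bytes and base64'd, Close can decode every ID and cross-check it against the recorded chunk number. A minimal standard-library sketch of that round trip (the helper names are invented for illustration):

```
package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// blockIDForChunk encodes a chunk number as an 8-byte little-endian value
// and base64s it, as WriteChunk does above when staging a block.
func blockIDForChunk(chunkNumber uint64) string {
	var raw [8]byte
	binary.LittleEndian.PutUint64(raw[:], chunkNumber)
	return base64.StdEncoding.EncodeToString(raw[:])
}

// chunkForBlockID reverses the encoding, as the consistency checks in
// Close do above before committing the block list.
func chunkForBlockID(blockID string) (uint64, error) {
	raw, err := base64.StdEncoding.DecodeString(blockID)
	if err != nil {
		return 0, fmt.Errorf("bad block ID %q: %w", blockID, err)
	}
	if len(raw) != 8 {
		return 0, fmt.Errorf("bad block ID %q: want 8 bytes, got %d", blockID, len(raw))
	}
	return binary.LittleEndian.Uint64(raw), nil
}

func main() {
	id := blockIDForChunk(42)
	n, err := chunkForBlockID(id)
	fmt.Println(id, n, err) // the chunk number 42 round-trips through the ID
}
```

This is also why the old increment() helper and its TestIncrement test (removed later in this diff) are no longer needed: the block ID is derived from the chunk number on demand instead of being carried as mutable counter state between chunks.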
|
||||
|
||||
@ -2356,9 +2375,10 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
// Remove an object
|
||||
func (o *Object) Remove(ctx context.Context) error {
|
||||
blb := o.getBlobSVC()
|
||||
//only := blob.DeleteSnapshotsOptionTypeOnly
|
||||
opt := blob.DeleteOptions{
|
||||
//DeleteSnapshots: &only,
|
||||
opt := blob.DeleteOptions{}
|
||||
if o.fs.opt.DeleteSnapshots != "" {
|
||||
action := blob.DeleteSnapshotsOptionType(o.fs.opt.DeleteSnapshots)
|
||||
opt.DeleteSnapshots = &action
|
||||
}
|
||||
return o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err := blb.Delete(ctx, &opt)
|
||||
|
@ -17,21 +17,3 @@ func (f *Fs) InternalTest(t *testing.T) {
|
||||
enabled = f.Features().GetTier
|
||||
assert.True(t, enabled)
|
||||
}
|
||||
|
||||
func TestIncrement(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in [8]byte
|
||||
want [8]byte
|
||||
}{
|
||||
{[8]byte{0, 0, 0, 0}, [8]byte{1, 0, 0, 0}},
|
||||
{[8]byte{0xFE, 0, 0, 0}, [8]byte{0xFF, 0, 0, 0}},
|
||||
{[8]byte{0xFF, 0, 0, 0}, [8]byte{0, 1, 0, 0}},
|
||||
{[8]byte{0, 1, 0, 0}, [8]byte{1, 1, 0, 0}},
|
||||
{[8]byte{0xFF, 0xFF, 0xFF, 0xFE}, [8]byte{0, 0, 0, 0xFF}},
|
||||
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 1}},
|
||||
{[8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}, [8]byte{0, 0, 0, 0, 0, 0, 0}},
|
||||
} {
|
||||
increment(&test.in)
|
||||
assert.Equal(t, test.want, test.in)
|
||||
}
|
||||
}
|
||||
|
@ -193,9 +193,12 @@ Example:
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "download_auth_duration",
|
||||
Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
|
||||
Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
|
||||
|
||||
This is used in combination with "rclone link" for making files
|
||||
accessible to the public and sets the duration before the download
|
||||
authorization token will expire.
|
||||
|
||||
The duration before the download authorization token will expire.
|
||||
The minimum value is 1 second. The maximum value is one week.`,
|
||||
Default: fs.Duration(7 * 24 * time.Hour),
|
||||
Advanced: true,
|
||||
|
backend/cache/cache_internal_test.go (vendored): 6 lines changed
@ -30,6 +30,7 @@ import (
|
||||
"github.com/rclone/rclone/fs/config"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/testy"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
@ -935,8 +936,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
}
|
||||
|
||||
if purge {
|
||||
_ = f.Features().Purge(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
_ = operations.Purge(context.Background(), f, "")
|
||||
}
|
||||
err = f.Mkdir(context.Background(), "")
|
||||
require.NoError(t, err)
|
||||
@ -949,7 +949,7 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
|
||||
}
|
||||
|
||||
func (r *run) cleanupFs(t *testing.T, f fs.Fs) {
|
||||
err := f.Features().Purge(context.Background(), "")
|
||||
err := operations.Purge(context.Background(), f, "")
|
||||
require.NoError(t, err)
|
||||
cfs, err := r.getCacheFs(f)
|
||||
require.NoError(t, err)
|
||||
|
backend/cache/cache_test.go (vendored): 11 lines changed
@ -16,10 +16,11 @@ import (
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestCache:",
|
||||
NilObject: (*cache.Object)(nil),
|
||||
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter"},
|
||||
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
|
||||
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
|
||||
RemoteName: "TestCache:",
|
||||
NilObject: (*cache.Object)(nil),
|
||||
UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "OpenChunkWriter", "DirSetModTime", "MkdirMetadata"},
|
||||
UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier", "Metadata"},
|
||||
UnimplementableDirectoryMethods: []string{"Metadata", "SetMetadata", "SetModTime"},
|
||||
SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
|
||||
})
|
||||
}
|
||||
|
@ -338,13 +338,18 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
// Note 2: features.Fill() points features.PutStream to our PutStream,
|
||||
// but features.Mask() will nullify it if wrappedFs does not have it.
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: false, // Object.MimeType not supported
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ServerSideAcrossConfigs: true,
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: false, // Object.MimeType not supported
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ServerSideAcrossConfigs: true,
|
||||
ReadDirMetadata: true,
|
||||
WriteDirMetadata: true,
|
||||
WriteDirSetModTime: true,
|
||||
UserDirMetadata: true,
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
}).Fill(ctx, f).Mask(ctx, baseFs).WrapsFs(f, baseFs)
|
||||
|
||||
f.features.Disable("ListR") // Recursive listing may cause chunker to skip files
|
||||
@ -821,8 +826,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
|
||||
}
|
||||
case fs.Directory:
|
||||
isSubdir[entry.Remote()] = true
|
||||
wrapDir := fs.NewDirCopy(ctx, entry)
|
||||
wrapDir.SetRemote(entry.Remote())
|
||||
wrapDir := fs.NewDirWrapper(entry.Remote(), entry)
|
||||
tempEntries = append(tempEntries, wrapDir)
|
||||
default:
|
||||
if f.opt.FailHard {
|
||||
@ -1571,6 +1575,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return f.base.Mkdir(ctx, dir)
|
||||
}
|
||||
|
||||
// MkdirMetadata makes the root directory of the Fs object
|
||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
||||
if do := f.base.Features().MkdirMetadata; do != nil {
|
||||
return do(ctx, dir, metadata)
|
||||
}
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
@ -1888,6 +1900,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return do(ctx, srcFs.base, srcRemote, dstRemote)
|
||||
}
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
if do := f.base.Features().DirSetModTime; do != nil {
|
||||
return do(ctx, dir, modTime)
|
||||
}
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
@ -2548,6 +2568,8 @@ var (
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
|
@ -222,18 +222,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
|
||||
}
|
||||
// check features
|
||||
var features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
ReadDirMetadata: true,
|
||||
WriteDirMetadata: true,
|
||||
WriteDirSetModTime: true,
|
||||
UserDirMetadata: true,
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove := true
|
||||
for _, u := range f.upstreams {
|
||||
@ -440,6 +445,32 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return u.f.Mkdir(ctx, uRemote)
|
||||
}
|
||||
|
||||
// MkdirMetadata makes the root directory of the Fs object
|
||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
||||
u, uRemote, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
do := u.f.Features().MkdirMetadata
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
newDir, err := do(ctx, uRemote, metadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
entries := fs.DirEntries{newDir}
|
||||
entries, err = u.wrapEntries(ctx, entries)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newDir, ok := entries[0].(fs.Directory)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
|
||||
}
|
||||
return newDir, nil
|
||||
}
|
||||
|
||||
// purge the upstream or fallback to a slow way
|
||||
func (u *upstream) purge(ctx context.Context, dir string) (err error) {
|
||||
if do := u.f.Features().Purge; do != nil {
|
||||
@ -755,12 +786,11 @@ func (u *upstream) wrapEntries(ctx context.Context, entries fs.DirEntries) (fs.D
|
||||
case fs.Object:
|
||||
entries[i] = u.newObject(x)
|
||||
case fs.Directory:
|
||||
newDir := fs.NewDirCopy(ctx, x)
|
||||
newPath, err := u.pathAdjustment.do(newDir.Remote())
|
||||
newPath, err := u.pathAdjustment.do(x.Remote())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newDir.SetRemote(newPath)
|
||||
newDir := fs.NewDirWrapper(newPath, x)
|
||||
entries[i] = newDir
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown entry type %T", entry)
|
||||
@ -783,7 +813,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
if f.root == "" && dir == "" {
|
||||
entries = make(fs.DirEntries, 0, len(f.upstreams))
|
||||
for combineDir := range f.upstreams {
|
||||
d := fs.NewDir(combineDir, f.when)
|
||||
d := fs.NewLimitedDirWrapper(combineDir, fs.NewDir(combineDir, f.when))
|
||||
entries = append(entries, d)
|
||||
}
|
||||
return entries, nil
|
||||
@ -965,6 +995,22 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||
return do(ctx, uDirs)
|
||||
}
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
u, uDir, err := f.findUpstream(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if uDir == "" {
|
||||
fs.Debugf(dir, "Can't set modtime on upstream root. skipping.")
|
||||
return nil
|
||||
}
|
||||
if do := u.f.Features().DirSetModTime; do != nil {
|
||||
return do(ctx, uDir, modTime)
|
||||
}
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
@ -1099,6 +1145,8 @@ var (
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.OpenWriterAter = (*Fs)(nil)
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
|
@ -183,18 +183,23 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: false,
|
||||
WriteMimeType: false,
|
||||
GetTier: true,
|
||||
SetTier: true,
|
||||
BucketBased: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: false,
|
||||
WriteMimeType: false,
|
||||
GetTier: true,
|
||||
SetTier: true,
|
||||
BucketBased: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
ReadDirMetadata: true,
|
||||
WriteDirMetadata: true,
|
||||
WriteDirSetModTime: true,
|
||||
UserDirMetadata: true,
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
// We support reading MIME types no matter the wrapped fs
|
||||
f.features.ReadMimeType = true
|
||||
@ -784,6 +789,14 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return f.Fs.Mkdir(ctx, dir)
|
||||
}
|
||||
|
||||
// MkdirMetadata makes the root directory of the Fs object
|
||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
||||
if do := f.Fs.Features().MkdirMetadata; do != nil {
|
||||
return do(ctx, dir, metadata)
|
||||
}
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
@ -927,6 +940,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return do(ctx, srcFs.Fs, srcRemote, dstRemote)
|
||||
}
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
if do := f.Fs.Features().DirSetModTime; do != nil {
|
||||
return do(ctx, dir, modTime)
|
||||
}
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
@ -1497,6 +1518,8 @@ var (
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
|
@ -130,6 +130,16 @@ trying to recover an encrypted file with errors and it is desired to
|
||||
recover as much of the file as possible.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "strict_names",
|
||||
Help: `If set, this will raise an error when crypt comes across a filename that can't be decrypted.
|
||||
|
||||
(By default, rclone will just log a NOTICE and continue as normal.)
|
||||
This can happen if encrypted and unencrypted files are stored in the same
|
||||
directory (which is not recommended). It may also indicate a more serious
|
||||
problem that should be investigated.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "filename_encoding",
|
||||
Help: `How to encode the encrypted filename to text string.
|
||||
@ -263,19 +273,24 @@ func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs,
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||
WriteMimeType: false,
|
||||
BucketBased: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
CaseInsensitive: !cipher.dirNameEncrypt || cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||
WriteMimeType: false,
|
||||
BucketBased: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
ReadDirMetadata: true,
|
||||
WriteDirMetadata: true,
|
||||
WriteDirSetModTime: true,
|
||||
UserDirMetadata: true,
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
return f, err
|
||||
@ -294,6 +309,7 @@ type Options struct {
|
||||
PassBadBlocks bool `config:"pass_bad_blocks"`
|
||||
FilenameEncoding string `config:"filename_encoding"`
|
||||
Suffix string `config:"suffix"`
|
||||
StrictNames bool `config:"strict_names"`
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
@ -328,45 +344,64 @@ func (f *Fs) String() string {
|
||||
}
|
||||
|
||||
// Encrypt an object file name to entries.
|
||||
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
|
||||
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) error {
|
||||
remote := obj.Remote()
|
||||
decryptedRemote, err := f.cipher.DecryptFileName(remote)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
|
||||
return
|
||||
if f.opt.StrictNames {
|
||||
return fmt.Errorf("%s: undecryptable file name detected: %v", remote, err)
|
||||
}
|
||||
fs.Logf(remote, "Skipping undecryptable file name: %v", err)
|
||||
return nil
|
||||
}
|
||||
if f.opt.ShowMapping {
|
||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||
}
|
||||
*entries = append(*entries, f.newObject(obj))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Encrypt a directory file name to entries.
|
||||
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
|
||||
func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) error {
|
||||
remote := dir.Remote()
|
||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
|
||||
return
|
||||
if f.opt.StrictNames {
|
||||
return fmt.Errorf("%s: undecryptable dir name detected: %v", remote, err)
|
||||
}
|
||||
fs.Logf(remote, "Skipping undecryptable dir name: %v", err)
|
||||
return nil
|
||||
}
|
||||
if f.opt.ShowMapping {
|
||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||
}
|
||||
*entries = append(*entries, f.newDir(ctx, dir))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
||||
func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
||||
newEntries = entries[:0] // in place filter
|
||||
errors := 0
|
||||
var firsterr error
|
||||
for _, entry := range entries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
f.add(&newEntries, x)
|
||||
err = f.add(&newEntries, x)
|
||||
case fs.Directory:
|
||||
f.addDir(ctx, &newEntries, x)
|
||||
err = f.addDir(ctx, &newEntries, x)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown object type %T", entry)
|
||||
}
|
||||
if err != nil {
|
||||
errors++
|
||||
if firsterr == nil {
|
||||
firsterr = err
|
||||
}
|
||||
}
|
||||
}
|
||||
if firsterr != nil {
|
||||
return nil, fmt.Errorf("there were %v undecryptable name errors. first error: %v", errors, firsterr)
|
||||
}
|
||||
return newEntries, nil
|
||||
}
|
||||
@ -520,6 +555,37 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
|
||||
}
|
||||
|
||||
// MkdirMetadata makes the root directory of the Fs object
|
||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
||||
do := f.Fs.Features().MkdirMetadata
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
newDir, err := do(ctx, f.cipher.EncryptDirName(dir), metadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var entries = make(fs.DirEntries, 0, 1)
|
||||
err = f.addDir(ctx, &entries, newDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newDir, ok := entries[0].(fs.Directory)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: expecting %T to be fs.Directory", entries[0])
|
||||
}
|
||||
return newDir, nil
|
||||
}
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
do := f.Fs.Features().DirSetModTime
|
||||
if do == nil {
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx, f.cipher.EncryptDirName(dir), modTime)
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
@ -761,7 +827,7 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
|
||||
}
|
||||
out := make([]fs.Directory, len(dirs))
|
||||
for i, dir := range dirs {
|
||||
out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
|
||||
out[i] = fs.NewDirWrapper(f.cipher.EncryptDirName(dir.Remote()), dir)
|
||||
}
|
||||
return do(ctx, out)
|
||||
}
|
||||
@ -997,14 +1063,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
|
||||
// newDir returns a dir with the Name decrypted
|
||||
func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
|
||||
newDir := fs.NewDirCopy(ctx, dir)
|
||||
remote := dir.Remote()
|
||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "Undecryptable dir name: %v", err)
|
||||
} else {
|
||||
newDir.SetRemote(decryptedRemote)
|
||||
remote = decryptedRemote
|
||||
}
|
||||
newDir := fs.NewDirWrapper(remote, dir)
|
||||
return newDir
|
||||
}
|
||||
|
||||
@ -1207,6 +1273,8 @@ var (
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Wrapper = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
|
@ -287,7 +287,10 @@ func init() {
|
||||
},
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
System: systemMetadataInfo,
|
||||
Help: `User metadata is stored in the properties field of the drive object.`,
|
||||
Help: `User metadata is stored in the properties field of the drive object.
|
||||
|
||||
Metadata is supported on files and directories.
|
||||
`,
|
||||
},
|
||||
Options: append(driveOAuthOptions(), []fs.Option{{
|
||||
Name: "scope",
|
||||
@ -870,6 +873,11 @@ type Object struct {
|
||||
v2Download bool // generate v2 download link ondemand
|
||||
}
|
||||
|
||||
// Directory describes a drive directory
|
||||
type Directory struct {
|
||||
baseObject
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
@ -1374,15 +1382,20 @@ func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, err
|
||||
}
|
||||
f.isTeamDrive = opt.TeamDriveID != ""
|
||||
f.features = (&fs.Features{
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||
FilterAware: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||
FilterAware: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
ReadDirMetadata: true,
|
||||
WriteDirMetadata: true,
|
||||
WriteDirSetModTime: true,
|
||||
UserDirMetadata: true,
|
||||
DirModTimeUpdatesOnWrite: false, // FIXME need to check!
|
||||
}).Fill(ctx, f)
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
@ -1729,11 +1742,9 @@ func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut strin
|
||||
return pathIDOut, found, err
|
||||
}
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
|
||||
// createDir makes a directory with pathID as parent and name leaf with optional metadata
|
||||
func (f *Fs) createDir(ctx context.Context, pathID, leaf string, metadata fs.Metadata) (info *drive.File, err error) {
|
||||
leaf = f.opt.Enc.FromStandardName(leaf)
|
||||
// fmt.Println("Making", path)
|
||||
// Define the metadata for the directory we are going to create.
|
||||
pathID = actualID(pathID)
|
||||
createInfo := &drive.File{
|
||||
Name: leaf,
|
||||
@ -1741,14 +1752,63 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
|
||||
MimeType: driveFolderType,
|
||||
Parents: []string{pathID},
|
||||
}
|
||||
var info *drive.File
|
||||
var updateMetadata updateMetadataFn
|
||||
if len(metadata) > 0 {
|
||||
updateMetadata, err = f.updateMetadata(ctx, createInfo, metadata, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("create dir: failed to update metadata: %w", err)
|
||||
}
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info, err = f.svc.Files.Create(createInfo).
|
||||
Fields("id").
|
||||
Fields(f.getFileFields(ctx)).
|
||||
SupportsAllDrives(true).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if updateMetadata != nil {
|
||||
err = updateMetadata(ctx, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// updateDir updates an existing directory with the metadata passed in
|
||||
func (f *Fs) updateDir(ctx context.Context, dirID string, metadata fs.Metadata) (info *drive.File, err error) {
|
||||
if len(metadata) == 0 {
|
||||
return f.getFile(ctx, dirID, f.getFileFields(ctx))
|
||||
}
|
||||
dirID = actualID(dirID)
|
||||
updateInfo := &drive.File{}
|
||||
updateMetadata, err := f.updateMetadata(ctx, updateInfo, metadata, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("update dir: failed to update metadata from source object: %w", err)
|
||||
}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info, err = f.svc.Files.Update(dirID, updateInfo).
|
||||
Fields(f.getFileFields(ctx)).
|
||||
SupportsAllDrives(true).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = updateMetadata(ctx, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// CreateDir makes a directory with pathID as parent and name leaf
|
||||
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
|
||||
info, err := f.createDir(ctx, pathID, leaf, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@ -2161,7 +2221,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
|
||||
// Send the entry to the caller, queueing any directories as new jobs
|
||||
cb := func(entry fs.DirEntry) error {
|
||||
if d, isDir := entry.(*fs.Dir); isDir {
|
||||
if d, isDir := entry.(fs.Directory); isDir {
|
||||
job := listREntry{actualID(d.ID()), d.Remote()}
|
||||
sendJob(job)
|
||||
}
|
||||
@ -2338,11 +2398,11 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File
|
||||
if item.ResourceKey != "" {
|
||||
f.dirResourceKeys.Store(item.Id, item.ResourceKey)
|
||||
}
|
||||
when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
|
||||
d := fs.NewDir(remote, when).SetID(item.Id)
|
||||
if len(item.Parents) > 0 {
|
||||
d.SetParentID(item.Parents[0])
|
||||
baseObject, err := f.newBaseObject(ctx, remote, item)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d := &Directory{baseObject: baseObject}
|
||||
return d, nil
|
||||
case f.opt.AuthOwnerOnly && !isAuthOwned(item):
|
||||
// ignore object
|
||||
@ -2535,6 +2595,59 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// MkdirMetadata makes the directory passed in as dir.
|
||||
//
|
||||
// It shouldn't return an error if it already exists.
|
||||
//
|
||||
// If the metadata is not nil it is set.
|
||||
//
|
||||
// It returns the directory that was created.
|
||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
||||
var info *drive.File
|
||||
dirID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// Directory does not exist so create it
|
||||
var leaf, parentID string
|
||||
leaf, parentID, err = f.dirCache.FindPath(ctx, dir, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info, err = f.createDir(ctx, parentID, leaf, metadata)
|
||||
} else if err == nil {
|
||||
// Directory exists and needs updating
|
||||
info, err = f.updateDir(ctx, dirID, metadata)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert the info into a directory entry
|
||||
entry, err := f.itemToDirEntry(ctx, dir, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dirEntry, ok := entry.(fs.Directory)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: expecting %T to be an fs.Directory", entry)
|
||||
}
|
||||
|
||||
return dirEntry, nil
|
||||
}
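Other backends touched by this change (combine, compress, crypt) reach MkdirMetadata through the optional features table rather than the concrete drive type. A hedged sketch of that calling pattern, assuming the MkdirMetadata feature field added in this changeset (the helper name is invented):

```
package sketch

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// mkdirWithMetadata shows how a caller is expected to use the optional
// MkdirMetadata feature, falling back to a plain Mkdir when the backend
// does not advertise it (in which case no fs.Directory is returned).
func mkdirWithMetadata(ctx context.Context, f fs.Fs, dir string, meta fs.Metadata) (fs.Directory, error) {
	if do := f.Features().MkdirMetadata; do != nil {
		// Creates the directory and applies the metadata, or updates the
		// metadata if the directory already exists.
		return do(ctx, dir, meta)
	}
	// Backend can't store directory metadata; just make the directory.
	return nil, f.Mkdir(ctx, dir)
}
```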
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
dirID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o := baseObject{
|
||||
fs: f,
|
||||
remote: dir,
|
||||
id: dirID,
|
||||
}
|
||||
return o.SetModTime(ctx, modTime)
|
||||
}
|
||||
|
||||
// delete a file or directory unconditionally by ID
|
||||
func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
|
||||
return f.pacer.Call(func() (bool, error) {
|
||||
@ -2678,6 +2791,12 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
createInfo.Description = ""
|
||||
}
|
||||
|
||||
// Adjust metadata if required
|
||||
updateMetadata, err := f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), createInfo, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get the ID of the thing to copy
|
||||
// copy the contents if CopyShortcutContent
|
||||
// else copy the shortcut only
|
||||
@ -2691,7 +2810,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
var info *drive.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
copy := f.svc.Files.Copy(id, createInfo).
|
||||
Fields(partialFields).
|
||||
Fields(f.getFileFields(ctx)).
|
||||
SupportsAllDrives(true).
|
||||
KeepRevisionForever(f.opt.KeepRevisionForever)
|
||||
srcObj.addResourceKey(copy.Header())
|
||||
@ -2727,6 +2846,11 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
|
||||
}
|
||||
}
|
||||
// Finalise metadata
|
||||
err = updateMetadata(ctx, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newObject, nil
|
||||
}
|
||||
|
||||
@ -2900,13 +3024,19 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
dstParents := strings.Join(dstInfo.Parents, ",")
|
||||
dstInfo.Parents = nil
|
||||
|
||||
// Adjust metadata if required
|
||||
updateMetadata, err := f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), dstInfo, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Do the move
|
||||
var info *drive.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
|
||||
RemoveParents(srcParentID).
|
||||
AddParents(dstParents).
|
||||
Fields(partialFields).
|
||||
Fields(f.getFileFields(ctx)).
|
||||
SupportsAllDrives(true).
|
||||
Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
@ -2915,6 +3045,11 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Finalise metadata
|
||||
err = updateMetadata(ctx, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObjectWithInfo(ctx, remote, info)
|
||||
}
|
||||
|
||||
@ -3420,6 +3555,50 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
|
||||
list := f.svc.Files.List()
|
||||
if query != "" {
|
||||
list.Q(query)
|
||||
}
|
||||
|
||||
if f.opt.ListChunk > 0 {
|
||||
list.PageSize(f.opt.ListChunk)
|
||||
}
|
||||
list.SupportsAllDrives(true)
|
||||
list.IncludeItemsFromAllDrives(true)
|
||||
if f.isTeamDrive && !f.opt.SharedWithMe {
|
||||
list.DriveId(f.opt.TeamDriveID)
|
||||
list.Corpora("drive")
|
||||
}
|
||||
// If using appDataFolder then need to add Spaces
|
||||
if f.rootFolderID == "appDataFolder" {
|
||||
list.Spaces("appDataFolder")
|
||||
}
|
||||
|
||||
fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
|
||||
|
||||
var results []*drive.File
|
||||
for {
|
||||
var files *drive.FileList
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute query: %w", err)
|
||||
}
|
||||
if files.IncompleteSearch {
|
||||
fs.Errorf(f, "search result INCOMPLETE")
|
||||
}
|
||||
results = append(results, files.Files...)
|
||||
if files.NextPageToken == "" {
|
||||
break
|
||||
}
|
||||
list.PageToken(files.NextPageToken)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
Name: "get",
|
||||
Short: "Get command for fetching the drive config parameters",
|
||||
@ -3570,6 +3749,47 @@ Use the --interactive/-i or --dry-run flag to see what would be copied before co
|
||||
}, {
|
||||
Name: "importformats",
|
||||
Short: "Dump the import formats for debug purposes",
|
||||
}, {
|
||||
Name: "query",
|
||||
Short: "List files using Google Drive query language",
|
||||
Long: `This command lists files based on a query
|
||||
|
||||
Usage:
|
||||
|
||||
rclone backend query drive: query
|
||||
|
||||
The query syntax is documented at [Google Drive Search query terms and
|
||||
operators](https://developers.google.com/drive/api/guides/ref-search-terms).
|
||||
|
||||
For example:
|
||||
|
||||
rclone backend query drive: "'0ABc9DEFGHIJKLMNop0QRatUVW3X' in parents and name contains 'foo'"
|
||||
|
||||
If the query contains literal ' or \ characters, these need to be escaped with
|
||||
\ characters. "'" becomes "\'" and "\" becomes "\\\", for example to match a
|
||||
file named "foo ' \.txt":
|
||||
|
||||
rclone backend query drive: "name = 'foo \' \\\.txt'"
|
||||
|
||||
The result is a JSON array of matches, for example:
|
||||
|
||||
[
|
||||
{
|
||||
"createdTime": "2017-06-29T19:58:28.537Z",
|
||||
"id": "0AxBe_CDEF4zkGHI4d0FjYko2QkD",
|
||||
"md5Checksum": "68518d16be0c6fbfab918be61d658032",
|
||||
"mimeType": "text/plain",
|
||||
"modifiedTime": "2024-02-02T10:40:02.874Z",
|
||||
"name": "foo ' \\.txt",
|
||||
"parents": [
|
||||
"0BxAe_BCDE4zkFGZpcWJGek0xbzC"
|
||||
],
|
||||
"resourceKey": "0-ABCDEFGHIXJQpIGqBJq3MC",
|
||||
"sha1Checksum": "8f284fa768bfb4e45d076a579ab3905ab6bfa893",
|
||||
"size": "311",
|
||||
"webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
|
||||
}
|
||||
]`,
|
||||
}}
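The escaping rule described in the help above (backslash-escape literal ' and \ inside the single-quoted value) is the same one the new InternalTestQuery applies later in this change; a small self-contained sketch of building a safe name filter:

```
package main

import (
	"fmt"
	"strings"
)

// escapeQueryLiteral escapes a file name for use inside single quotes in a
// Drive query string, per the escaping rule in the query help above.
func escapeQueryLiteral(name string) string {
	name = strings.ReplaceAll(name, `\`, `\\`)
	name = strings.ReplaceAll(name, `'`, `\'`)
	return name
}

func main() {
	name := `foo ' \.txt`
	fmt.Printf("name = '%s' and trashed=false\n", escapeQueryLiteral(name))
	// prints: name = 'foo \' \\.txt' and trashed=false
}
```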
|
||||
|
||||
// Command the backend to run a named command
|
||||
@ -3687,6 +3907,17 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
|
||||
return f.exportFormats(ctx), nil
|
||||
case "importformats":
|
||||
return f.importFormats(ctx), nil
|
||||
case "query":
|
||||
if len(arg) == 1 {
|
||||
query := arg[0]
|
||||
var results, err = f.query(ctx, query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to execute query: %q, error: %w", query, err)
|
||||
}
|
||||
return results, nil
|
||||
} else {
|
||||
return nil, errors.New("need a query argument")
|
||||
}
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
@ -4193,6 +4424,37 @@ func (o *linkObject) ext() string {
|
||||
return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
|
||||
}
|
||||
|
||||
// Items returns the count of items in this directory or this
|
||||
// directory and subdirectories if known, -1 for unknown
|
||||
func (d *Directory) Items() int64 {
|
||||
return -1
|
||||
}
|
||||
|
||||
// SetMetadata sets metadata for a Directory
|
||||
//
|
||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
||||
func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
||||
info, err := d.fs.updateDir(ctx, d.id, metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update directory info: %w", err)
|
||||
}
|
||||
// Update directory from info returned
|
||||
baseObject, err := d.fs.newBaseObject(ctx, d.remote, info)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to process directory info: %w", err)
|
||||
}
|
||||
d.baseObject = baseObject
|
||||
return err
|
||||
}
|
||||
|
||||
// Hash does nothing on a directory
|
||||
//
|
||||
// This method is implemented with the incorrect type signature to
|
||||
// stop the Directory type asserting to fs.Object or fs.ObjectInfo
|
||||
func (d *Directory) Hash() {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
// templates for document link files
|
||||
const (
|
||||
urlTemplate = `[InternetShortcut]{{"\r"}}
|
||||
@ -4242,6 +4504,8 @@ var (
|
||||
_ fs.PublicLinker = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.MergeDirser = (*Fs)(nil)
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = (*Object)(nil)
|
||||
@ -4256,4 +4520,8 @@ var (
|
||||
_ fs.MimeTyper = (*linkObject)(nil)
|
||||
_ fs.IDer = (*linkObject)(nil)
|
||||
_ fs.ParentIDer = (*linkObject)(nil)
|
||||
_ fs.Directory = (*Directory)(nil)
|
||||
_ fs.SetModTimer = (*Directory)(nil)
|
||||
_ fs.SetMetadataer = (*Directory)(nil)
|
||||
_ fs.ParentIDer = (*Directory)(nil)
|
||||
)
|
||||
|
@@ -524,6 +524,41 @@ func (f *Fs) InternalTestCopyID(t *testing.T) {
})
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/Query
func (f *Fs) InternalTestQuery(t *testing.T) {
ctx := context.Background()
var err error
t.Run("BadQuery", func(t *testing.T) {
_, err = f.query(ctx, "this is a bad query")
require.Error(t, err)
assert.Contains(t, err.Error(), "failed to execute query")
})

t.Run("NoMatch", func(t *testing.T) {
results, err := f.query(ctx, fmt.Sprintf("name='%s' and name!='%s'", existingSubDir, existingSubDir))
require.NoError(t, err)
assert.Len(t, results, 0)
})

t.Run("GoodQuery", func(t *testing.T) {
pathSegments := strings.Split(existingFile, "/")
var parent string
for _, item := range pathSegments {
// the file name contains ' characters which must be escaped
escapedItem := f.opt.Enc.FromStandardName(item)
escapedItem = strings.ReplaceAll(escapedItem, `\`, `\\`)
escapedItem = strings.ReplaceAll(escapedItem, `'`, `\'`)

results, err := f.query(ctx, fmt.Sprintf("%strashed=false and name='%s'", parent, escapedItem))
require.NoError(t, err)
require.Len(t, results, 1)
assert.Len(t, results[0].Id, 33)
assert.Equal(t, results[0].Name, item)
parent = fmt.Sprintf("'%s' in parents and ", results[0].Id)
}
})
}

// TestIntegration/FsMkdir/FsPutFiles/Internal/AgeQuery
func (f *Fs) InternalTestAgeQuery(t *testing.T) {
// Check set up for filtering
@@ -611,6 +646,7 @@ func (f *Fs) InternalTest(t *testing.T) {
t.Run("Shortcuts", f.InternalTestShortcuts)
t.Run("UnTrash", f.InternalTestUnTrash)
t.Run("CopyID", f.InternalTestCopyID)
t.Run("Query", f.InternalTestQuery)
t.Run("AgeQuery", f.InternalTestAgeQuery)
t.Run("ShouldRetry", f.InternalTestShouldRetry)
}
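For reference, the escaping step used in GoodQuery above can be shown in isolation. This standalone sketch (the helper name is invented here) doubles backslashes first and then escapes single quotes, so the value can be embedded safely inside name='...' in a Drive query string:

package main

import (
    "fmt"
    "strings"
)

// escapeForDriveQuery makes a file name safe to embed between single quotes
// in a Drive query: backslashes are doubled, then single quotes are escaped.
func escapeForDriveQuery(name string) string {
    name = strings.ReplaceAll(name, `\`, `\\`)
    name = strings.ReplaceAll(name, `'`, `\'`)
    return name
}

func main() {
    fmt.Printf("name='%s'\n", escapeForDriveQuery(`it's a file\name`))
}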
@@ -428,15 +428,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
members := []*team.UserSelectorArg{&user}
args := team.NewMembersGetInfoArgs(members)

memberIds, err := f.team.MembersGetInfo(args)
memberIDs, err := f.team.MembersGetInfo(args)
if err != nil {
return nil, fmt.Errorf("invalid dropbox team member: %q: %w", opt.Impersonate, err)
}
if len(memberIds) == 0 || memberIds[0].MemberInfo == nil || memberIds[0].MemberInfo.Profile == nil {
if len(memberIDs) == 0 || memberIDs[0].MemberInfo == nil || memberIDs[0].MemberInfo.Profile == nil {
return nil, fmt.Errorf("dropbox team member not found: %q", opt.Impersonate)
}

cfg.AsMemberID = memberIds[0].MemberInfo.Profile.MemberProfile.TeamMemberId
cfg.AsMemberID = memberIDs[0].MemberInfo.Profile.MemberProfile.TeamMemberId
}

f.srv = files.New(cfg)
@@ -1231,7 +1231,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
return nil, err
}
var total uint64
var used = q.Used
used := q.Used
if q.Allocation != nil {
if q.Allocation.Individual != nil {
total += q.Allocation.Individual.Allocated
@@ -970,6 +970,8 @@ func (f *Fs) mkdir(ctx context.Context, abspath string) error {
f.putFtpConnection(&c, err)
if errX := textprotoError(err); errX != nil {
switch errX.Code {
case ftp.StatusRequestedFileActionOK: // some ftp servers apparently return 250 instead of 257
err = nil // see: https://forum.rclone.org/t/rclone-pop-up-an-i-o-error-when-creating-a-folder-in-a-mounted-ftp-drive/44368/
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
err = nil
case 521: // dir already exists: error number according to RFC 959: issue #2363
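The helper textprotoError is not shown in this hunk. A plausible sketch of what it does, stated as an assumption rather than the backend's verbatim code: unwrap the returned error to *textproto.Error so the numeric FTP reply code (250, 550, 521, ...) can be switched on.

package main

import (
    "errors"
    "fmt"
    "net/textproto"
)

// textprotoError unwraps err to *textproto.Error if possible, else nil.
func textprotoError(err error) *textproto.Error {
    var errX *textproto.Error
    if errors.As(err, &errX) {
        return errX
    }
    return nil
}

func main() {
    err := fmt.Errorf("mkdir failed: %w", &textproto.Error{Code: 550, Msg: "already exists"})
    if errX := textprotoError(err); errX != nil {
        fmt.Println(errX.Code) // 550
    }
}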
@@ -56,8 +56,7 @@ type MediaItem struct {
CreationTime time.Time `json:"creationTime"`
Width string `json:"width"`
Height string `json:"height"`
Photo struct {
} `json:"photo"`
Photo struct{} `json:"photo"`
} `json:"mediaMetadata"`
Filename string `json:"filename"`
}
@@ -68,7 +67,7 @@ type MediaItems struct {
NextPageToken string `json:"nextPageToken"`
}

//Content categories
// Content categories
// NONE Default content category. This category is ignored when any other category is used in the filter.
// LANDSCAPES Media items containing landscapes.
// RECEIPTS Media items containing receipts.
@@ -187,5 +186,5 @@ type BatchCreateResponse struct {

// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
MediaItemIds []string `json:"mediaItemIds"`
MediaItemIDs []string `json:"mediaItemIds"`
}
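The MediaItemIds to MediaItemIDs change above is a Go naming fix only; because the json tag stays mediaItemIds, the request body sent to the Google Photos API is unchanged. A quick standalone check:

package main

import (
    "encoding/json"
    "fmt"
)

type BatchRemoveItems struct {
    MediaItemIDs []string `json:"mediaItemIds"`
}

func main() {
    b, _ := json.Marshal(BatchRemoveItems{MediaItemIDs: []string{"abc123"}})
    fmt.Println(string(b)) // {"mediaItemIds":["abc123"]}
}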
@@ -282,7 +282,7 @@ func errorHandler(resp *http.Response) error {
if strings.HasPrefix(resp.Header.Get("Content-Type"), "image/") {
body = []byte("Image not found or broken")
}
var e = api.Error{
e := api.Error{
Details: api.ErrorDetails{
Code: resp.StatusCode,
Message: string(body),
@@ -704,7 +704,7 @@ func (f *Fs) createAlbum(ctx context.Context, albumTitle string) (album *api.Alb
Path: "/albums",
Parameters: url.Values{},
}
var request = api.CreateAlbum{
request := api.CreateAlbum{
Album: &api.Album{
Title: albumTitle,
},
@@ -1005,7 +1005,7 @@ func (f *Fs) commitBatchAlbumID(ctx context.Context, items []uploadedItem, resul
Method: "POST",
Path: "/mediaItems:batchCreate",
}
var request = api.BatchCreateRequest{
request := api.BatchCreateRequest{
AlbumID: albumID,
}
itemsInBatch := 0
@@ -1152,6 +1152,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
info = results[0]
}
}
if err != nil {
return fmt.Errorf("failed to commit batch: %w", err)
}

o.setMetaData(info)

@@ -1180,8 +1183,8 @@ func (o *Object) Remove(ctx context.Context) (err error) {
Path: "/albums/" + album.ID + ":batchRemoveMediaItems",
NoResponse: true,
}
var request = api.BatchRemoveItems{
MediaItemIds: []string{o.id},
request := api.BatchRemoveItems{
MediaItemIDs: []string{o.id},
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -80,6 +80,14 @@ func (f *Fs) dbDump(ctx context.Context, full bool, root string) error {
}
root = fspath.JoinRootPath(remoteFs.Root(), f.Root())
}
if f.db == nil {
if f.opt.MaxAge == 0 {
fs.Errorf(f, "db not found. (disabled with max_age = 0)")
} else {
fs.Errorf(f, "db not found.")
}
return kv.ErrInactive
}
op := &kvDump{
full: full,
root: root,
@@ -164,16 +164,21 @@ func NewFs(ctx context.Context, fsname, rpath string, cmap configmap.Mapper) (fs
}

stubFeatures := &fs.Features{
CanHaveEmptyDirectories: true,
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
PartialUploads: true,
CanHaveEmptyDirectories: true,
IsLocal: true,
ReadMimeType: true,
WriteMimeType: true,
SetTier: true,
GetTier: true,
ReadMetadata: true,
WriteMetadata: true,
UserMetadata: true,
ReadDirMetadata: true,
WriteDirMetadata: true,
WriteDirSetModTime: true,
UserDirMetadata: true,
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}
f.features = stubFeatures.Fill(ctx, f).Mask(ctx, f.Fs).WrapsFs(f, f.Fs)

@@ -341,6 +346,22 @@ func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
return errors.New("MergeDirs not supported")
}

// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
if do := f.Fs.Features().DirSetModTime; do != nil {
return do(ctx, dir, modTime)
}
return fs.ErrorNotImplemented
}

// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
if do := f.Fs.Features().MkdirMetadata; do != nil {
return do(ctx, dir, metadata)
}
return nil, fs.ErrorNotImplemented
}

// DirCacheFlush resets the directory cache - used in testing
// as an optional interface
func (f *Fs) DirCacheFlush() {
@@ -418,7 +439,9 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string

// Shutdown the backend, closing any background tasks and any cached connections.
func (f *Fs) Shutdown(ctx context.Context) (err error) {
err = f.db.Stop(false)
if f.db != nil && !f.db.IsStopped() {
err = f.db.Stop(false)
}
if do := f.Fs.Features().Shutdown; do != nil {
if err2 := do(ctx); err2 != nil {
err = err2
@@ -528,6 +551,8 @@ var (
_ fs.Abouter = (*Fs)(nil)
_ fs.Wrapper = (*Fs)(nil)
_ fs.MergeDirser = (*Fs)(nil)
_ fs.DirSetModTimer = (*Fs)(nil)
_ fs.MkdirMetadataer = (*Fs)(nil)
_ fs.DirCacheFlusher = (*Fs)(nil)
_ fs.ChangeNotifier = (*Fs)(nil)
_ fs.PublicLinker = (*Fs)(nil)
@@ -60,9 +60,11 @@ func (f *Fs) testUploadFromCrypt(t *testing.T) {
assert.NotNil(t, dst)

// check that hash was created
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.NoError(t, err)
assert.NotEmpty(t, hash)
if f.opt.MaxAge > 0 {
hash, err = f.getRawHash(ctx, hashType, fileName, anyFingerprint, longTime)
assert.NoError(t, err)
assert.NotEmpty(t, hash)
}
//t.Logf("hash is %q", hash)
_ = operations.Purge(ctx, f, dirName)
}
@@ -37,4 +37,9 @@ func TestIntegration(t *testing.T) {
opt.QuickTestOK = true
}
fstests.Run(t, &opt)
// test again with MaxAge = 0
if *fstest.RemoteName == "" {
opt.ExtraConfig = append(opt.ExtraConfig, fstests.ExtraConfigItem{Name: "TestHasher", Key: "max_age", Value: "0"})
fstests.Run(t, &opt)
}
}
@@ -71,7 +71,14 @@ func (o *Object) Hash(ctx context.Context, hashType hash.Type) (hashVal string,
f := o.f
if f.passHashes.Contains(hashType) {
fs.Debugf(o, "pass %s", hashType)
return o.Object.Hash(ctx, hashType)
hashVal, err = o.Object.Hash(ctx, hashType)
if hashVal != "" {
return hashVal, err
}
if err != nil {
fs.Debugf(o, "error passing %s: %v", hashType, err)
}
fs.Debugf(o, "passed %s is blank -- trying other methods", hashType)
}
if !f.suppHashes.Contains(hashType) {
fs.Debugf(o, "unsupp %s", hashType)
@ -53,6 +53,8 @@ netbsd, macOS and Solaris. It is **not** supported on Windows yet
|
||||
|
||||
User metadata is stored as extended attributes (which may not be
|
||||
supported by all file systems) under the "user.*" prefix.
|
||||
|
||||
Metadata is supported on files and directories.
|
||||
`,
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
@ -270,6 +272,11 @@ type Object struct {
|
||||
translatedLink bool // Is this object a translated link
|
||||
}
|
||||
|
||||
// Directory represents a local filesystem directory
|
||||
type Directory struct {
|
||||
Object
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
var (
|
||||
@ -301,15 +308,20 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: f.caseInsensitive(),
|
||||
CanHaveEmptyDirectories: true,
|
||||
IsLocal: true,
|
||||
SlowHash: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
|
||||
FilterAware: true,
|
||||
PartialUploads: true,
|
||||
CaseInsensitive: f.caseInsensitive(),
|
||||
CanHaveEmptyDirectories: true,
|
||||
IsLocal: true,
|
||||
SlowHash: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
ReadDirMetadata: true,
|
||||
WriteDirMetadata: true,
|
||||
WriteDirSetModTime: true,
|
||||
UserDirMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
UserMetadata: xattrSupported, // can only R/W general purpose metadata if xattrs are supported
|
||||
FilterAware: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
if opt.FollowSymlinks {
|
||||
f.lstat = os.Stat
|
||||
@ -453,6 +465,15 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, nil)
|
||||
}
|
||||
|
||||
// Create new directory object from the info passed in
|
||||
func (f *Fs) newDirectory(dir string, fi os.FileInfo) *Directory {
|
||||
o := f.newObject(dir)
|
||||
o.setMetadata(fi)
|
||||
return &Directory{
|
||||
Object: *o,
|
||||
}
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
@ -563,7 +584,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
// Ignore directories which are symlinks. These are junction points under windows which
|
||||
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
|
||||
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
|
||||
d := fs.NewDir(newRemote, fi.ModTime())
|
||||
d := f.newDirectory(newRemote, fi)
|
||||
entries = append(entries, d)
|
||||
}
|
||||
} else {
|
||||
@ -643,6 +664,58 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
o := Object{
|
||||
fs: f,
|
||||
remote: dir,
|
||||
path: f.localPath(dir),
|
||||
}
|
||||
return o.SetModTime(ctx, modTime)
|
||||
}
|
||||
|
||||
// MkdirMetadata makes the directory passed in as dir.
|
||||
//
|
||||
// It shouldn't return an error if it already exists.
|
||||
//
|
||||
// If the metadata is not nil it is set.
|
||||
//
|
||||
// It returns the directory that was created.
|
||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
||||
// Find and or create the directory
|
||||
localPath := f.localPath(dir)
|
||||
fi, err := f.lstat(localPath)
|
||||
if errors.Is(err, os.ErrNotExist) {
|
||||
err := f.Mkdir(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("mkdir metadata: failed make directory: %w", err)
|
||||
}
|
||||
fi, err = f.lstat(localPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("mkdir metadata: failed to read info: %w", err)
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Create directory object
|
||||
d := f.newDirectory(dir, fi)
|
||||
|
||||
// Set metadata on the directory object if provided
|
||||
if metadata != nil {
|
||||
err = d.writeMetadata(metadata)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to set metadata on directory: %w", err)
|
||||
}
|
||||
// Re-read info now we have finished setting stuff
|
||||
err = d.lstat()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("mkdir metadata: failed to re-read info: %w", err)
|
||||
}
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Rmdir removes the directory
|
||||
//
|
||||
// If it isn't empty it will return an error
|
||||
@ -720,27 +793,6 @@ func (f *Fs) readPrecision() (precision time.Duration) {
|
||||
return
|
||||
}
|
||||
|
||||
// Purge deletes all the files in the directory
|
||||
//
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
||||
dir = f.localPath(dir)
|
||||
fi, err := f.lstat(dir)
|
||||
if err != nil {
|
||||
// already purged
|
||||
if os.IsNotExist(err) {
|
||||
return fs.ErrorDirNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
if !fi.Mode().IsDir() {
|
||||
return fmt.Errorf("can't purge non directory: %q", dir)
|
||||
}
|
||||
return os.RemoveAll(dir)
|
||||
}
|
||||
|
||||
// Move src to this remote using server-side move operations.
|
||||
//
|
||||
// This is stored with the remote path given.
|
||||
@ -780,6 +832,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Fetch metadata if --metadata is in use
|
||||
meta, err := fs.GetMetadataOptions(ctx, f, src, fs.MetadataAsOpenOptions(ctx))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move: failed to read metadata: %w", err)
|
||||
}
|
||||
|
||||
// Do the move
|
||||
err = os.Rename(srcObj.path, dstObj.path)
|
||||
if os.IsNotExist(err) {
|
||||
@ -795,6 +853,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Set metadata if --metadata is in use
|
||||
err = dstObj.writeMetadata(meta)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("move: failed to set metadata: %w", err)
|
||||
}
|
||||
|
||||
// Update the info
|
||||
err = dstObj.lstat()
|
||||
if err != nil {
|
||||
@ -1447,6 +1511,10 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
|
||||
if runtime.GOOS == "windows" {
|
||||
s = filepath.ToSlash(s)
|
||||
vol := filepath.VolumeName(s)
|
||||
if vol == `\\?` && len(s) >= 6 {
|
||||
// `\\?\C:`
|
||||
vol = s[:6]
|
||||
}
|
||||
s = vol + enc.FromStandardPath(s[len(vol):])
|
||||
s = filepath.FromSlash(s)
|
||||
if !noUNC {
|
||||
@ -1459,15 +1527,51 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
|
||||
return s
|
||||
}
|
||||
|
||||
// Items returns the count of items in this directory or this
|
||||
// directory and subdirectories if known, -1 for unknown
|
||||
func (d *Directory) Items() int64 {
|
||||
return -1
|
||||
}
|
||||
|
||||
// ID returns the internal ID of this directory if known, or
|
||||
// "" otherwise
|
||||
func (d *Directory) ID() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// SetMetadata sets metadata for a Directory
|
||||
//
|
||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
||||
func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
||||
err := d.writeMetadata(metadata)
|
||||
if err != nil {
|
||||
return fmt.Errorf("SetMetadata failed on Directory: %w", err)
|
||||
}
|
||||
// Re-read info now we have finished setting stuff
|
||||
return d.lstat()
|
||||
}
|
||||
|
||||
// Hash does nothing on a directory
|
||||
//
|
||||
// This method is implemented with the incorrect type signature to
|
||||
// stop the Directory type asserting to fs.Object or fs.ObjectInfo
|
||||
func (d *Directory) Hash() {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.OpenWriterAter = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.Metadataer = &Object{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.Commander = &Fs{}
|
||||
_ fs.OpenWriterAter = &Fs{}
|
||||
_ fs.DirSetModTimer = &Fs{}
|
||||
_ fs.MkdirMetadataer = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.Metadataer = &Object{}
|
||||
_ fs.Directory = &Directory{}
|
||||
_ fs.SetModTimer = &Directory{}
|
||||
_ fs.SetMetadataer = &Directory{}
|
||||
)
|
||||
|
@ -4,7 +4,6 @@
|
||||
package local
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
@ -13,7 +12,13 @@ const haveSetBTime = true
|
||||
|
||||
// setBTime sets the birth time of the file passed in
|
||||
func setBTime(name string, btime time.Time) (err error) {
|
||||
h, err := syscall.Open(name, os.O_RDWR, 0755)
|
||||
pathp, err := syscall.UTF16PtrFromString(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h, err := syscall.CreateFile(pathp,
|
||||
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
|
||||
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ import (
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -260,6 +261,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
case fs.ErrorObjectNotFound:
|
||||
return f, nil
|
||||
case fs.ErrorIsFile:
|
||||
// Correct root if definitely pointing to a file
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." || f.root == "/" {
|
||||
f.root = ""
|
||||
}
|
||||
// Fs points to the parent directory
|
||||
return f, err
|
||||
default:
|
||||
|
@ -7,7 +7,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
timeFormat = `"` + "2006-01-02T15:04:05.999Z" + `"`
|
||||
|
||||
// PackageTypeOneNote is the package type value for OneNote files
|
||||
PackageTypeOneNote = "oneNote"
|
||||
@ -40,17 +40,17 @@ var _ error = (*Error)(nil)
|
||||
// Identity represents an identity of an actor. For example, an actor
|
||||
// can be a user, device, or application.
|
||||
type Identity struct {
|
||||
DisplayName string `json:"displayName"`
|
||||
ID string `json:"id"`
|
||||
DisplayName string `json:"displayName,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
}
|
||||
|
||||
// IdentitySet is a keyed collection of Identity objects. It is used
|
||||
// to represent a set of identities associated with various events for
|
||||
// an item, such as created by or last modified by.
|
||||
type IdentitySet struct {
|
||||
User Identity `json:"user"`
|
||||
Application Identity `json:"application"`
|
||||
Device Identity `json:"device"`
|
||||
User Identity `json:"user,omitempty"`
|
||||
Application Identity `json:"application,omitempty"`
|
||||
Device Identity `json:"device,omitempty"`
|
||||
}
|
||||
|
||||
// Quota groups storage space quota-related information on OneDrive into a single structure.
|
||||
@ -150,16 +150,15 @@ type FileFacet struct {
|
||||
// facet can be used to specify the last modified date or created date
|
||||
// of the item as it was on the local device.
|
||||
type FileSystemInfoFacet struct {
|
||||
CreatedDateTime Timestamp `json:"createdDateTime"` // The UTC date and time the file was created on a client.
|
||||
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // The UTC date and time the file was last modified on a client.
|
||||
CreatedDateTime Timestamp `json:"createdDateTime,omitempty"` // The UTC date and time the file was created on a client.
|
||||
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime,omitempty"` // The UTC date and time the file was last modified on a client.
|
||||
}
|
||||
|
||||
// DeletedFacet indicates that the item on OneDrive has been
|
||||
// deleted. In this version of the API, the presence (non-null) of the
|
||||
// facet value indicates that the file was deleted. A null (or
|
||||
// missing) value indicates that the file is not deleted.
|
||||
type DeletedFacet struct {
|
||||
}
|
||||
type DeletedFacet struct{}
|
||||
|
||||
// PackageFacet indicates that a DriveItem is the top level item
|
||||
// in a "package" or a collection of items that should be treated as a collection instead of individual items.
|
||||
@ -168,31 +167,141 @@ type PackageFacet struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// SharedType indicates a DriveItem has been shared with others. The resource includes information about how the item is shared.
|
||||
// If a Driveitem has a non-null shared facet, the item has been shared.
|
||||
type SharedType struct {
|
||||
Owner IdentitySet `json:"owner,omitempty"` // The identity of the owner of the shared item. Read-only.
|
||||
Scope string `json:"scope,omitempty"` // Indicates the scope of how the item is shared: anonymous, organization, or users. Read-only.
|
||||
SharedBy IdentitySet `json:"sharedBy,omitempty"` // The identity of the user who shared the item. Read-only.
|
||||
SharedDateTime Timestamp `json:"sharedDateTime,omitempty"` // The UTC date and time when the item was shared. Read-only.
|
||||
}
|
||||
|
||||
// SharingInvitationType groups invitation-related data items into a single structure.
|
||||
type SharingInvitationType struct {
|
||||
Email string `json:"email,omitempty"` // The email address provided for the recipient of the sharing invitation. Read-only.
|
||||
InvitedBy *IdentitySet `json:"invitedBy,omitempty"` // Provides information about who sent the invitation that created this permission, if that information is available. Read-only.
|
||||
SignInRequired bool `json:"signInRequired,omitempty"` // If true the recipient of the invitation needs to sign in in order to access the shared item. Read-only.
|
||||
}
|
||||
|
||||
// SharingLinkType groups link-related data items into a single structure.
|
||||
// If a Permission resource has a non-null sharingLink facet, the permission represents a sharing link (as opposed to permissions granted to a person or group).
|
||||
type SharingLinkType struct {
|
||||
Application *Identity `json:"application,omitempty"` // The app the link is associated with.
|
||||
Type LinkType `json:"type,omitempty"` // The type of the link created.
|
||||
Scope LinkScope `json:"scope,omitempty"` // The scope of the link represented by this permission. Value anonymous indicates the link is usable by anyone, organization indicates the link is only usable for users signed into the same tenant.
|
||||
WebHTML string `json:"webHtml,omitempty"` // For embed links, this property contains the HTML code for an <iframe> element that will embed the item in a webpage.
|
||||
WebURL string `json:"webUrl,omitempty"` // A URL that opens the item in the browser on the OneDrive website.
|
||||
}
|
||||
|
||||
// LinkType represents the type of SharingLinkType created.
|
||||
type LinkType string
|
||||
|
||||
const (
|
||||
ViewLinkType LinkType = "view" // ViewLinkType (role: read) A view-only sharing link, allowing read-only access.
|
||||
EditLinkType LinkType = "edit" // EditLinkType (role: write) An edit sharing link, allowing read-write access.
|
||||
EmbedLinkType LinkType = "embed" // EmbedLinkType (role: read) A view-only sharing link that can be used to embed content into a host webpage. Embed links are not available for OneDrive for Business or SharePoint.
|
||||
)
|
||||
|
||||
// LinkScope represents the scope of the link represented by this permission.
|
||||
// Value anonymous indicates the link is usable by anyone, organization indicates the link is only usable for users signed into the same tenant.
|
||||
type LinkScope string
|
||||
|
||||
const (
|
||||
AnonymousScope LinkScope = "anonymous" // AnonymousScope = Anyone with the link has access, without needing to sign in. This may include people outside of your organization.
|
||||
OrganizationScope LinkScope = "organization" // OrganizationScope = Anyone signed into your organization (tenant) can use the link to get access. Only available in OneDrive for Business and SharePoint.
|
||||
|
||||
)
|
||||
|
||||
// PermissionsType provides information about a sharing permission granted for a DriveItem resource.
|
||||
// Sharing permissions have a number of different forms. The Permission resource represents these different forms through facets on the resource.
|
||||
type PermissionsType struct {
|
||||
ID string `json:"id"` // The unique identifier of the permission among all permissions on the item. Read-only.
|
||||
GrantedTo *IdentitySet `json:"grantedTo,omitempty"` // For user type permissions, the details of the users & applications for this permission. Read-only.
|
||||
GrantedToIdentities []*IdentitySet `json:"grantedToIdentities,omitempty"` // For link type permissions, the details of the users to whom permission was granted. Read-only.
|
||||
Invitation *SharingInvitationType `json:"invitation,omitempty"` // Details of any associated sharing invitation for this permission. Read-only.
|
||||
InheritedFrom *ItemReference `json:"inheritedFrom,omitempty"` // Provides a reference to the ancestor of the current permission, if it is inherited from an ancestor. Read-only.
|
||||
Link *SharingLinkType `json:"link,omitempty"` // Provides the link details of the current permission, if it is a link type permissions. Read-only.
|
||||
Roles []Role `json:"roles,omitempty"` // The type of permission (read, write, owner, member). Read-only.
|
||||
ShareID string `json:"shareId,omitempty"` // A unique token that can be used to access this shared item via the shares API. Read-only.
|
||||
}
|
||||
|
||||
// Role represents the type of permission (read, write, owner, member)
|
||||
type Role string
|
||||
|
||||
const (
|
||||
ReadRole Role = "read" // ReadRole provides the ability to read the metadata and contents of the item.
|
||||
WriteRole Role = "write" // WriteRole provides the ability to read and modify the metadata and contents of the item.
|
||||
OwnerRole Role = "owner" // OwnerRole represents the owner role for SharePoint and OneDrive for Business.
|
||||
MemberRole Role = "member" // MemberRole represents the member role for SharePoint and OneDrive for Business.
|
||||
)
|
||||
|
||||
// PermissionsResponse is the response to the list permissions method
|
||||
type PermissionsResponse struct {
|
||||
Value []*PermissionsType `json:"value"` // An array of Item objects
|
||||
}
|
||||
|
||||
// AddPermissionsRequest is the request for the add permissions method
|
||||
type AddPermissionsRequest struct {
|
||||
Recipients []DriveRecipient `json:"recipients,omitempty"` // A collection of recipients who will receive access and the sharing invitation.
|
||||
Message string `json:"message,omitempty"` // A plain text formatted message that is included in the sharing invitation. Maximum length 2000 characters.
|
||||
RequireSignIn bool `json:"requireSignIn,omitempty"` // Specifies whether the recipient of the invitation is required to sign-in to view the shared item.
|
||||
SendInvitation bool `json:"sendInvitation,omitempty"` // If true, a sharing link is sent to the recipient. Otherwise, a permission is granted directly without sending a notification.
|
||||
Roles []Role `json:"roles,omitempty"` // Specify the roles that are to be granted to the recipients of the sharing invitation.
|
||||
RetainInheritedPermissions bool `json:"retainInheritedPermissions,omitempty"` // Optional. If true (default), any existing inherited permissions are retained on the shared item when sharing this item for the first time. If false, all existing permissions are removed when sharing for the first time. OneDrive Business Only.
|
||||
}
|
||||
|
||||
// UpdatePermissionsRequest is the request for the update permissions method
|
||||
type UpdatePermissionsRequest struct {
|
||||
Roles []Role `json:"roles,omitempty"` // Specify the roles that are to be granted to the recipients of the sharing invitation.
|
||||
}
|
||||
|
||||
// DriveRecipient represents a person, group, or other recipient to share with using the invite action.
|
||||
type DriveRecipient struct {
|
||||
Email string `json:"email,omitempty"` // The email address for the recipient, if the recipient has an associated email address.
|
||||
Alias string `json:"alias,omitempty"` // The alias of the domain object, for cases where an email address is unavailable (e.g. security groups).
|
||||
ObjectID string `json:"objectId,omitempty"` // The unique identifier for the recipient in the directory.
|
||||
}
|
||||
|
||||
// Item represents metadata for an item in OneDrive
|
||||
type Item struct {
|
||||
ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
|
||||
Name string `json:"name"` // The name of the item (filename and extension). Read-write.
|
||||
ETag string `json:"eTag"` // eTag for the entire item (metadata + content). Read-only.
|
||||
CTag string `json:"cTag"` // An eTag for the content of the item. This eTag is not changed if only the metadata is changed. Read-only.
|
||||
CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only.
|
||||
LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only.
|
||||
CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only.
|
||||
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
|
||||
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
|
||||
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
|
||||
WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only.
|
||||
Description string `json:"description"` // Provide a user-visible description of the item. Read-write.
|
||||
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
|
||||
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
|
||||
RemoteItem *RemoteItemFacet `json:"remoteItem"` // Remote Item metadata, if the item is a remote shared item. Read-only.
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||
ID string `json:"id"` // The unique identifier of the item within the Drive. Read-only.
|
||||
Name string `json:"name"` // The name of the item (filename and extension). Read-write.
|
||||
ETag string `json:"eTag"` // eTag for the entire item (metadata + content). Read-only.
|
||||
CTag string `json:"cTag"` // An eTag for the content of the item. This eTag is not changed if only the metadata is changed. Read-only.
|
||||
CreatedBy IdentitySet `json:"createdBy"` // Identity of the user, device, and application which created the item. Read-only.
|
||||
LastModifiedBy IdentitySet `json:"lastModifiedBy"` // Identity of the user, device, and application which last modified the item. Read-only.
|
||||
CreatedDateTime Timestamp `json:"createdDateTime"` // Date and time of item creation. Read-only.
|
||||
LastModifiedDateTime Timestamp `json:"lastModifiedDateTime"` // Date and time the item was last modified. Read-only.
|
||||
Size int64 `json:"size"` // Size of the item in bytes. Read-only.
|
||||
ParentReference *ItemReference `json:"parentReference"` // Parent information, if the item has a parent. Read-write.
|
||||
WebURL string `json:"webUrl"` // URL that displays the resource in the browser. Read-only.
|
||||
Description string `json:"description,omitempty"` // Provides a user-visible description of the item. Read-write. Only on OneDrive Personal. Undocumented limit of 1024 characters.
|
||||
Folder *FolderFacet `json:"folder"` // Folder metadata, if the item is a folder. Read-only.
|
||||
File *FileFacet `json:"file"` // File metadata, if the item is a file. Read-only.
|
||||
RemoteItem *RemoteItemFacet `json:"remoteItem"` // Remote Item metadata, if the item is a remote shared item. Read-only.
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||
// Image *ImageFacet `json:"image"` // Image metadata, if the item is an image. Read-only.
|
||||
// Photo *PhotoFacet `json:"photo"` // Photo metadata, if the item is a photo. Read-only.
|
||||
// Audio *AudioFacet `json:"audio"` // Audio metadata, if the item is an audio file. Read-only.
|
||||
// Video *VideoFacet `json:"video"` // Video metadata, if the item is a video. Read-only.
|
||||
// Location *LocationFacet `json:"location"` // Location metadata, if the item has location data. Read-only.
|
||||
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
|
||||
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
|
||||
Package *PackageFacet `json:"package"` // If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others. Read-only.
|
||||
Deleted *DeletedFacet `json:"deleted"` // Information about the deleted state of the item. Read-only.
|
||||
Malware *struct{} `json:"malware,omitempty"` // Malware metadata, if the item was detected to contain malware. Read-only. (Currently has no properties.)
|
||||
Shared *SharedType `json:"shared,omitempty"` // Indicates that the item has been shared with others and provides information about the shared state of the item. Read-only.
|
||||
}
|
||||
|
||||
// Metadata represents a request to update Metadata.
|
||||
// It includes only the writeable properties.
|
||||
// omitempty is intentionally included for all, per https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_update?view=odsp-graph-online#request-body
|
||||
type Metadata struct {
|
||||
Description string `json:"description,omitempty"` // Provides a user-visible description of the item. Read-write. Only on OneDrive Personal. Undocumented limit of 1024 characters.
|
||||
FileSystemInfo *FileSystemInfoFacet `json:"fileSystemInfo,omitempty"` // File system information on client. Read-write.
|
||||
}
|
||||
|
||||
// IsEmpty returns true if the metadata is empty (there is nothing to set)
|
||||
func (m Metadata) IsEmpty() bool {
|
||||
return m.Description == "" && m.FileSystemInfo == &FileSystemInfoFacet{}
|
||||
}
|
||||
|
||||
// DeltaResponse is the response to the view delta method
|
||||
@ -216,6 +325,12 @@ type CreateItemRequest struct {
|
||||
ConflictBehavior string `json:"@name.conflictBehavior"` // Determines what to do if an item with a matching name already exists in this folder. Accepted values are: rename, replace, and fail (the default).
|
||||
}
|
||||
|
||||
// CreateItemWithMetadataRequest is like CreateItemRequest but also allows setting Metadata
|
||||
type CreateItemWithMetadataRequest struct {
|
||||
CreateItemRequest
|
||||
Metadata
|
||||
}
|
||||
|
||||
// SetFileSystemInfo is used to Update an object's FileSystemInfo.
|
||||
type SetFileSystemInfo struct {
|
||||
FileSystemInfo FileSystemInfoFacet `json:"fileSystemInfo"` // File system information on client. Read-write.
|
||||
@ -223,7 +338,7 @@ type SetFileSystemInfo struct {
|
||||
|
||||
// CreateUploadRequest is used by CreateUploadSession to set the dates correctly
|
||||
type CreateUploadRequest struct {
|
||||
Item SetFileSystemInfo `json:"item"`
|
||||
Item Metadata `json:"item"`
|
||||
}
|
||||
|
||||
// CreateUploadResponse is the response from creating an upload session
|
||||
@ -419,6 +534,11 @@ func (i *Item) GetParentReference() *ItemReference {
|
||||
return i.ParentReference
|
||||
}
|
||||
|
||||
// MalwareDetected returns true if OneDrive has detected that this item contains malware.
|
||||
func (i *Item) MalwareDetected() bool {
|
||||
return i.Malware != nil
|
||||
}
|
||||
|
||||
// IsRemote checks if item is a remote item
|
||||
func (i *Item) IsRemote() bool {
|
||||
return i.RemoteItem != nil
|
||||
@ -461,7 +581,7 @@ type DrivesResponse struct {
|
||||
Drives []DriveResource `json:"value"`
|
||||
}
|
||||
|
||||
// SiteResource is part of the response from from "/sites/root:"
|
||||
// SiteResource is part of the response from "/sites/root:"
|
||||
type SiteResource struct {
|
||||
SiteID string `json:"id"`
|
||||
SiteName string `json:"displayName"`
|
||||
|
951
backend/onedrive/metadata.go
Normal file
@@ -0,0 +1,951 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/backend/onedrive/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/dircache"
|
||||
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
|
||||
)
|
||||
|
||||
const (
|
||||
dirMimeType = "inode/directory"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.999Z" // mS for OneDrive Personal, otherwise only S
|
||||
)
|
||||
|
||||
// system metadata keys which this backend owns
|
||||
var systemMetadataInfo = map[string]fs.MetadataHelp{
|
||||
"content-type": {
|
||||
Help: "The MIME type of the file.",
|
||||
Type: "string",
|
||||
Example: "text/plain",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"mtime": {
|
||||
Help: "Time of last modification with S accuracy (mS for OneDrive Personal).",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05Z",
|
||||
},
|
||||
"btime": {
|
||||
Help: "Time of file birth (creation) with S accuracy (mS for OneDrive Personal).",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05Z",
|
||||
},
|
||||
"utime": {
|
||||
Help: "Time of upload with S accuracy (mS for OneDrive Personal).",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05Z",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"created-by-display-name": {
|
||||
Help: "Display name of the user that created the item.",
|
||||
Type: "string",
|
||||
Example: "John Doe",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"created-by-id": {
|
||||
Help: "ID of the user that created the item.",
|
||||
Type: "string",
|
||||
Example: "48d31887-5fad-4d73-a9f5-3c356e68a038",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"description": {
|
||||
Help: "A short description of the file. Max 1024 characters. Only supported for OneDrive Personal.",
|
||||
Type: "string",
|
||||
Example: "Contract for signing",
|
||||
},
|
||||
"id": {
|
||||
Help: "The unique identifier of the item within OneDrive.",
|
||||
Type: "string",
|
||||
Example: "01BYE5RZ6QN3ZWBTUFOFD3GSPGOHDJD36K",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"last-modified-by-display-name": {
|
||||
Help: "Display name of the user that last modified the item.",
|
||||
Type: "string",
|
||||
Example: "John Doe",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"last-modified-by-id": {
|
||||
Help: "ID of the user that last modified the item.",
|
||||
Type: "string",
|
||||
Example: "48d31887-5fad-4d73-a9f5-3c356e68a038",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"malware-detected": {
|
||||
Help: "Whether OneDrive has detected that the item contains malware.",
|
||||
Type: "boolean",
|
||||
Example: "true",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"package-type": {
|
||||
Help: "If present, indicates that this item is a package instead of a folder or file. Packages are treated like files in some contexts and folders in others.",
|
||||
Type: "string",
|
||||
Example: "oneNote",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"shared-owner-id": {
|
||||
Help: "ID of the owner of the shared item (if shared).",
|
||||
Type: "string",
|
||||
Example: "48d31887-5fad-4d73-a9f5-3c356e68a038",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"shared-by-id": {
|
||||
Help: "ID of the user that shared the item (if shared).",
|
||||
Type: "string",
|
||||
Example: "48d31887-5fad-4d73-a9f5-3c356e68a038",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"shared-scope": {
|
||||
Help: "If shared, indicates the scope of how the item is shared: anonymous, organization, or users.",
|
||||
Type: "string",
|
||||
Example: "users",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"shared-time": {
|
||||
Help: "Time when the item was shared, with S accuracy (mS for OneDrive Personal).",
|
||||
Type: "RFC 3339",
|
||||
Example: "2006-01-02T15:04:05Z",
|
||||
ReadOnly: true,
|
||||
},
|
||||
"permissions": {
|
||||
Help: "Permissions in a JSON dump of OneDrive format. Enable with --onedrive-metadata-permissions. Properties: id, grantedTo, grantedToIdentities, invitation, inheritedFrom, link, roles, shareId",
|
||||
Type: "JSON",
|
||||
Example: "{}",
|
||||
},
|
||||
}
|
||||
|
||||
// rwChoices type for fs.Bits
|
||||
type rwChoices struct{}
|
||||
|
||||
func (rwChoices) Choices() []fs.BitsChoicesInfo {
|
||||
return []fs.BitsChoicesInfo{
|
||||
{Bit: uint64(rwOff), Name: "off"},
|
||||
{Bit: uint64(rwRead), Name: "read"},
|
||||
{Bit: uint64(rwWrite), Name: "write"},
|
||||
}
|
||||
}
|
||||
|
||||
// rwChoice type alias
|
||||
type rwChoice = fs.Bits[rwChoices]
|
||||
|
||||
const (
|
||||
rwRead rwChoice = 1 << iota
|
||||
rwWrite
|
||||
rwOff rwChoice = 0
|
||||
)
|
||||
|
||||
// Examples for the options
|
||||
var rwExamples = fs.OptionExamples{{
|
||||
Value: rwOff.String(),
|
||||
Help: "Do not read or write the value",
|
||||
}, {
|
||||
Value: rwRead.String(),
|
||||
Help: "Read the value only",
|
||||
}, {
|
||||
Value: rwWrite.String(),
|
||||
Help: "Write the value only",
|
||||
}, {
|
||||
Value: (rwRead | rwWrite).String(),
|
||||
Help: "Read and Write the value.",
|
||||
}}
|
||||
|
||||
// Metadata describes metadata properties shared by both Objects and Directories
|
||||
type Metadata struct {
|
||||
fs *Fs // what this object/dir is part of
|
||||
remote string // remote, for convenience when obj/dir not in scope
|
||||
mimeType string // Content-Type of object from server (may not be as uploaded)
|
||||
description string // Provides a user-visible description of the item. Read-write. Only on OneDrive Personal
|
||||
mtime time.Time // Time of last modification with S accuracy.
|
||||
btime time.Time // Time of file birth (creation) with S accuracy.
|
||||
utime time.Time // Time of upload with S accuracy.
|
||||
createdBy api.IdentitySet // user that created the item
|
||||
lastModifiedBy api.IdentitySet // user that last modified the item
|
||||
malwareDetected bool // Whether OneDrive has detected that the item contains malware.
|
||||
packageType string // If present, indicates that this item is a package instead of a folder or file.
|
||||
shared *api.SharedType // information about the shared state of the item, if shared
|
||||
normalizedID string // the normalized ID of the object or dir
|
||||
permissions []*api.PermissionsType // The current set of permissions for the item. Note that to save API calls, this is not guaranteed to be cached on the object. Use m.Get() to refresh.
|
||||
queuedPermissions []*api.PermissionsType // The set of permissions queued to be updated.
|
||||
permsAddOnly bool // Whether to disable "update" and "remove" (for example, during server-side copy when the dst will have new IDs)
|
||||
}
|
||||
|
||||
// Get retrieves the cached metadata and converts it to fs.Metadata.
|
||||
// This is most typically used when OneDrive is the source (as opposed to the dest).
|
||||
// If m.fs.opt.MetadataPermissions includes "read" then this will also include permissions, which requires an API call.
|
||||
// Get does not use an API call otherwise.
|
||||
func (m *Metadata) Get(ctx context.Context) (metadata fs.Metadata, err error) {
|
||||
metadata = make(fs.Metadata, 17)
|
||||
metadata["content-type"] = m.mimeType
|
||||
metadata["mtime"] = m.mtime.Format(timeFormatOut)
|
||||
metadata["btime"] = m.btime.Format(timeFormatOut)
|
||||
metadata["utime"] = m.utime.Format(timeFormatOut)
|
||||
metadata["created-by-display-name"] = m.createdBy.User.DisplayName
|
||||
metadata["created-by-id"] = m.createdBy.User.ID
|
||||
if m.description != "" {
|
||||
metadata["description"] = m.description
|
||||
}
|
||||
metadata["id"] = m.normalizedID
|
||||
metadata["last-modified-by-display-name"] = m.lastModifiedBy.User.DisplayName
|
||||
metadata["last-modified-by-id"] = m.lastModifiedBy.User.ID
|
||||
metadata["malware-detected"] = fmt.Sprint(m.malwareDetected)
|
||||
if m.packageType != "" {
|
||||
metadata["package-type"] = m.packageType
|
||||
}
|
||||
if m.shared != nil {
|
||||
metadata["shared-owner-id"] = m.shared.Owner.User.ID
|
||||
metadata["shared-by-id"] = m.shared.SharedBy.User.ID
|
||||
metadata["shared-scope"] = m.shared.Scope
|
||||
metadata["shared-time"] = time.Time(m.shared.SharedDateTime).Format(timeFormatOut)
|
||||
}
|
||||
if m.fs.opt.MetadataPermissions.IsSet(rwRead) {
|
||||
p, _, err := m.fs.getPermissions(ctx, m.normalizedID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get permissions: %w", err)
|
||||
}
|
||||
m.permissions = p
|
||||
|
||||
if len(p) > 0 {
|
||||
fs.PrettyPrint(m.permissions, "perms", fs.LogLevelDebug)
|
||||
buf, err := json.Marshal(m.permissions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal permissions: %w", err)
|
||||
}
|
||||
metadata["permissions"] = string(buf)
|
||||
}
|
||||
}
|
||||
return metadata, nil
|
||||
}
|
||||
|
||||
// Set takes fs.Metadata and parses/converts it to cached Metadata.
|
||||
// This is most typically used when OneDrive is the destination (as opposed to the source).
|
||||
// It does not actually update the remote (use Write for that.)
|
||||
// It sets only the writeable metadata properties (i.e. read-only properties are skipped.)
|
||||
// Permissions are included if m.fs.opt.MetadataPermissions includes "write".
|
||||
// It returns errors if writeable properties can't be parsed.
|
||||
// It does not return errors for unsupported properties that may be passed in.
|
||||
// It returns the number of writeable properties set (if it is 0, we can skip the Write API call.)
|
||||
func (m *Metadata) Set(ctx context.Context, metadata fs.Metadata) (numSet int, err error) {
|
||||
numSet = 0
|
||||
for k, v := range metadata {
|
||||
k, v := k, v
|
||||
switch k {
|
||||
case "mtime":
|
||||
t, err := time.Parse(timeFormatIn, v)
|
||||
if err != nil {
|
||||
return numSet, fmt.Errorf("failed to parse metadata %q = %q: %w", k, v, err)
|
||||
}
|
||||
m.mtime = t
|
||||
numSet++
|
||||
case "btime":
|
||||
t, err := time.Parse(timeFormatIn, v)
|
||||
if err != nil {
|
||||
return numSet, fmt.Errorf("failed to parse metadata %q = %q: %w", k, v, err)
|
||||
}
|
||||
m.btime = t
|
||||
numSet++
|
||||
case "description":
|
||||
if m.fs.driveType != driveTypePersonal {
|
||||
fs.Debugf(m.remote, "metadata description is only supported for OneDrive Personal -- skipping: %s", v)
|
||||
continue
|
||||
}
|
||||
m.description = v
|
||||
numSet++
|
||||
case "permissions":
|
||||
if !m.fs.opt.MetadataPermissions.IsSet(rwWrite) {
|
||||
continue
|
||||
}
|
||||
var perms []*api.PermissionsType
|
||||
err := json.Unmarshal([]byte(v), &perms)
|
||||
if err != nil {
|
||||
return numSet, fmt.Errorf("failed to unmarshal permissions: %w", err)
|
||||
}
|
||||
m.queuedPermissions = perms
|
||||
numSet++
|
||||
default:
|
||||
fs.Debugf(m.remote, "skipping unsupported metadata item: %s: %s", k, v)
|
||||
}
|
||||
}
|
||||
if numSet == 0 {
|
||||
fs.Infof(m.remote, "no writeable metadata found: %v", metadata)
|
||||
}
|
||||
return numSet, nil
|
||||
}
|
||||
|
||||
// toAPIMetadata converts object/dir Metadata to api.Metadata for API calls.
|
||||
// If btime is missing but mtime is present, mtime is also used as the btime, as otherwise it would get overwritten.
|
||||
func (m *Metadata) toAPIMetadata() api.Metadata {
|
||||
update := api.Metadata{
|
||||
FileSystemInfo: &api.FileSystemInfoFacet{},
|
||||
}
|
||||
if m.description != "" && m.fs.driveType == driveTypePersonal {
|
||||
update.Description = m.description
|
||||
}
|
||||
if !m.mtime.IsZero() {
|
||||
update.FileSystemInfo.LastModifiedDateTime = api.Timestamp(m.mtime)
|
||||
}
|
||||
if !m.btime.IsZero() {
|
||||
update.FileSystemInfo.CreatedDateTime = api.Timestamp(m.btime)
|
||||
}
|
||||
|
||||
if m.btime.IsZero() && !m.mtime.IsZero() { // use mtime as btime if missing
|
||||
m.btime = m.mtime
|
||||
update.FileSystemInfo.CreatedDateTime = api.Timestamp(m.btime)
|
||||
}
|
||||
return update
|
||||
}
|
||||
|
||||
// Write takes the cached Metadata and sets it on the remote, using API calls.
|
||||
// If m.fs.opt.MetadataPermissions includes "write" and updatePermissions == true, permissions are also set.
|
||||
// Calling Write without any writeable metadata will result in an error.
|
||||
func (m *Metadata) Write(ctx context.Context, updatePermissions bool) (*api.Item, error) {
|
||||
update := m.toAPIMetadata()
|
||||
if update.IsEmpty() {
|
||||
return nil, fmt.Errorf("%v: no writeable metadata found: %v", m.remote, m)
|
||||
}
|
||||
opts := m.fs.newOptsCallWithPath(ctx, m.remote, "PATCH", "")
|
||||
var info *api.Item
|
||||
err := m.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := m.fs.srv.CallJSON(ctx, &opts, &update, &info)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
fs.Debugf(m.remote, "errored metadata: %v", m)
|
||||
return nil, fmt.Errorf("%v: error updating metadata: %v", m.remote, err)
|
||||
}
|
||||
|
||||
if m.fs.opt.MetadataPermissions.IsSet(rwWrite) && updatePermissions {
|
||||
m.normalizedID = info.GetID()
|
||||
err = m.WritePermissions(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(m.remote, "error writing permissions: %v", err)
|
||||
return info, err
|
||||
}
|
||||
}
|
||||
|
||||
// update the struct since we have fresh info
|
||||
m.fs.setSystemMetadata(info, m, m.remote, m.mimeType)
|
||||
|
||||
return info, err
|
||||
}
|
||||
|
||||
// RefreshPermissions fetches the current permissions from the remote and caches them as Metadata
|
||||
func (m *Metadata) RefreshPermissions(ctx context.Context) (err error) {
|
||||
if m.normalizedID == "" {
|
||||
return errors.New("internal error: normalizedID is missing")
|
||||
}
|
||||
p, _, err := m.fs.getPermissions(ctx, m.normalizedID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to refresh permissions: %w", err)
|
||||
}
|
||||
m.permissions = p
|
||||
return nil
|
||||
}
|
||||
|
||||
// WritePermissions sets the permissions (and no other metadata) on the remote.
|
||||
// m.permissions (the existing perms) and m.queuedPermissions (the new perms to be set) must be set correctly before calling this.
|
||||
// m.permissions == nil will not error, as it is valid to add permissions when there were previously none.
|
||||
// If successful, m.permissions will be set with the new current permissions and m.queuedPermissions will be nil.
|
||||
func (m *Metadata) WritePermissions(ctx context.Context) (err error) {
|
||||
if !m.fs.opt.MetadataPermissions.IsSet(rwWrite) {
|
||||
return errors.New("can't write permissions without --onedrive-metadata-permissions write")
|
||||
}
|
||||
if m.normalizedID == "" {
|
||||
return errors.New("internal error: normalizedID is missing")
|
||||
}
|
||||
|
||||
// compare current to queued and sort into add/update/remove queues
|
||||
add, update, remove := m.sortPermissions()
|
||||
fs.Debugf(m.remote, "metadata permissions: to add: %d to update: %d to remove: %d", len(add), len(update), len(remove))
|
||||
_, err = m.processPermissions(ctx, add, update, remove)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to process permissions: %w", err)
|
||||
}
|
||||
|
||||
err = m.RefreshPermissions(ctx)
|
||||
fs.Debugf(m.remote, "updated permissions (now has %d permissions)", len(m.permissions))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get permissions: %w", err)
|
||||
}
|
||||
m.queuedPermissions = nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sortPermissions sorts the permissions (to be written) into add, update, and remove queues
|
||||
func (m *Metadata) sortPermissions() (add, update, remove []*api.PermissionsType) {
|
||||
new, old := m.queuedPermissions, m.permissions
|
||||
if len(old) == 0 || m.permsAddOnly {
|
||||
return new, nil, nil // they must all be "add"
|
||||
}
|
||||
|
||||
for _, n := range new {
|
||||
if n == nil {
|
||||
continue
|
||||
}
|
||||
if n.ID != "" {
|
||||
// sanity check: ensure there's a matching "old" id with a non-matching role
|
||||
if !slices.ContainsFunc(old, func(o *api.PermissionsType) bool {
|
||||
return o.ID == n.ID && slices.Compare(o.Roles, n.Roles) != 0 && len(o.Roles) > 0 && len(n.Roles) > 0
|
||||
}) {
|
||||
fs.Debugf(m.remote, "skipping update for invalid roles: %v (perm ID: %v)", n.Roles, n.ID)
|
||||
continue
|
||||
}
|
||||
if m.fs.driveType != driveTypePersonal && n.Link != nil && n.Link.WebURL != "" {
|
||||
// special case to work around API limitation -- can't update a sharing link perm so need to remove + add instead
|
||||
// https://learn.microsoft.com/en-us/answers/questions/986279/why-is-update-permission-graph-api-for-files-not-w
|
||||
// https://github.com/microsoftgraph/msgraph-sdk-dotnet/issues/1135
|
||||
fs.Debugf(m.remote, "sortPermissions: can't update due to API limitation, will remove + add instead: %v", n.Roles)
|
||||
remove = append(remove, n)
|
||||
add = append(add, n)
|
||||
continue
|
||||
}
|
||||
fs.Debugf(m.remote, "sortPermissions: will update role to %v", n.Roles)
|
||||
update = append(update, n)
|
||||
} else {
|
||||
fs.Debugf(m.remote, "sortPermissions: will add permission: %v %v", n, n.Roles)
|
||||
add = append(add, n)
|
||||
}
|
||||
}
|
||||
for _, o := range old {
|
||||
newHasOld := slices.ContainsFunc(new, func(n *api.PermissionsType) bool {
|
||||
if n == nil || n.ID == "" {
|
||||
return false // can't remove perms without an ID
|
||||
}
|
||||
return n.ID == o.ID
|
||||
})
|
||||
if !newHasOld && o.ID != "" && !slices.Contains(add, o) && !slices.Contains(update, o) {
|
||||
fs.Debugf(m.remote, "sortPermissions: will remove permission: %v %v (perm ID: %v)", o, o.Roles, o.ID)
|
||||
remove = append(remove, o)
|
||||
}
|
||||
}
|
||||
return add, update, remove
|
||||
}
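// The classification above, reduced to a minimal standalone sketch (illustrative
// only: simplified placeholder types are used instead of api.PermissionsType, and
// the real function also verifies that the roles actually changed and applies the
// sharing-link remove+add workaround). Queued permissions without an ID become
// adds, IDs that still exist become updates, and old IDs missing from the queued
// list become removes.
func exampleSortClassification() (add, update, remove []string) {
	old := map[string][]string{"p1": {"read"}, "p2": {"write"}} // existing: ID -> roles
	queued := []struct {
		ID    string
		Roles []string
	}{
		{ID: "", Roles: []string{"read"}},    // no ID yet -> add
		{ID: "p1", Roles: []string{"write"}}, // existing ID, new role -> update
		// p2 not queued -> remove
	}
	seen := map[string]bool{}
	for _, q := range queued {
		if q.ID == "" {
			add = append(add, "add:"+q.Roles[0])
			continue
		}
		seen[q.ID] = true
		if _, ok := old[q.ID]; ok {
			update = append(update, "update:"+q.ID)
		}
	}
	for id := range old {
		if !seen[id] {
			remove = append(remove, "remove:"+id)
		}
	}
	return add, update, remove
}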
|
||||
|
||||
// processPermissions executes the add, update, and remove queues for writing permissions
|
||||
func (m *Metadata) processPermissions(ctx context.Context, add, update, remove []*api.PermissionsType) (newPermissions []*api.PermissionsType, err error) {
|
||||
for _, p := range remove { // remove (need to do these first because of remove + add workaround)
|
||||
_, err := m.removePermission(ctx, p)
|
||||
if err != nil {
|
||||
return newPermissions, err
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range add { // add
|
||||
newPs, _, err := m.addPermission(ctx, p)
|
||||
if err != nil {
|
||||
return newPermissions, err
|
||||
}
|
||||
newPermissions = append(newPermissions, newPs...)
|
||||
}
|
||||
|
||||
for _, p := range update { // update
|
||||
newP, _, err := m.updatePermission(ctx, p)
|
||||
if err != nil {
|
||||
return newPermissions, err
|
||||
}
|
||||
newPermissions = append(newPermissions, newP)
|
||||
}
|
||||
|
||||
return newPermissions, err
|
||||
}
|
||||
|
||||
// fillRecipients looks for recipients to add from the permission passed in.
|
||||
// It looks for an email address in identity.User.ID and DisplayName, otherwise it uses the identity.User.ID as r.ObjectID.
|
||||
// It considers both "GrantedTo" and "GrantedToIdentities".
|
||||
func fillRecipients(p *api.PermissionsType) (recipients []api.DriveRecipient) {
|
||||
if p == nil {
|
||||
return recipients
|
||||
}
|
||||
ids := make(map[string]struct{}, len(p.GrantedToIdentities)+1)
|
||||
isUnique := func(s string) bool {
|
||||
_, ok := ids[s]
|
||||
return !ok && s != ""
|
||||
}
|
||||
|
||||
addRecipient := func(identity *api.IdentitySet) {
|
||||
r := api.DriveRecipient{}
|
||||
|
||||
id := ""
|
||||
if strings.ContainsRune(identity.User.ID, '@') {
|
||||
id = identity.User.ID
|
||||
r.Email = id
|
||||
} else if strings.ContainsRune(identity.User.DisplayName, '@') {
|
||||
id = identity.User.DisplayName
|
||||
r.Email = id
|
||||
} else {
|
||||
id = identity.User.ID
|
||||
r.ObjectID = id
|
||||
}
|
||||
if !isUnique(id) {
|
||||
return
|
||||
}
|
||||
ids[id] = struct{}{}
|
||||
recipients = append(recipients, r)
|
||||
}
|
||||
for _, identity := range p.GrantedToIdentities {
|
||||
addRecipient(identity)
|
||||
}
|
||||
if p.GrantedTo != nil && p.GrantedTo.User != (api.Identity{}) {
|
||||
addRecipient(p.GrantedTo)
|
||||
}
|
||||
return recipients
|
||||
}
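// Illustrative sketch (not part of this source): the mapping rule fillRecipients
// applies to each identity. An "@" in User.ID or DisplayName is treated as an
// email address; any other non-empty User.ID is used as an ObjectID.
func exampleRecipientFields(userID, displayName string) (email, objectID string) {
	switch {
	case strings.ContainsRune(userID, '@'):
		email = userID
	case strings.ContainsRune(displayName, '@'):
		email = displayName
	default:
		objectID = userID
	}
	return email, objectID
}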
|
||||
|
||||
// addPermission adds new permissions to an object or dir.
|
||||
// if p.Link.Scope == "anonymous" then it will also create a Public Link.
|
||||
func (m *Metadata) addPermission(ctx context.Context, p *api.PermissionsType) (newPs []*api.PermissionsType, resp *http.Response, err error) {
|
||||
opts := m.fs.newOptsCall(m.normalizedID, "POST", "/invite")
|
||||
|
||||
req := &api.AddPermissionsRequest{
|
||||
Recipients: fillRecipients(p),
|
||||
RequireSignIn: m.fs.driveType != driveTypePersonal, // personal and business have conflicting requirements
|
||||
Roles: p.Roles,
|
||||
}
|
||||
if m.fs.driveType != driveTypePersonal {
|
||||
req.RetainInheritedPermissions = false // not supported for personal
|
||||
}
|
||||
|
||||
if p.Link != nil && p.Link.Scope == api.AnonymousScope {
|
||||
link, err := m.fs.PublicLink(ctx, m.remote, fs.DurationOff, false)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
p.Link.WebURL = link
|
||||
newPs = append(newPs, p)
|
||||
if len(req.Recipients) == 0 {
|
||||
return newPs, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
if len(req.Recipients) == 0 {
|
||||
fs.Debugf(m.remote, "skipping add permission -- at least one valid recipient is required")
|
||||
return nil, nil, nil
|
||||
}
|
||||
if len(req.Roles) == 0 {
|
||||
return nil, nil, errors.New("at least one role is required to add a permission (choices: read, write, owner, member)")
|
||||
}
|
||||
if slices.Contains(req.Roles, api.OwnerRole) {
|
||||
fs.Debugf(m.remote, "skipping add permission -- can't invite a user with 'owner' role")
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
newP := &api.PermissionsResponse{}
|
||||
err = m.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = m.fs.srv.CallJSON(ctx, &opts, &req, &newP)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
return newP.Value, resp, err
|
||||
}
|
||||
|
||||
// updatePermission updates an existing permission on an object or dir.
|
||||
// This requires the permission ID and a role to update (which will error if it is the same as the existing role).
|
||||
// Role is the only property that can be updated.
|
||||
func (m *Metadata) updatePermission(ctx context.Context, p *api.PermissionsType) (newP *api.PermissionsType, resp *http.Response, err error) {
|
||||
opts := m.fs.newOptsCall(m.normalizedID, "PATCH", "/permissions/"+p.ID)
|
||||
req := api.UpdatePermissionsRequest{Roles: p.Roles} // roles is the only property that can be updated
|
||||
|
||||
if len(req.Roles) == 0 {
|
||||
return nil, nil, errors.New("at least one role is required to update a permission (choices: read, write, owner, member)")
|
||||
}
|
||||
|
||||
newP = &api.PermissionsType{}
|
||||
err = m.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = m.fs.srv.CallJSON(ctx, &opts, &req, &newP)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
return newP, resp, err
|
||||
}
|
||||
|
||||
// removePermission removes an existing permission on an object or dir.
|
||||
// This requires the permission ID.
|
||||
func (m *Metadata) removePermission(ctx context.Context, p *api.PermissionsType) (resp *http.Response, err error) {
|
||||
opts := m.fs.newOptsCall(m.normalizedID, "DELETE", "/permissions/"+p.ID)
|
||||
opts.NoResponse = true
|
||||
|
||||
err = m.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = m.fs.srv.CallJSON(ctx, &opts, nil, nil)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// getPermissions gets the current permissions for an object or dir, from the API.
|
||||
func (f *Fs) getPermissions(ctx context.Context, normalizedID string) (p []*api.PermissionsType, resp *http.Response, err error) {
|
||||
opts := f.newOptsCall(normalizedID, "GET", "/permissions")
|
||||
|
||||
permResp := &api.PermissionsResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, nil, &permResp)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
|
||||
return permResp.Value, resp, err
|
||||
}
|
||||
|
||||
func (f *Fs) newMetadata(remote string) *Metadata {
|
||||
return &Metadata{fs: f, remote: remote}
|
||||
}
|
||||
|
||||
// returns true if metadata includes a "permissions" key and f.opt.MetadataPermissions includes "write".
|
||||
func (f *Fs) needsUpdatePermissions(metadata fs.Metadata) bool {
|
||||
_, ok := metadata["permissions"]
|
||||
return ok && f.opt.MetadataPermissions.IsSet(rwWrite)
|
||||
}
|
||||
|
||||
// returns a non-zero btime if we have one
|
||||
// otherwise falls back to mtime
|
||||
func (o *Object) tryGetBtime(modTime time.Time) time.Time {
|
||||
if o.meta != nil && !o.meta.btime.IsZero() {
|
||||
return o.meta.btime
|
||||
}
|
||||
return modTime
|
||||
}
|
||||
|
||||
// adds metadata (except permissions) if --metadata is in use
|
||||
func (o *Object) fetchMetadataForCreate(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, modTime time.Time) (createRequest api.CreateUploadRequest, err error) {
|
||||
createRequest = api.CreateUploadRequest{ // we set mtime no matter what
|
||||
Item: api.Metadata{
|
||||
FileSystemInfo: &api.FileSystemInfoFacet{
|
||||
CreatedDateTime: api.Timestamp(o.tryGetBtime(modTime)),
|
||||
LastModifiedDateTime: api.Timestamp(modTime),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
|
||||
if err != nil {
|
||||
return createRequest, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
if meta == nil {
|
||||
return createRequest, nil // no metadata or --metadata not in use, so just return mtime
|
||||
}
|
||||
if o.meta == nil {
|
||||
o.meta = o.fs.newMetadata(o.Remote())
|
||||
}
|
||||
o.meta.mtime = modTime
|
||||
numSet, err := o.meta.Set(ctx, meta)
|
||||
if err != nil {
|
||||
return createRequest, err
|
||||
}
|
||||
if numSet == 0 {
|
||||
return createRequest, nil
|
||||
}
|
||||
createRequest.Item = o.meta.toAPIMetadata()
|
||||
return createRequest, nil
|
||||
}
|
||||
|
||||
// Fetch metadata and update updateInfo if --metadata is in use
|
||||
// modtime will still be set when there is no metadata to set
|
||||
func (f *Fs) fetchAndUpdateMetadata(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *Object) (info *api.Item, err error) {
|
||||
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
if meta == nil {
|
||||
return updateInfo.setModTime(ctx, src.ModTime(ctx)) // no metadata or --metadata not in use, so just set modtime
|
||||
}
|
||||
if updateInfo.meta == nil {
|
||||
updateInfo.meta = f.newMetadata(updateInfo.Remote())
|
||||
}
|
||||
newInfo, err := updateInfo.updateMetadata(ctx, meta)
|
||||
if newInfo == nil {
|
||||
return info, err
|
||||
}
|
||||
return newInfo, err
|
||||
}
|
||||
|
||||
// Fetch and update permissions if --metadata is in use
|
||||
// This is similar to fetchAndUpdateMetadata, except it does NOT set modtime or other metadata if there are no permissions to set.
|
||||
// This is intended for cases where metadata may already have been set during upload and an extra step is needed only for permissions.
|
||||
func (f *Fs) fetchAndUpdatePermissions(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, updateInfo *Object) (info *api.Item, err error) {
|
||||
meta, err := fs.GetMetadataOptions(ctx, f, src, options)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read metadata from source object: %w", err)
|
||||
}
|
||||
if meta == nil || !f.needsUpdatePermissions(meta) {
|
||||
return nil, nil // no metadata, --metadata not in use, or wrong flags
|
||||
}
|
||||
if updateInfo.meta == nil {
|
||||
updateInfo.meta = f.newMetadata(updateInfo.Remote())
|
||||
}
|
||||
newInfo, err := updateInfo.updateMetadata(ctx, meta)
|
||||
if newInfo == nil {
|
||||
return info, err
|
||||
}
|
||||
return newInfo, err
|
||||
}
|
||||
|
||||
// updateMetadata calls Get, Set, and Write
|
||||
func (o *Object) updateMetadata(ctx context.Context, meta fs.Metadata) (info *api.Item, err error) {
|
||||
_, err = o.meta.Get(ctx) // refresh permissions
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
numSet, err := o.meta.Set(ctx, meta)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if numSet == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
info, err = o.meta.Write(ctx, o.fs.needsUpdatePermissions(meta))
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
err = o.setMetaData(info)
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
|
||||
// Remove versions if required
|
||||
if o.fs.opt.NoVersions {
|
||||
err := o.deleteVersions(ctx)
|
||||
if err != nil {
|
||||
return info, fmt.Errorf("%v: Failed to remove versions: %v", o, err)
|
||||
}
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// MkdirMetadata makes the directory passed in as dir.
|
||||
//
|
||||
// It shouldn't return an error if it already exists.
|
||||
//
|
||||
// If the metadata is not nil it is set.
|
||||
//
|
||||
// It returns the directory that was created.
|
||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
||||
var info *api.Item
|
||||
var meta *Metadata
|
||||
dirID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err == fs.ErrorDirNotFound {
|
||||
// Directory does not exist so create it
|
||||
var leaf, parentID string
|
||||
leaf, parentID, err = f.dirCache.FindPath(ctx, dir, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info, meta, err = f.createDir(ctx, parentID, dir, leaf, metadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if f.driveType != driveTypePersonal {
|
||||
// for some reason, OneDrive Business needs this extra step to set modtime, while Personal does not. Seems like a bug...
|
||||
fs.Debugf(dir, "setting time %v", meta.mtime)
|
||||
info, err = meta.Write(ctx, false)
|
||||
}
|
||||
} else if err == nil {
|
||||
// Directory exists and needs updating
|
||||
info, meta, err = f.updateDir(ctx, dirID, dir, metadata)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Convert the info into a directory entry
|
||||
parent, _ := dircache.SplitPath(dir)
|
||||
entry, err := f.itemToDirEntry(ctx, parent, info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
directory, ok := entry.(*Directory)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: expecting %T to be a *Directory", entry)
|
||||
}
|
||||
directory.meta = meta
|
||||
f.setSystemMetadata(info, directory.meta, entry.Remote(), dirMimeType)
|
||||
|
||||
dirEntry, ok := entry.(fs.Directory)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: expecting %T to be an fs.Directory", entry)
|
||||
}
|
||||
|
||||
return dirEntry, nil
|
||||
}
|
||||
|
||||
// createDir makes a directory with pathID as parent and name leaf with optional metadata
|
||||
func (f *Fs) createDir(ctx context.Context, pathID, dirWithLeaf, leaf string, metadata fs.Metadata) (info *api.Item, meta *Metadata, err error) {
|
||||
// fs.Debugf(f, "CreateDir(%q, %q)\n", dirID, leaf)
|
||||
var resp *http.Response
|
||||
opts := f.newOptsCall(pathID, "POST", "/children")
|
||||
|
||||
mkdir := api.CreateItemWithMetadataRequest{
|
||||
CreateItemRequest: api.CreateItemRequest{
|
||||
Name: f.opt.Enc.FromStandardName(leaf),
|
||||
ConflictBehavior: "fail",
|
||||
},
|
||||
}
|
||||
m := f.newMetadata(dirWithLeaf)
|
||||
m.mimeType = dirMimeType
|
||||
numSet := 0
|
||||
if len(metadata) > 0 {
|
||||
|
||||
numSet, err = m.Set(ctx, metadata)
|
||||
if err != nil {
|
||||
return nil, m, err
|
||||
}
|
||||
if numSet > 0 {
|
||||
mkdir.Metadata = m.toAPIMetadata()
|
||||
}
|
||||
}
|
||||
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, m, err
|
||||
}
|
||||
|
||||
if f.needsUpdatePermissions(metadata) && numSet > 0 { // permissions must be done as a separate step
|
||||
m.normalizedID = info.GetID()
|
||||
err = m.RefreshPermissions(ctx)
|
||||
if err != nil {
|
||||
return info, m, err
|
||||
}
|
||||
|
||||
err = m.WritePermissions(ctx)
|
||||
if err != nil {
|
||||
fs.Errorf(m.remote, "error writing permissions: %v", err)
|
||||
return info, m, err
|
||||
}
|
||||
}
|
||||
return info, m, nil
|
||||
}
|
||||
|
||||
// updateDir updates an existing directory with the metadata passed in
|
||||
func (f *Fs) updateDir(ctx context.Context, dirID, remote string, metadata fs.Metadata) (info *api.Item, meta *Metadata, err error) {
|
||||
d := f.newDir(dirID, remote)
|
||||
_, err = d.meta.Set(ctx, metadata)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
info, err = d.meta.Write(ctx, f.needsUpdatePermissions(metadata))
|
||||
return info, d.meta, err
|
||||
}
|
||||
|
||||
func (f *Fs) newDir(dirID, remote string) (d *Directory) {
|
||||
d = &Directory{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
size: -1,
|
||||
items: -1,
|
||||
id: dirID,
|
||||
meta: f.newMetadata(remote),
|
||||
}
|
||||
d.meta.normalizedID = dirID
|
||||
return d
|
||||
}
|
||||
|
||||
// Metadata returns metadata for a DirEntry
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
|
||||
err = o.readMetaData(ctx)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to read metadata: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
return o.meta.Get(ctx)
|
||||
}
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
dirID, err := f.dirCache.FindDir(ctx, dir, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d := f.newDir(dirID, dir)
|
||||
return d.SetModTime(ctx, modTime)
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the DirEntry to set the modification date
|
||||
//
|
||||
// If there is any other metadata it does not overwrite it.
|
||||
func (d *Directory) SetModTime(ctx context.Context, t time.Time) error {
|
||||
btime := t
|
||||
if d.meta != nil && !d.meta.btime.IsZero() {
|
||||
btime = d.meta.btime // if we already have a non-zero btime, preserve it
|
||||
}
|
||||
d.meta = d.fs.newMetadata(d.remote) // set only the mtime and btime
|
||||
d.meta.mtime = t
|
||||
d.meta.btime = btime
|
||||
_, err := d.meta.Write(ctx, false)
|
||||
return err
|
||||
}
|
||||
|
||||
// Metadata returns metadata for a DirEntry
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (d *Directory) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
|
||||
return d.meta.Get(ctx)
|
||||
}
|
||||
|
||||
// SetMetadata sets metadata for a Directory
|
||||
//
|
||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
||||
func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
||||
_, meta, err := d.fs.updateDir(ctx, d.id, d.remote, metadata)
|
||||
d.meta = meta
|
||||
return err
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (d *Directory) Fs() fs.Info {
|
||||
return d.fs
|
||||
}
|
||||
|
||||
// String returns the name
|
||||
func (d *Directory) String() string {
|
||||
return d.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (d *Directory) Remote() string {
|
||||
return d.remote
|
||||
}
|
||||
|
||||
// ModTime returns the modification date of the file
|
||||
//
|
||||
// If one isn't available it returns the configured --default-dir-time
|
||||
func (d *Directory) ModTime(ctx context.Context) time.Time {
|
||||
if !d.meta.mtime.IsZero() {
|
||||
return d.meta.mtime
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
return time.Time(ci.DefaultTime)
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (d *Directory) Size() int64 {
|
||||
return d.size
|
||||
}
|
||||
|
||||
// Items returns the count of items in this directory or this
|
||||
// directory and subdirectories if known, -1 for unknown
|
||||
func (d *Directory) Items() int64 {
|
||||
return d.items
|
||||
}
|
||||
|
||||
// ID gets the optional ID
|
||||
func (d *Directory) ID() string {
|
||||
return d.id
|
||||
}
|
||||
|
||||
// MimeType returns the content type of the Object if
|
||||
// known, or "" if not
|
||||
func (d *Directory) MimeType(ctx context.Context) string {
|
||||
return dirMimeType
|
||||
}
|
147
backend/onedrive/metadata.md
Normal file
147
backend/onedrive/metadata.md
Normal file
@ -0,0 +1,147 @@
OneDrive supports System Metadata (not User Metadata, as of this writing) for
both files and directories. Much of the metadata is read-only, and there are some
differences between OneDrive Personal and Business (see table below for
details).

Permissions are also supported, if `--onedrive-metadata-permissions` is set. The
accepted values for `--onedrive-metadata-permissions` are `read`, `write`,
`read,write`, and `off` (the default). `write` supports adding new permissions,
updating the "role" of existing permissions, and removing permissions. Updating
and removing require the Permission ID to be known, so it is recommended to use
`read,write` instead of `write` if you wish to update/remove permissions.

Permissions are read/written in JSON format using the same schema as the
[OneDrive API](https://learn.microsoft.com/en-us/onedrive/developer/rest-api/resources/permission?view=odsp-graph-online),
which differs slightly between OneDrive Personal and Business.

Example for OneDrive Personal:
```json
[
	{
		"id": "1234567890ABC!123",
		"grantedTo": {
			"user": {
				"id": "ryan@contoso.com"
			},
			"application": {},
			"device": {}
		},
		"invitation": {
			"email": "ryan@contoso.com"
		},
		"link": {
			"webUrl": "https://1drv.ms/t/s!1234567890ABC"
		},
		"roles": [
			"read"
		],
		"shareId": "s!1234567890ABC"
	}
]
```

Example for OneDrive Business:
```json
[
	{
		"id": "48d31887-5fad-4d73-a9f5-3c356e68a038",
		"grantedToIdentities": [
			{
				"user": {
					"displayName": "ryan@contoso.com"
				},
				"application": {},
				"device": {}
			}
		],
		"link": {
			"type": "view",
			"scope": "users",
			"webUrl": "https://contoso.sharepoint.com/:w:/t/design/a577ghg9hgh737613bmbjf839026561fmzhsr85ng9f3hjck2t5s"
		},
		"roles": [
			"read"
		],
		"shareId": "u!LKj1lkdlals90j1nlkascl"
	},
	{
		"id": "5D33DD65C6932946",
		"grantedTo": {
			"user": {
				"displayName": "John Doe",
				"id": "efee1b77-fb3b-4f65-99d6-274c11914d12"
			},
			"application": {},
			"device": {}
		},
		"roles": [
			"owner"
		],
		"shareId": "FWxc1lasfdbEAGM5fI7B67aB5ZMPDMmQ11U"
	}
]
```

To write permissions, pass in a "permissions" metadata key using this same
format. The [`--metadata-mapper`](https://rclone.org/docs/#metadata-mapper) tool can
be very helpful for this.
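
For example, a `--metadata-mapper` program could generate that value itself. The
following standalone Go sketch (illustrative only; the struct here is a simplified
stand-in for the schema above, not rclone's internal types) builds a one-entry
permissions list and prints the JSON string that would be supplied as the
`permissions` metadata value:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// permission is a simplified, illustrative stand-in for the schema shown above.
type permission struct {
	ID                  string     `json:"id"`
	GrantedToIdentities []identity `json:"grantedToIdentities,omitempty"`
	Roles               []string   `json:"roles"`
}

type identity struct {
	User map[string]string `json:"user"`
}

func main() {
	perms := []permission{{
		ID:                  "", // empty ID: this is a new permission to add
		GrantedToIdentities: []identity{{User: map[string]string{"id": "ryan@contoso.com"}}},
		Roles:               []string{"read"},
	}}
	b, err := json.MarshalIndent(perms, "", "\t")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // value to pass as the "permissions" metadata key
}
```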
When adding permissions, an email address can be provided in the `User.ID` or
`DisplayName` properties of `grantedTo` or `grantedToIdentities`. Alternatively,
an ObjectID can be provided in `User.ID`. At least one valid recipient must be
provided in order to add a permission for a user. Creating a Public Link is also
supported, if `Link.Scope` is set to `"anonymous"`.

Example request to add a "read" permission:

```json
[
	{
		"id": "",
		"grantedTo": {
			"user": {},
			"application": {},
			"device": {}
		},
		"grantedToIdentities": [
			{
				"user": {
					"id": "ryan@contoso.com"
				},
				"application": {},
				"device": {}
			}
		],
		"roles": [
			"read"
		]
	}
]
```

Note that adding a permission can fail if a conflicting permission already
exists for the file/folder.

To update an existing permission, include both the Permission ID and the new
`roles` to be assigned. `roles` is the only property that can be changed.

To remove permissions, pass in a blob containing only the permissions you wish
to keep (which can be empty, to remove all).
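
A minimal sketch of the remove semantics (hypothetical permission IDs, not rclone
internals): unmarshal the current `permissions` value, keep only the entries that
should survive, and pass the re-marshalled result back; anything left out is
removed:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Current permissions as previously read back (truncated, hypothetical IDs).
	current := `[
		{"id": "perm-to-keep", "roles": ["owner"]},
		{"id": "perm-to-drop", "roles": ["read"]}
	]`

	var perms []map[string]any
	if err := json.Unmarshal([]byte(current), &perms); err != nil {
		panic(err)
	}

	// Keep only the permissions that should remain; anything omitted is removed.
	var kept []map[string]any
	for _, p := range perms {
		if p["id"] != "perm-to-drop" {
			kept = append(kept, p)
		}
	}

	b, _ := json.MarshalIndent(kept, "", "\t")
	fmt.Println(string(b)) // new value for the "permissions" metadata key
}
```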
Note that both reading and writing permissions require extra API calls, so if
you don't need to read or write permissions it is recommended to omit
`--onedrive-metadata-permissions`.

Metadata and permissions are supported for Folders (directories) as well as
Files. Note that setting the `mtime` or `btime` on a Folder requires one extra
API call on OneDrive Business only.

OneDrive does not currently support User Metadata. When writing metadata, only
writeable system properties will be written -- any read-only or unrecognized keys
passed in will be ignored.

TIP: to see the metadata and permissions for any file or folder, run:

```
rclone lsjson remote:path --stat -M --onedrive-metadata-permissions read
```
@ -4,6 +4,7 @@ package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
@ -27,7 +28,9 @@ import (
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/config/obscure"
|
||||
"github.com/rclone/rclone/fs/fserrors"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
@ -92,6 +95,9 @@ var (
|
||||
|
||||
// QuickXorHashType is the hash.Type for OneDrive
|
||||
QuickXorHashType hash.Type
|
||||
|
||||
//go:embed metadata.md
|
||||
metadataHelp string
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
@ -102,6 +108,10 @@ func init() {
|
||||
Description: "Microsoft OneDrive",
|
||||
NewFs: NewFs,
|
||||
Config: Config,
|
||||
MetadataInfo: &fs.MetadataInfo{
|
||||
System: systemMetadataInfo,
|
||||
Help: metadataHelp,
|
||||
},
|
||||
Options: append(oauthutil.SharedOptions, []fs.Option{{
|
||||
Name: "region",
|
||||
Help: "Choose national cloud region for OneDrive.",
|
||||
@ -172,7 +182,8 @@ Choose or manually enter a custom space separated list with all scopes, that rcl
|
||||
Value: "Files.Read Files.ReadWrite Files.Read.All Files.ReadWrite.All offline_access",
|
||||
Help: "Read and write access to all resources, without the ability to browse SharePoint sites. \nSame as if disable_site_permission was set to true",
|
||||
},
|
||||
}}, {
|
||||
},
|
||||
}, {
|
||||
Name: "disable_site_permission",
|
||||
Help: `Disable the request for Sites.Read.All permission.
|
||||
|
||||
@ -329,7 +340,7 @@ file.
|
||||
Default: false,
|
||||
Help: strings.ReplaceAll(`If set rclone will use delta listing to implement recursive listings.
|
||||
|
||||
If this flag is set the the onedrive backend will advertise |ListR|
|
||||
If this flag is set the onedrive backend will advertise |ListR|
|
||||
support for recursive listings.
|
||||
|
||||
Setting this flag speeds up these things greatly:
|
||||
@ -355,6 +366,16 @@ It is recommended if you are mounting your onedrive at the root
|
||||
(or near the root when using crypt) and using rclone |rc vfs/refresh|.
|
||||
`, "|", "`"),
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "metadata_permissions",
|
||||
Help: `Control whether permissions should be read or written in metadata.
|
||||
|
||||
Reading permissions metadata from files can be done quickly, but it
|
||||
isn't always desirable to set the permissions from the metadata.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: rwOff,
|
||||
Examples: rwExamples,
|
||||
}, {
|
||||
Name: config.ConfigEncoding,
|
||||
Help: config.ConfigEncodingHelp,
|
||||
@ -638,7 +659,8 @@ Examples:
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: graphURL,
|
||||
Path: "/drives/" + finalDriveID + "/root"}
|
||||
Path: "/drives/" + finalDriveID + "/root",
|
||||
}
|
||||
var rootItem api.Item
|
||||
_, err = srv.CallJSON(ctx, &opts, nil, &rootItem)
|
||||
if err != nil {
|
||||
@ -678,6 +700,7 @@ type Options struct {
|
||||
AVOverride bool `config:"av_override"`
|
||||
Delta bool `config:"delta"`
|
||||
Enc encoder.MultiEncoder `config:"encoding"`
|
||||
MetadataPermissions rwChoice `config:"metadata_permissions"`
|
||||
}
|
||||
|
||||
// Fs represents a remote OneDrive
|
||||
@ -688,6 +711,7 @@ type Fs struct {
|
||||
ci *fs.ConfigInfo // global config
|
||||
features *fs.Features // optional features
|
||||
srv *rest.Client // the connection to the OneDrive server
|
||||
unAuth *rest.Client // no authentication connection to the OneDrive server
|
||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||
pacer *fs.Pacer // pacer for API calls
|
||||
tokenRenewer *oauthutil.Renew // renew the token on expiry
|
||||
@ -709,6 +733,17 @@ type Object struct {
|
||||
id string // ID of the object
|
||||
hash string // Hash of the content, usually QuickXorHash but set as hash_type
|
||||
mimeType string // Content-Type of object from server (may not be as uploaded)
|
||||
meta *Metadata // metadata properties
|
||||
}
|
||||
|
||||
// Directory describes a OneDrive directory
|
||||
type Directory struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
size int64 // size of directory and contents or -1 if unknown
|
||||
items int64 // number of objects or -1 for unknown
|
||||
id string // dir ID
|
||||
meta *Metadata // metadata properties
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@ -749,8 +784,10 @@ var retryErrorCodes = []int{
|
||||
509, // Bandwidth Limit Exceeded
|
||||
}
|
||||
|
||||
var gatewayTimeoutError sync.Once
|
||||
var errAsyncJobAccessDenied = errors.New("async job failed - access denied")
|
||||
var (
|
||||
gatewayTimeoutError sync.Once
|
||||
errAsyncJobAccessDenied = errors.New("async job failed - access denied")
|
||||
)
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
// deserve to be retried. It returns the err as a convenience
|
||||
@ -946,8 +983,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
TokenURL: authEndpoint[opt.Region] + tokenPath,
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(ctx)
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(ctx, name, m, oauthConfig, client)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure OneDrive: %w", err)
|
||||
}
|
||||
@ -961,14 +999,24 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
driveID: opt.DriveID,
|
||||
driveType: opt.DriveType,
|
||||
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
|
||||
unAuth: rest.NewClient(client).SetRoot(rootURL),
|
||||
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
|
||||
hashType: QuickXorHashType,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
ReadMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||
CaseInsensitive: true,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: false,
|
||||
CanHaveEmptyDirectories: true,
|
||||
ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: false,
|
||||
ReadDirMetadata: true,
|
||||
WriteDirMetadata: true,
|
||||
WriteDirSetModTime: true,
|
||||
UserDirMetadata: false,
|
||||
DirModTimeUpdatesOnWrite: false,
|
||||
}).Fill(ctx, f)
|
||||
f.srv.SetErrorHandler(errorHandler)
|
||||
|
||||
@ -994,7 +1042,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
})
|
||||
|
||||
// Get rootID
|
||||
var rootID = opt.RootFolderID
|
||||
rootID := opt.RootFolderID
|
||||
if rootID == "" {
|
||||
rootInfo, _, err := f.readMetaDataForPath(ctx, "")
|
||||
if err != nil {
|
||||
@ -1061,6 +1109,7 @@ func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Ite
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
meta: f.newMetadata(remote),
|
||||
}
|
||||
var err error
|
||||
if info != nil {
|
||||
@ -1119,11 +1168,11 @@ func (f *Fs) CreateDir(ctx context.Context, dirID, leaf string) (newID string, e
|
||||
return shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
//fmt.Printf("...Error %v\n", err)
|
||||
// fmt.Printf("...Error %v\n", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
//fmt.Printf("...Id %q\n", *info.Id)
|
||||
// fmt.Printf("...Id %q\n", *info.Id)
|
||||
return info.GetID(), nil
|
||||
}
|
||||
|
||||
@ -1212,8 +1261,9 @@ func (f *Fs) itemToDirEntry(ctx context.Context, dir string, info *api.Item) (en
|
||||
// cache the directory ID for later lookups
|
||||
id := info.GetID()
|
||||
f.dirCache.Put(remote, id)
|
||||
d := fs.NewDir(remote, time.Time(info.GetLastModifiedDateTime())).SetID(id)
|
||||
d.SetItems(folder.ChildCount)
|
||||
d := f.newDir(id, remote)
|
||||
d.items = folder.ChildCount
|
||||
f.setSystemMetadata(info, d.meta, remote, dirMimeType)
|
||||
entry = d
|
||||
} else {
|
||||
o, err := f.newObjectWithInfo(ctx, remote, info)
|
||||
@ -1374,7 +1424,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
|
||||
}
|
||||
|
||||
return list.Flush()
|
||||
|
||||
}
|
||||
|
||||
// Shutdown shutdown the fs
|
||||
@ -1475,6 +1524,9 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
|
||||
// Precision return the precision of this Fs
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
if f.driveType == driveTypePersonal {
|
||||
return time.Millisecond
|
||||
}
|
||||
return time.Second
|
||||
}
|
||||
|
||||
@ -1614,12 +1666,19 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
// Copy does NOT copy the modTime from the source and there seems to
|
||||
// be no way to set date before
|
||||
// This will create TWO versions on OneDrive
|
||||
err = dstObj.SetModTime(ctx, srcObj.ModTime(ctx))
|
||||
|
||||
// Set modtime and adjust metadata if required
|
||||
_, err = dstObj.Metadata(ctx) // make sure we get the correct new normalizedID
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dstObj, nil
|
||||
dstObj.meta.permsAddOnly = true // dst will have different IDs from src, so can't update/remove
|
||||
info, err := f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), dstObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = dstObj.setMetaData(info)
|
||||
return dstObj, err
|
||||
}
|
||||
|
||||
// Purge deletes all the files in the directory
|
||||
@ -1674,12 +1733,12 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
},
|
||||
// We set the mod time too as it gets reset otherwise
|
||||
FileSystemInfo: &api.FileSystemInfoFacet{
|
||||
CreatedDateTime: api.Timestamp(srcObj.modTime),
|
||||
CreatedDateTime: api.Timestamp(srcObj.tryGetBtime(srcObj.modTime)),
|
||||
LastModifiedDateTime: api.Timestamp(srcObj.modTime),
|
||||
},
|
||||
}
|
||||
var resp *http.Response
|
||||
var info api.Item
|
||||
var info *api.Item
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
|
||||
return shouldRetry(ctx, resp, err)
|
||||
@ -1688,11 +1747,18 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = dstObj.setMetaData(&info)
|
||||
err = dstObj.setMetaData(info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dstObj, nil
|
||||
|
||||
// Set modtime and adjust metadata if required
|
||||
info, err = f.fetchAndUpdateMetadata(ctx, src, fs.MetadataAsOpenOptions(ctx), dstObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = dstObj.setMetaData(info)
|
||||
return dstObj, err
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
@ -2028,6 +2094,7 @@ func (o *Object) Size() int64 {
|
||||
// setMetaData sets the metadata from info
|
||||
func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
if info.GetFolder() != nil {
|
||||
log.Stack(o, "setMetaData called on dir instead of obj")
|
||||
return fs.ErrorIsDir
|
||||
}
|
||||
o.hasMetaData = true
|
||||
@ -2067,9 +2134,40 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
o.modTime = time.Time(info.GetLastModifiedDateTime())
|
||||
}
|
||||
o.id = info.GetID()
|
||||
if o.meta == nil {
|
||||
o.meta = o.fs.newMetadata(o.Remote())
|
||||
}
|
||||
o.fs.setSystemMetadata(info, o.meta, o.remote, o.mimeType)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sets system metadata shared by both objects and directories
|
||||
func (f *Fs) setSystemMetadata(info *api.Item, meta *Metadata, remote string, mimeType string) {
|
||||
meta.fs = f
|
||||
meta.remote = remote
|
||||
meta.mimeType = mimeType
|
||||
if info == nil {
|
||||
fs.Errorf("setSystemMetadata", "internal error: info is nil")
|
||||
}
|
||||
fileSystemInfo := info.GetFileSystemInfo()
|
||||
if fileSystemInfo != nil {
|
||||
meta.mtime = time.Time(fileSystemInfo.LastModifiedDateTime)
|
||||
meta.btime = time.Time(fileSystemInfo.CreatedDateTime)
|
||||
|
||||
} else {
|
||||
meta.mtime = time.Time(info.GetLastModifiedDateTime())
|
||||
meta.btime = time.Time(info.GetCreatedDateTime())
|
||||
}
|
||||
meta.utime = time.Time(info.GetCreatedDateTime())
|
||||
meta.description = info.Description
|
||||
meta.packageType = info.GetPackageType()
|
||||
meta.createdBy = info.GetCreatedBy()
|
||||
meta.lastModifiedBy = info.GetLastModifiedBy()
|
||||
meta.malwareDetected = info.MalwareDetected()
|
||||
meta.shared = info.Shared
|
||||
meta.normalizedID = info.GetID()
|
||||
}
|
||||
|
||||
// readMetaData gets the metadata if it hasn't already been fetched
|
||||
//
|
||||
// it also sets the info
|
||||
@ -2107,7 +2205,7 @@ func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item,
|
||||
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "PATCH", "")
|
||||
update := api.SetFileSystemInfo{
|
||||
FileSystemInfo: api.FileSystemInfoFacet{
|
||||
CreatedDateTime: api.Timestamp(modTime),
|
||||
CreatedDateTime: api.Timestamp(o.tryGetBtime(modTime)),
|
||||
LastModifiedDateTime: api.Timestamp(modTime),
|
||||
},
|
||||
}
|
||||
@ -2171,18 +2269,19 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusOK && resp.ContentLength > 0 && resp.Header.Get("Content-Range") == "" {
|
||||
//Overwrite size with actual size since size readings from Onedrive is unreliable.
|
||||
// Overwrite size with actual size since size readings from Onedrive is unreliable.
|
||||
o.size = resp.ContentLength
|
||||
}
|
||||
return resp.Body, err
|
||||
}
|
||||
|
||||
// createUploadSession creates an upload session for the object
|
||||
func (o *Object) createUploadSession(ctx context.Context, modTime time.Time) (response *api.CreateUploadResponse, err error) {
|
||||
func (o *Object) createUploadSession(ctx context.Context, src fs.ObjectInfo, modTime time.Time) (response *api.CreateUploadResponse, err error) {
|
||||
opts := o.fs.newOptsCallWithPath(ctx, o.remote, "POST", "/createUploadSession")
|
||||
createRequest := api.CreateUploadRequest{}
|
||||
createRequest.Item.FileSystemInfo.CreatedDateTime = api.Timestamp(modTime)
|
||||
createRequest.Item.FileSystemInfo.LastModifiedDateTime = api.Timestamp(modTime)
|
||||
createRequest, err := o.fetchMetadataForCreate(ctx, src, opts.Options, modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(ctx, &opts, &createRequest, &response)
|
||||
@ -2233,7 +2332,7 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
|
||||
// var response api.UploadFragmentResponse
|
||||
var resp *http.Response
|
||||
var body []byte
|
||||
var skip = int64(0)
|
||||
skip := int64(0)
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
toSend := chunkSize - skip
|
||||
opts := rest.Opts{
|
||||
@ -2245,7 +2344,7 @@ func (o *Object) uploadFragment(ctx context.Context, url string, start int64, to
|
||||
Options: options,
|
||||
}
|
||||
_, _ = chunk.Seek(skip, io.SeekStart)
|
||||
resp, err = o.fs.srv.Call(ctx, &opts)
|
||||
resp, err = o.fs.unAuth.Call(ctx, &opts)
|
||||
if err != nil && resp != nil && resp.StatusCode == http.StatusRequestedRangeNotSatisfiable {
|
||||
fs.Debugf(o, "Received 416 error - reading current position from server: %v", err)
|
||||
pos, posErr := o.getPosition(ctx, url)
|
||||
@ -2300,14 +2399,17 @@ func (o *Object) cancelUploadSession(ctx context.Context, url string) (err error
|
||||
}
|
||||
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
|
||||
// if there is metadata, it will be set at the same time, except for permissions, which must be set after (if present and enabled).
|
||||
func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
|
||||
size := src.Size()
|
||||
modTime := src.ModTime(ctx)
|
||||
if size <= 0 {
|
||||
return nil, errors.New("unknown-sized upload not supported")
|
||||
}
|
||||
|
||||
// Create upload session
|
||||
fs.Debugf(o, "Starting multipart upload")
|
||||
session, err := o.createUploadSession(ctx, modTime)
|
||||
session, err := o.createUploadSession(ctx, src, modTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -2340,12 +2442,25 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, size int64,
|
||||
position += n
|
||||
}
|
||||
|
||||
return info, nil
|
||||
err = o.setMetaData(info)
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
if !o.fs.opt.MetadataPermissions.IsSet(rwWrite) {
|
||||
return info, err
|
||||
}
|
||||
info, err = o.fs.fetchAndUpdatePermissions(ctx, src, options, o) // for permissions, which can't be set during original upload
|
||||
if info == nil {
|
||||
return nil, err
|
||||
}
|
||||
return info, o.setMetaData(info)
|
||||
}
|
||||
|
||||
// Update the content of a remote file within 4 MiB size in one single request
|
||||
// This function will set modtime after uploading, which will create a new version for the remote file
|
||||
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64, modTime time.Time, options ...fs.OpenOption) (info *api.Item, err error) {
|
||||
// (currently only used when size is exactly 0)
|
||||
// This function will set modtime and metadata after uploading, which will create a new version for the remote file
|
||||
func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (info *api.Item, err error) {
|
||||
size := src.Size()
|
||||
if size < 0 || size > int64(fs.SizeSuffix(4*1024*1024)) {
|
||||
return nil, errors.New("size passed into uploadSinglepart must be >= 0 and <= 4 MiB")
|
||||
}
|
||||
@ -2376,7 +2491,8 @@ func (o *Object) uploadSinglepart(ctx context.Context, in io.Reader, size int64,
|
||||
return nil, err
|
||||
}
|
||||
// Set the mod time now and read metadata
|
||||
return o.setModTime(ctx, modTime)
|
||||
info, err = o.fs.fetchAndUpdateMetadata(ctx, src, options, o)
|
||||
return info, o.setMetaData(info)
|
||||
}
|
||||
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
@ -2391,17 +2507,17 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
defer o.fs.tokenRenewer.Stop()
|
||||
|
||||
size := src.Size()
|
||||
modTime := src.ModTime(ctx)
|
||||
|
||||
var info *api.Item
|
||||
if size > 0 {
|
||||
info, err = o.uploadMultipart(ctx, in, size, modTime, options...)
|
||||
info, err = o.uploadMultipart(ctx, in, src, options...)
|
||||
} else if size == 0 {
|
||||
info, err = o.uploadSinglepart(ctx, in, size, modTime, options...)
|
||||
info, err = o.uploadSinglepart(ctx, in, src, options...)
|
||||
} else {
|
||||
return errors.New("unknown-sized upload not supported")
|
||||
}
|
||||
if err != nil {
|
||||
fs.PrettyPrint(info, "info from Update error", fs.LogLevelDebug)
|
||||
return err
|
||||
}
|
||||
|
||||
@ -2412,8 +2528,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
fs.Errorf(o, "Failed to remove versions: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return o.setMetaData(info)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
@ -2765,4 +2880,11 @@ var (
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
_ fs.Metadataer = (*Object)(nil)
|
||||
_ fs.Metadataer = (*Directory)(nil)
|
||||
_ fs.SetModTimer = (*Directory)(nil)
|
||||
_ fs.SetMetadataer = (*Directory)(nil)
|
||||
_ fs.MimeTyper = &Directory{}
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
||||
)
|
||||
|
464
backend/onedrive/onedrive_internal_test.go
Normal file
464
backend/onedrive/onedrive_internal_test.go
Normal file
@ -0,0 +1,464 @@
|
||||
package onedrive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
_ "github.com/rclone/rclone/backend/local"
|
||||
"github.com/rclone/rclone/backend/onedrive/api"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/fstest/fstests"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/exp/slices" // replace with slices after go1.21 is the minimum version
|
||||
)
|
||||
|
||||
// go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDrive:meta -v
|
||||
// go test -timeout 30m -run ^TestIntegration/FsMkdir/FsPutFiles/Internal$ github.com/rclone/rclone/backend/onedrive -remote TestOneDriveBusiness:meta -v
|
||||
// go run ./fstest/test_all -remotes TestOneDriveBusiness:meta,TestOneDrive:meta -verbose -maxtries 1
|
||||
|
||||
var (
|
||||
t1 = fstest.Time("2023-08-26T23:13:06.499999999Z")
|
||||
t2 = fstest.Time("2020-02-29T12:34:56.789Z")
|
||||
t3 = time.Date(1994, time.December, 24, 9+12, 0, 0, 525600, time.FixedZone("Eastern Standard Time", -5))
|
||||
ctx = context.Background()
|
||||
content = "hello"
|
||||
)
|
||||
|
||||
const (
|
||||
testUserID = "ryan@contoso.com" // demo user from doc examples (can't share files with yourself)
|
||||
// https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_invite?view=odsp-graph-online#http-request-1
|
||||
)
|
||||
|
||||
// TestMain drives the tests
|
||||
func TestMain(m *testing.M) {
|
||||
fstest.TestMain(m)
|
||||
}
|
||||
|
||||
// TestWritePermissions tests reading and writing permissions
|
||||
func (f *Fs) TestWritePermissions(t *testing.T, r *fstest.Run) {
|
||||
// setup
|
||||
ctx, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
_ = f.opt.MetadataPermissions.Set("read,write")
|
||||
file1 := r.WriteFile(randomFilename(), content, t2)
|
||||
|
||||
// add a permission with "read" role
|
||||
permissions := defaultPermissions()
|
||||
permissions[0].Roles[0] = api.ReadRole
|
||||
expectedMeta, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
|
||||
f.compareMeta(t, expectedMeta, actualMeta, false)
|
||||
expectedP, actualP := unmarshalPerms(t, expectedMeta["permissions"]), unmarshalPerms(t, actualMeta["permissions"])
|
||||
|
||||
found, num := false, 0
|
||||
foundCount := 0
|
||||
for i, p := range actualP {
|
||||
for _, identity := range p.GrantedToIdentities {
|
||||
if identity.User.DisplayName == testUserID {
|
||||
// note: expected will always be element 0 here, but actual may be variable based on org settings
|
||||
assert.Equal(t, expectedP[0].Roles, p.Roles)
|
||||
found, num = true, i
|
||||
foundCount++
|
||||
}
|
||||
}
|
||||
if f.driveType == driveTypePersonal {
|
||||
if p.GrantedTo != nil && p.GrantedTo.User != (api.Identity{}) && p.GrantedTo.User.ID == testUserID { // shows up in a different place on biz vs. personal
|
||||
assert.Equal(t, expectedP[0].Roles, p.Roles)
|
||||
found, num = true, i
|
||||
foundCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
assert.True(t, found, fmt.Sprintf("no permission found with expected role (want: \n\n%v \n\ngot: \n\n%v\n\n)", indent(t, expectedMeta["permissions"]), indent(t, actualMeta["permissions"])))
|
||||
assert.Equal(t, 1, foundCount, "expected to find exactly 1 match")
|
||||
|
||||
// update it to "write"
|
||||
permissions = actualP
|
||||
permissions[num].Roles[0] = api.WriteRole
|
||||
expectedMeta, actualMeta = f.putWithMeta(ctx, t, &file1, permissions)
|
||||
f.compareMeta(t, expectedMeta, actualMeta, false)
|
||||
if f.driveType != driveTypePersonal {
|
||||
// zero out some things we expect to be different
|
||||
expectedP, actualP = unmarshalPerms(t, expectedMeta["permissions"]), unmarshalPerms(t, actualMeta["permissions"])
|
||||
normalize(expectedP)
|
||||
normalize(actualP)
|
||||
expectedMeta.Set("permissions", marshalPerms(t, expectedP))
|
||||
actualMeta.Set("permissions", marshalPerms(t, actualP))
|
||||
}
|
||||
assert.JSONEq(t, expectedMeta["permissions"], actualMeta["permissions"])
|
||||
|
||||
// remove it
|
||||
permissions[num] = nil
|
||||
_, actualMeta = f.putWithMeta(ctx, t, &file1, permissions)
|
||||
if f.driveType == driveTypePersonal {
|
||||
perms, ok := actualMeta["permissions"]
|
||||
assert.False(t, ok, fmt.Sprintf("permissions metadata key was unexpectedly found: %v", perms))
|
||||
return
|
||||
}
|
||||
_, actualP = unmarshalPerms(t, expectedMeta["permissions"]), unmarshalPerms(t, actualMeta["permissions"])
|
||||
|
||||
found = false
|
||||
var foundP *api.PermissionsType
|
||||
for _, p := range actualP {
|
||||
if p.GrantedTo == nil || p.GrantedTo.User == (api.Identity{}) || p.GrantedTo.User.ID != testUserID {
|
||||
continue
|
||||
}
|
||||
found = true
|
||||
foundP = p
|
||||
}
|
||||
assert.False(t, found, fmt.Sprintf("permission was found but expected to be removed: %v", foundP))
|
||||
}
|
||||
|
||||
// TestUploadSinglePart tests reading/writing permissions using uploadSinglepart()
|
||||
// This is only used when file size is exactly 0.
|
||||
func (f *Fs) TestUploadSinglePart(t *testing.T, r *fstest.Run) {
|
||||
content = ""
|
||||
f.TestWritePermissions(t, r)
|
||||
content = "hello"
|
||||
}
|
||||
|
||||
// TestReadPermissions tests that no permissions are written when --onedrive-metadata-permissions has "read" but not "write"
|
||||
func (f *Fs) TestReadPermissions(t *testing.T, r *fstest.Run) {
|
||||
// setup
|
||||
ctx, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
file1 := r.WriteFile(randomFilename(), "hello", t2)
|
||||
|
||||
// try adding a permission without --onedrive-metadata-permissions -- should fail
|
||||
// test that what we got before vs. after is the same
|
||||
_ = f.opt.MetadataPermissions.Set("read")
|
||||
_, expectedMeta := f.putWithMeta(ctx, t, &file1, []*api.PermissionsType{}) // return var intentionally switched here
|
||||
permissions := defaultPermissions()
|
||||
_, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
|
||||
if f.driveType == driveTypePersonal {
|
||||
perms, ok := actualMeta["permissions"]
|
||||
assert.False(t, ok, fmt.Sprintf("permissions metadata key was unexpectedly found: %v", perms))
|
||||
return
|
||||
}
|
||||
assert.JSONEq(t, expectedMeta["permissions"], actualMeta["permissions"])
|
||||
}
|
||||
|
||||
// TestReadMetadata tests that all the read-only system properties are present and non-blank
|
||||
func (f *Fs) TestReadMetadata(t *testing.T, r *fstest.Run) {
|
||||
// setup
|
||||
ctx, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
file1 := r.WriteFile(randomFilename(), "hello", t2)
|
||||
permissions := defaultPermissions()
|
||||
|
||||
_ = f.opt.MetadataPermissions.Set("read,write")
|
||||
_, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
|
||||
optionals := []string{"package-type", "shared-by-id", "shared-scope", "shared-time", "shared-owner-id"} // not always present
|
||||
for k := range systemMetadataInfo {
|
||||
if slices.Contains(optionals, k) {
|
||||
continue
|
||||
}
|
||||
if k == "description" && f.driveType != driveTypePersonal {
|
||||
continue // not supported
|
||||
}
|
||||
gotV, ok := actualMeta[k]
|
||||
assert.True(t, ok, fmt.Sprintf("property is missing: %v", k))
|
||||
assert.NotEmpty(t, gotV, fmt.Sprintf("property is blank: %v", k))
|
||||
}
|
||||
}
|
||||
|
||||
// TestDirectoryMetadata tests reading and writing modtime and other metadata and permissions for directories
|
||||
func (f *Fs) TestDirectoryMetadata(t *testing.T, r *fstest.Run) {
|
||||
// setup
|
||||
ctx, ci := fs.AddConfig(ctx)
|
||||
ci.Metadata = true
|
||||
_ = f.opt.MetadataPermissions.Set("read,write")
|
||||
permissions := defaultPermissions()
|
||||
permissions[0].Roles[0] = api.ReadRole
|
||||
|
||||
expectedMeta := fs.Metadata{
|
||||
"mtime": t1.Format(timeFormatOut),
|
||||
"btime": t2.Format(timeFormatOut),
|
||||
"content-type": dirMimeType,
|
||||
"description": "that is so meta!",
|
||||
}
|
||||
b, err := json.MarshalIndent(permissions, "", "\t")
|
||||
assert.NoError(t, err)
|
||||
expectedMeta.Set("permissions", string(b))
|
||||
|
||||
compareDirMeta := func(expectedMeta, actualMeta fs.Metadata, ignoreID bool) {
f.compareMeta(t, expectedMeta, actualMeta, ignoreID)

// check that all required system properties are present
optionals := []string{"package-type", "shared-by-id", "shared-scope", "shared-time", "shared-owner-id"} // not always present
for k := range systemMetadataInfo {
if slices.Contains(optionals, k) {
continue
}
if k == "description" && f.driveType != driveTypePersonal {
continue // not supported
}
gotV, ok := actualMeta[k]
assert.True(t, ok, fmt.Sprintf("property is missing: %v", k))
assert.NotEmpty(t, gotV, fmt.Sprintf("property is blank: %v", k))
}
}
newDst, err := operations.MkdirMetadata(ctx, f, "subdir", expectedMeta)
assert.NoError(t, err)
require.NotNil(t, newDst)
assert.Equal(t, "subdir", newDst.Remote())

actualMeta, err := fs.GetMetadata(ctx, newDst)
assert.NoError(t, err)
assert.NotNil(t, actualMeta)
compareDirMeta(expectedMeta, actualMeta, false)

// modtime
assert.Equal(t, t1.Truncate(f.Precision()), newDst.ModTime(ctx))
// try changing it and re-check it
newDst, err = operations.SetDirModTime(ctx, f, newDst, "", t2)
assert.NoError(t, err)
assert.Equal(t, t2.Truncate(f.Precision()), newDst.ModTime(ctx))
// ensure that f.DirSetModTime also works
err = f.DirSetModTime(ctx, "subdir", t3)
assert.NoError(t, err)
entries, err := f.List(ctx, "")
assert.NoError(t, err)
entries.ForDir(func(dir fs.Directory) {
if dir.Remote() == "subdir" {
assert.True(t, t3.Truncate(f.Precision()).Equal(dir.ModTime(ctx)), fmt.Sprintf("got %v", dir.ModTime(ctx)))
}
})

// test updating metadata on existing dir
actualMeta, err = fs.GetMetadata(ctx, newDst) // get fresh info as we've been changing modtimes
assert.NoError(t, err)
expectedMeta = actualMeta
expectedMeta.Set("description", "metadata is fun!")
expectedMeta.Set("btime", t3.Format(timeFormatOut))
expectedMeta.Set("mtime", t1.Format(timeFormatOut))
expectedMeta.Set("content-type", dirMimeType)
perms := unmarshalPerms(t, expectedMeta["permissions"])
perms[0].Roles[0] = api.WriteRole
b, err = json.MarshalIndent(perms, "", "\t")
assert.NoError(t, err)
expectedMeta.Set("permissions", string(b))

newDst, err = operations.MkdirMetadata(ctx, f, "subdir", expectedMeta)
assert.NoError(t, err)
require.NotNil(t, newDst)
assert.Equal(t, "subdir", newDst.Remote())

actualMeta, err = fs.GetMetadata(ctx, newDst)
assert.NoError(t, err)
assert.NotNil(t, actualMeta)
compareDirMeta(expectedMeta, actualMeta, false)

// test copying metadata from one dir to another
copiedDir, err := operations.CopyDirMetadata(ctx, f, nil, "subdir2", newDst)
assert.NoError(t, err)
require.NotNil(t, copiedDir)
assert.Equal(t, "subdir2", copiedDir.Remote())

actualMeta, err = fs.GetMetadata(ctx, copiedDir)
assert.NoError(t, err)
assert.NotNil(t, actualMeta)
compareDirMeta(expectedMeta, actualMeta, true)

// test DirModTimeUpdatesOnWrite
expectedTime := copiedDir.ModTime(ctx)
assert.True(t, !expectedTime.IsZero())
r.WriteObject(ctx, copiedDir.Remote()+"/"+randomFilename(), "hi there", t3)
entries, err = f.List(ctx, "")
assert.NoError(t, err)
entries.ForDir(func(dir fs.Directory) {
if dir.Remote() == copiedDir.Remote() {
assert.True(t, expectedTime.Equal(dir.ModTime(ctx)), fmt.Sprintf("want %v got %v", expectedTime, dir.ModTime(ctx)))
}
})
}

// TestServerSideCopyMove tests server-side Copy and Move
func (f *Fs) TestServerSideCopyMove(t *testing.T, r *fstest.Run) {
// setup
ctx, ci := fs.AddConfig(ctx)
ci.Metadata = true
_ = f.opt.MetadataPermissions.Set("read,write")
file1 := r.WriteFile(randomFilename(), content, t2)

// add a permission with "read" role
permissions := defaultPermissions()
permissions[0].Roles[0] = api.ReadRole
expectedMeta, actualMeta := f.putWithMeta(ctx, t, &file1, permissions)
f.compareMeta(t, expectedMeta, actualMeta, false)

comparePerms := func(expectedMeta, actualMeta fs.Metadata) (newExpectedMeta, newActualMeta fs.Metadata) {
expectedP, actualP := unmarshalPerms(t, expectedMeta["permissions"]), unmarshalPerms(t, actualMeta["permissions"])
normalize(expectedP)
normalize(actualP)
expectedMeta.Set("permissions", marshalPerms(t, expectedP))
actualMeta.Set("permissions", marshalPerms(t, actualP))
assert.JSONEq(t, expectedMeta["permissions"], actualMeta["permissions"])
return expectedMeta, actualMeta
}

// Copy
obj1, err := f.NewObject(ctx, file1.Path)
assert.NoError(t, err)
originalMeta := actualMeta
obj2, err := f.Copy(ctx, obj1, randomFilename())
assert.NoError(t, err)
actualMeta, err = fs.GetMetadata(ctx, obj2)
assert.NoError(t, err)
expectedMeta, actualMeta = comparePerms(originalMeta, actualMeta)
f.compareMeta(t, expectedMeta, actualMeta, true)

// Move
obj3, err := f.Move(ctx, obj1, randomFilename())
assert.NoError(t, err)
actualMeta, err = fs.GetMetadata(ctx, obj3)
assert.NoError(t, err)
expectedMeta, actualMeta = comparePerms(originalMeta, actualMeta)
f.compareMeta(t, expectedMeta, actualMeta, true)
}

// helper function to put an object with metadata and permissions
func (f *Fs) putWithMeta(ctx context.Context, t *testing.T, file *fstest.Item, perms []*api.PermissionsType) (expectedMeta, actualMeta fs.Metadata) {
t.Helper()
expectedMeta = fs.Metadata{
"mtime": t1.Format(timeFormatOut),
"btime": t2.Format(timeFormatOut),
"description": "that is so meta!",
}

expectedMeta.Set("permissions", marshalPerms(t, perms))
obj := fstests.PutTestContentsMetadata(ctx, t, f, file, content, true, "plain/text", expectedMeta)
do, ok := obj.(fs.Metadataer)
require.True(t, ok)
actualMeta, err := do.Metadata(ctx)
require.NoError(t, err)
return expectedMeta, actualMeta
}

func randomFilename() string {
return "some file-" + random.String(8) + ".txt"
}

func (f *Fs) compareMeta(t *testing.T, expectedMeta, actualMeta fs.Metadata, ignoreID bool) {
t.Helper()
for k, v := range expectedMeta {
gotV, ok := actualMeta[k]
switch k {
case "shared-owner-id", "shared-time", "shared-by-id", "shared-scope":
continue
case "permissions":
continue
case "utime":
assert.True(t, ok, fmt.Sprintf("expected metadata key is missing: %v", k))
if f.driveType == driveTypePersonal {
compareTimeStrings(t, k, v, gotV, time.Minute) // read-only upload time, so slight difference expected -- use larger precision
continue
}
compareTimeStrings(t, k, expectedMeta["btime"], gotV, time.Minute) // another bizarre difference between personal and business...
continue
case "id":
if ignoreID {
continue // different id is expected when copying meta from one item to another
}
case "mtime", "btime":
assert.True(t, ok, fmt.Sprintf("expected metadata key is missing: %v", k))
compareTimeStrings(t, k, v, gotV, time.Second)
continue
case "description":
if f.driveType != driveTypePersonal {
continue // not supported
}
}
assert.True(t, ok, fmt.Sprintf("expected metadata key is missing: %v", k))
assert.Equal(t, v, gotV, actualMeta)
}
}

func compareTimeStrings(t *testing.T, remote, want, got string, precision time.Duration) {
wantT, err := time.Parse(timeFormatIn, want)
assert.NoError(t, err)
gotT, err := time.Parse(timeFormatIn, got)
assert.NoError(t, err)
fstest.AssertTimeEqualWithPrecision(t, remote, wantT, gotT, precision)
}

func marshalPerms(t *testing.T, p []*api.PermissionsType) string {
b, err := json.MarshalIndent(p, "", "\t")
assert.NoError(t, err)
return string(b)
}

func unmarshalPerms(t *testing.T, perms string) (p []*api.PermissionsType) {
t.Helper()
err := json.Unmarshal([]byte(perms), &p)
assert.NoError(t, err)
return p
}

func indent(t *testing.T, s string) string {
p := unmarshalPerms(t, s)
return marshalPerms(t, p)
}

func defaultPermissions() []*api.PermissionsType {
return []*api.PermissionsType{{
GrantedTo: &api.IdentitySet{User: api.Identity{}},
GrantedToIdentities: []*api.IdentitySet{{User: api.Identity{ID: testUserID}}},
Roles: []api.Role{api.WriteRole},
}}
}

// zeroes out some things we expect to be different when copying/moving between objects
func normalize(Ps []*api.PermissionsType) {
for _, ep := range Ps {
ep.ID = ""
ep.Link = nil
ep.ShareID = ""
}
}

func (f *Fs) resetTestDefaults(r *fstest.Run) {
ci := fs.GetConfig(ctx)
ci.Metadata = false
_ = f.opt.MetadataPermissions.Set("off")
r.Finalise()
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
newTestF := func() (*Fs, *fstest.Run) {
r := fstest.NewRunIndividual(t)
testF, ok := r.Fremote.(*Fs)
if !ok {
t.FailNow()
}
return testF, r
}

testF, r := newTestF()
t.Run("TestWritePermissions", func(t *testing.T) { testF.TestWritePermissions(t, r) })
testF.resetTestDefaults(r)
testF, r = newTestF()
t.Run("TestUploadSinglePart", func(t *testing.T) { testF.TestUploadSinglePart(t, r) })
testF.resetTestDefaults(r)
testF, r = newTestF()
t.Run("TestReadPermissions", func(t *testing.T) { testF.TestReadPermissions(t, r) })
testF.resetTestDefaults(r)
testF, r = newTestF()
t.Run("TestReadMetadata", func(t *testing.T) { testF.TestReadMetadata(t, r) })
testF.resetTestDefaults(r)
testF, r = newTestF()
t.Run("TestDirectoryMetadata", func(t *testing.T) { testF.TestDirectoryMetadata(t, r) })
testF.resetTestDefaults(r)
testF, r = newTestF()
t.Run("TestServerSideCopyMove", func(t *testing.T) { testF.TestServerSideCopyMove(t, r) })
testF.resetTestDefaults(r)
}

var _ fstests.InternalTester = (*Fs)(nil)
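The dispatcher above is how a backend surfaces extra tests to rclone's integration harness: implement InternalTest and assert the optional fstests.InternalTester interface at compile time. A minimal sketch of that wiring (illustrative package only; everything except InternalTest and fstests.InternalTester is made up):

package example

import (
	"testing"

	"github.com/rclone/rclone/fstest/fstests"
)

// Fs stands in for a real backend Fs.
type Fs struct{}

// InternalTest is picked up and run by the integration test harness.
func (f *Fs) InternalTest(t *testing.T) {
	t.Run("SmokeTest", func(t *testing.T) {
		// backend-specific assertions would go here
	})
}

// Compile-time check that *Fs satisfies the optional interface.
var _ fstests.InternalTester = (*Fs)(nil)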
@ -1,10 +1,10 @@
package quickxorhash

import (
"crypto/rand"
"encoding/base64"
"fmt"
"hash"
"math/rand"
"testing"

"github.com/stretchr/testify/assert"
@ -171,7 +171,9 @@ var _ hash.Hash = (*quickXorHash)(nil)
func BenchmarkQuickXorHash(b *testing.B) {
b.SetBytes(1 << 20)
buf := make([]byte, 1<<20)
rand.Read(buf)
n, err := rand.Read(buf)
require.NoError(b, err)
require.Equal(b, len(buf), n)
h := New()
b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -437,23 +437,41 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
return nil, fs.ErrorFileNameTooLong
|
||||
}
|
||||
|
||||
// Copy the object
|
||||
moveCopyFileData := moveCopyFile{
|
||||
SessionID: f.session.SessionID,
|
||||
SrcFileID: srcObj.id,
|
||||
DstFolderID: directoryID,
|
||||
Move: "true",
|
||||
OverwriteIfExists: "true",
|
||||
NewFileName: leaf,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/file/move_copy.json",
|
||||
}
|
||||
var request interface{} = moveCopyFileData
|
||||
|
||||
// use /file/rename.json if moving within the same directory
|
||||
_, srcDirID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if srcDirID == directoryID {
|
||||
fs.Debugf(src, "same parent dir (%v) - using file/rename instead of move_copy for %s", directoryID, remote)
|
||||
renameFileData := renameFile{
|
||||
SessionID: f.session.SessionID,
|
||||
FileID: srcObj.id,
|
||||
NewFileName: leaf,
|
||||
}
|
||||
opts.Path = "/file/rename.json"
|
||||
request = renameFileData
|
||||
}
|
||||
|
||||
// Move the object
|
||||
var resp *http.Response
|
||||
response := moveCopyFileResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
copyFileData := moveCopyFile{
|
||||
SessionID: f.session.SessionID,
|
||||
SrcFileID: srcObj.id,
|
||||
DstFolderID: directoryID,
|
||||
Move: "true",
|
||||
OverwriteIfExists: "true",
|
||||
NewFileName: leaf,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/file/move_copy.json",
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, ©FileData, &response)
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
@ -482,27 +500,47 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
|
||||
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
|
||||
srcID, srcDirectoryID, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// move_copy will silently truncate new filenames
|
||||
if len(dstLeaf) > 255 {
|
||||
fs.Debugf(src, "Can't move folder: name (%q) exceeds 255 char", dstLeaf)
|
||||
return fs.ErrorFileNameTooLong
|
||||
}
|
||||
|
||||
moveFolderData := moveCopyFolder{
|
||||
SessionID: f.session.SessionID,
|
||||
FolderID: srcID,
|
||||
DstFolderID: dstDirectoryID,
|
||||
Move: "true",
|
||||
NewFolderName: dstLeaf,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/folder/move_copy.json",
|
||||
}
|
||||
var request interface{} = moveFolderData
|
||||
|
||||
// use /folder/rename.json if moving within the same parent directory
|
||||
if srcDirectoryID == dstDirectoryID {
|
||||
fs.Debugf(dstRemote, "same parent dir (%v) - using folder/rename instead of move_copy", srcDirectoryID)
|
||||
renameFolderData := renameFolder{
|
||||
SessionID: f.session.SessionID,
|
||||
FolderID: srcID,
|
||||
FolderName: dstLeaf,
|
||||
}
|
||||
opts.Path = "/folder/rename.json"
|
||||
request = renameFolderData
|
||||
}
|
||||
|
||||
// Do the move
|
||||
var resp *http.Response
|
||||
response := moveCopyFolderResponse{}
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
moveFolderData := moveCopyFolder{
|
||||
SessionID: f.session.SessionID,
|
||||
FolderID: srcID,
|
||||
DstFolderID: dstDirectoryID,
|
||||
Move: "true",
|
||||
NewFolderName: dstLeaf,
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/folder/move_copy.json",
|
||||
}
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, &moveFolderData, &response)
|
||||
resp, err = f.srv.CallJSON(ctx, &opts, &request, &response)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -100,6 +100,13 @@ type moveCopyFolder struct {
|
||||
NewFolderName string `json:"new_folder_name"` // New name for destination folder.
|
||||
}
|
||||
|
||||
type renameFolder struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FolderID string `json:"folder_id"`
|
||||
FolderName string `json:"folder_name"` // New name for destination folder (max 255).
|
||||
SharingID string `json:"sharing_id"`
|
||||
}
|
||||
|
||||
type moveCopyFolderResponse struct {
|
||||
FolderID string `json:"FolderID"`
|
||||
}
|
||||
@ -146,6 +153,14 @@ type moveCopyFileResponse struct {
|
||||
Size string `json:"Size"`
|
||||
}
|
||||
|
||||
type renameFile struct {
|
||||
SessionID string `json:"session_id"`
|
||||
NewFileName string `json:"new_file_name"` // New name for destination file.
|
||||
FileID string `json:"file_id"`
|
||||
AccessFolderID string `json:"access_folder_id"`
|
||||
SharingID string `json:"sharing_id"`
|
||||
}
|
||||
|
||||
type createFile struct {
|
||||
SessionID string `json:"session_id"`
|
||||
FolderID string `json:"folder_id"`
|
||||
|
@ -52,6 +52,8 @@ func getConfigurationProvider(opt *Options) (common.ConfigurationProvider, error
|
||||
case noAuth:
|
||||
fs.Infof("client", "using no auth provider")
|
||||
return getNoAuthConfiguration()
|
||||
case workloadIdentity:
|
||||
return auth.OkeWorkloadIdentityConfigurationProvider()
|
||||
default:
|
||||
}
|
||||
return common.DefaultConfigProvider(), nil
|
||||
|
@ -7,12 +7,15 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/oracle/oci-go-sdk/v65/common"
|
||||
"github.com/oracle/oci-go-sdk/v65/objectstorage"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
)
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@ -23,6 +26,7 @@ const (
|
||||
operationRename = "rename"
|
||||
operationListMultiPart = "list-multipart-uploads"
|
||||
operationCleanup = "cleanup"
|
||||
operationRestore = "restore"
|
||||
)
|
||||
|
||||
var commandHelp = []fs.CommandHelp{{
|
||||
@ -77,6 +81,42 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
|
||||
Opts: map[string]string{
|
||||
"max-age": "Max age of upload to delete",
|
||||
},
|
||||
}, {
|
||||
Name: operationRestore,
|
||||
Short: "Restore objects from Archive to Standard storage",
|
||||
Long: `This command can be used to restore one or more objects from Archive to Standard storage.
|
||||
|
||||
Usage Examples:
|
||||
|
||||
rclone backend restore oos:bucket/path/to/directory -o hours=HOURS
|
||||
rclone backend restore oos:bucket -o hours=HOURS
|
||||
|
||||
This flag also obeys the filters. Test first with --interactive/-i or --dry-run flags
|
||||
|
||||
rclone --interactive backend restore --include "*.txt" oos:bucket/path -o hours=72
|
||||
|
||||
All the objects shown will be marked for restore, then
|
||||
|
||||
rclone backend restore --include "*.txt" oos:bucket/path -o hours=72
|
||||
|
||||
It returns a list of status dictionaries with Object Name and Status
|
||||
keys. The Status will be "RESTORED" if it was successful or an error message
|
||||
if not.
|
||||
|
||||
[
|
||||
{
|
||||
"Object": "test.txt"
|
||||
"Status": "RESTORED",
|
||||
},
|
||||
{
|
||||
"Object": "test/file4.txt"
|
||||
"Status": "RESTORED",
|
||||
}
|
||||
]
|
||||
`,
|
||||
Opts: map[string]string{
|
||||
"hours": "The number of hours for which this object will be restored. Default is 24 hrs.",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@ -113,6 +153,8 @@ func (f *Fs) Command(ctx context.Context, commandName string, args []string,
|
||||
}
|
||||
}
|
||||
return nil, f.cleanUp(ctx, maxAge)
|
||||
case operationRestore:
|
||||
return f.restore(ctx, opt)
|
||||
default:
|
||||
return nil, fs.ErrorCommandNotFound
|
||||
}
|
||||
@ -290,3 +332,63 @@ func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPat
|
||||
}
|
||||
return uploadedParts, nil
|
||||
}
|
||||
|
||||
func (f *Fs) restore(ctx context.Context, opt map[string]string) (interface{}, error) {
|
||||
req := objectstorage.RestoreObjectsRequest{
|
||||
NamespaceName: common.String(f.opt.Namespace),
|
||||
RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},
|
||||
}
|
||||
if hours := opt["hours"]; hours != "" {
|
||||
ihours, err := strconv.Atoi(hours)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("bad value for hours: %w", err)
|
||||
}
|
||||
req.RestoreObjectsDetails.Hours = &ihours
|
||||
}
|
||||
type status struct {
|
||||
Object string
|
||||
Status string
|
||||
}
|
||||
var (
|
||||
outMu sync.Mutex
|
||||
out = []status{}
|
||||
err error
|
||||
)
|
||||
err = operations.ListFn(ctx, f, func(obj fs.Object) {
|
||||
// Remember this is run --checkers times concurrently
|
||||
o, ok := obj.(*Object)
|
||||
st := status{Object: obj.Remote(), Status: "RESTORED"}
|
||||
defer func() {
|
||||
outMu.Lock()
|
||||
out = append(out, st)
|
||||
outMu.Unlock()
|
||||
}()
|
||||
if !ok {
|
||||
st.Status = "Not an OCI Object Storage object"
|
||||
return
|
||||
}
|
||||
if o.storageTier == nil || (*o.storageTier != "archive") {
|
||||
st.Status = "Object not in Archive storage tier"
|
||||
return
|
||||
}
|
||||
if operations.SkipDestructive(ctx, obj, "restore") {
|
||||
return
|
||||
}
|
||||
bucket, bucketPath := o.split()
|
||||
reqCopy := req
|
||||
reqCopy.BucketName = &bucket
|
||||
reqCopy.ObjectName = &bucketPath
|
||||
var response objectstorage.RestoreObjectsResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
response, err = f.srv.RestoreObjects(ctx, reqCopy)
|
||||
return shouldRetry(ctx, response.HTTPResponse(), err)
|
||||
})
|
||||
if err != nil {
|
||||
st.Status = err.Error()
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return out, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
@ -26,6 +26,7 @@ const (
|
||||
userPrincipal = "user_principal_auth"
|
||||
instancePrincipal = "instance_principal_auth"
|
||||
resourcePrincipal = "resource_principal_auth"
|
||||
workloadIdentity = "workload_identity_auth"
|
||||
environmentAuth = "env_auth"
|
||||
noAuth = "no_auth"
|
||||
|
||||
@ -37,6 +38,8 @@ https://docs.oracle.com/en-us/iaas/Content/API/Concepts/sdkconfig.htm`
|
||||
each instance has its own identity, and authenticates using the certificates that are read from instance metadata.
|
||||
https://docs.oracle.com/en-us/iaas/Content/Identity/Tasks/callingservicesfrominstances.htm`
|
||||
|
||||
workloadIdentityHelpText = `use workload identity to grant OCI Container Engine for Kubernetes workloads policy-driven access to OCI resources using OCI Identity and Access Management (IAM).
|
||||
https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contenggrantingworkloadaccesstoresources.htm`
|
||||
resourcePrincipalHelpText = `use resource principals to make API calls`
|
||||
|
||||
environmentAuthHelpText = `automatically pickup the credentials from runtime(env), first one to provide auth wins`
|
||||
@ -87,6 +90,9 @@ func newOptions() []fs.Option {
|
||||
}, {
|
||||
Value: instancePrincipal,
|
||||
Help: instancePrincipalHelpText,
|
||||
}, {
|
||||
Value: workloadIdentity,
|
||||
Help: workloadIdentityHelpText,
|
||||
}, {
|
||||
Value: resourcePrincipal,
|
||||
Help: resourcePrincipalHelpText,
|
||||
|
@ -71,12 +71,11 @@ type Error struct {
|
||||
|
||||
// ErrorDetails contains further details of api error
|
||||
type ErrorDetails struct {
|
||||
Type string `json:"@type,omitempty"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
Domain string `json:"domain,omitempty"`
|
||||
Metadata struct {
|
||||
} `json:"metadata,omitempty"` // TODO: undiscovered yet
|
||||
Locale string `json:"locale,omitempty"` // e.g. "en"
|
||||
Type string `json:"@type,omitempty"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
Domain string `json:"domain,omitempty"`
|
||||
Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
|
||||
Locale string `json:"locale,omitempty"` // e.g. "en"
|
||||
Message string `json:"message,omitempty"`
|
||||
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
|
||||
Detail string `json:"detail,omitempty"`
|
||||
@ -266,13 +265,11 @@ type FileApp struct {
|
||||
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
||||
IconLink string `json:"icon_link,omitempty"`
|
||||
IsDefault bool `json:"is_default,omitempty"`
|
||||
Params struct {
|
||||
} `json:"params,omitempty"` // TODO
|
||||
CategoryIds []interface{} `json:"category_ids,omitempty"`
|
||||
AdSceneType int `json:"ad_scene_type,omitempty"`
|
||||
Space string `json:"space,omitempty"`
|
||||
Links struct {
|
||||
} `json:"links,omitempty"` // TODO
|
||||
Params struct{} `json:"params,omitempty"` // TODO
|
||||
CategoryIDs []interface{} `json:"category_ids,omitempty"`
|
||||
AdSceneType int `json:"ad_scene_type,omitempty"`
|
||||
Space string `json:"space,omitempty"`
|
||||
Links struct{} `json:"links,omitempty"` // TODO
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@ -384,11 +381,10 @@ type NewTask struct {
|
||||
|
||||
// About informs drive status
|
||||
type About struct {
|
||||
Kind string `json:"kind,omitempty"` // "drive#about"
|
||||
Quota *Quota `json:"quota,omitempty"`
|
||||
ExpiresAt string `json:"expires_at,omitempty"`
|
||||
Quotas struct {
|
||||
} `json:"quotas,omitempty"` // maybe []*Quota?
|
||||
Kind string `json:"kind,omitempty"` // "drive#about"
|
||||
Quota *Quota `json:"quota,omitempty"`
|
||||
ExpiresAt string `json:"expires_at,omitempty"`
|
||||
Quotas struct{} `json:"quotas,omitempty"` // maybe []*Quota?
|
||||
}
|
||||
|
||||
// Quota informs drive quota
|
||||
@ -462,7 +458,7 @@ type DecompressResult struct {
|
||||
|
||||
// RequestShare is to request for file share
|
||||
type RequestShare struct {
|
||||
FileIds []string `json:"file_ids,omitempty"`
|
||||
FileIDs []string `json:"file_ids,omitempty"`
|
||||
ShareTo string `json:"share_to,omitempty"` // "publiclink",
|
||||
ExpirationDays int `json:"expiration_days,omitempty"` // -1 = 'forever'
|
||||
PassCodeOption string `json:"pass_code_option,omitempty"` // "NOT_REQUIRED"
|
||||
@ -470,7 +466,7 @@ type RequestShare struct {
|
||||
|
||||
// RequestBatch is to request for batch actions
|
||||
type RequestBatch struct {
|
||||
Ids []string `json:"ids,omitempty"`
|
||||
IDs []string `json:"ids,omitempty"`
|
||||
To map[string]string `json:"to,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -775,7 +775,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
|
||||
expiry = int(math.Ceil(time.Duration(expire).Hours() / 24))
|
||||
}
|
||||
req := api.RequestShare{
|
||||
FileIds: []string{id},
|
||||
FileIDs: []string{id},
|
||||
ShareTo: "publiclink",
|
||||
ExpirationDays: expiry,
|
||||
PassCodeOption: "NOT_REQUIRED",
|
||||
@ -797,7 +797,7 @@ func (f *Fs) deleteObjects(ctx context.Context, IDs []string, useTrash bool) (er
|
||||
action = "batchTrash"
|
||||
}
|
||||
req := api.RequestBatch{
|
||||
Ids: IDs,
|
||||
IDs: IDs,
|
||||
}
|
||||
if err := f.requestBatchAction(ctx, action, &req); err != nil {
|
||||
return fmt.Errorf("delete object failed: %w", err)
|
||||
@ -817,7 +817,7 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
||||
return err
|
||||
}
|
||||
|
||||
var trashedFiles = false
|
||||
trashedFiles := false
|
||||
if check {
|
||||
found, err := f.listAll(ctx, rootID, "", "", func(item *api.File) bool {
|
||||
if !item.Trashed {
|
||||
@ -893,7 +893,7 @@ func (f *Fs) moveObjects(ctx context.Context, IDs []string, dirID string) (err e
|
||||
return nil
|
||||
}
|
||||
req := api.RequestBatch{
|
||||
Ids: IDs,
|
||||
IDs: IDs,
|
||||
To: map[string]string{"parent_id": parentIDForRequest(dirID)},
|
||||
}
|
||||
if err := f.requestBatchAction(ctx, "batchMove", &req); err != nil {
|
||||
@ -1039,7 +1039,7 @@ func (f *Fs) copyObjects(ctx context.Context, IDs []string, dirID string) (err e
|
||||
return nil
|
||||
}
|
||||
req := api.RequestBatch{
|
||||
Ids: IDs,
|
||||
IDs: IDs,
|
||||
To: map[string]string{"parent_id": parentIDForRequest(dirID)},
|
||||
}
|
||||
if err := f.requestBatchAction(ctx, "batchCopy", &req); err != nil {
|
||||
|
@ -244,7 +244,7 @@ func (f *Fs) Name() string {
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
return f.opt.Enc.ToStandardPath(f.root)
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
|
@ -104,6 +104,16 @@ func (f *File) IsDir() bool {
|
||||
return f.Type == "D" || f.Type == "S" || f.Type == "T"
|
||||
}
|
||||
|
||||
// IsProjectFolder returns true if object is a project folder
|
||||
// false otherwise
|
||||
func (f *File) IsProjectFolder() bool {
|
||||
if f == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return f.Type == "S"
|
||||
}
|
||||
|
||||
// SetMTimeParams is the request to set modification time for object
|
||||
type SetMTimeParams struct {
|
||||
ID string `json:"id,omitempty"`
|
||||
|
@ -89,7 +89,13 @@ func init() {
|
||||
},
|
||||
{
|
||||
Name: "hard_delete",
|
||||
Help: "Delete files permanently rather than putting them into the trash.",
|
||||
Help: "Delete files permanently rather than putting them into the trash",
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
},
|
||||
{
|
||||
Name: "skip_project_folders",
|
||||
Help: "Skip project folders in operations",
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
},
|
||||
@ -106,6 +112,7 @@ type Options struct {
|
||||
MinimalChunkSize fs.SizeSuffix `config:"minimal_chunk_size"`
|
||||
MaximalSummaryChunkSize fs.SizeSuffix `config:"maximal_summary_chunk_size"`
|
||||
HardDelete bool `config:"hard_delete"`
|
||||
SkipProjectFolders bool `config:"skip_project_folders"`
|
||||
}
|
||||
|
||||
// Fs represents remote Quatrix fs
|
||||
@ -228,6 +235,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
if fileID.IsFile() {
|
||||
root, _ = dircache.SplitPath(root)
|
||||
f.dirCache = dircache.New(root, rootID.FileID, f)
|
||||
// Correct root if definitely pointing to a file
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." || f.root == "/" {
|
||||
f.root = ""
|
||||
}
|
||||
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
@ -376,6 +388,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
}
|
||||
|
||||
for _, file := range folder.Content {
|
||||
if f.skipFile(&file) {
|
||||
continue
|
||||
}
|
||||
|
||||
remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
|
||||
if file.IsDir() {
|
||||
f.dirCache.Put(remote, file.ID)
|
||||
@ -401,6 +417,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
func (f *Fs) skipFile(file *api.File) bool {
|
||||
return f.opt.SkipProjectFolders && file.IsProjectFolder()
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
@ -1198,11 +1218,12 @@ func (o *Object) uploadSession(ctx context.Context, parentID, name string) (uplo
|
||||
|
||||
func (o *Object) upload(ctx context.Context, uploadKey string, chunk io.Reader, fullSize int64, offset int64, chunkSize int64, options ...fs.OpenOption) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: fmt.Sprintf(uploadURL, o.fs.opt.Host) + uploadKey,
|
||||
Body: chunk,
|
||||
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize, fullSize),
|
||||
Options: options,
|
||||
Method: "POST",
|
||||
RootURL: fmt.Sprintf(uploadURL, o.fs.opt.Host) + uploadKey,
|
||||
Body: chunk,
|
||||
ContentLength: &chunkSize,
|
||||
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, fullSize),
|
||||
Options: options,
|
||||
}
|
||||
|
||||
var fileID string
|
||||
|
@ -98,4 +98,5 @@ import "github.com/aws/aws-sdk-go/service/s3"
|
||||
genSetFrom(new(s3.HeadObjectOutput), new(s3.GetObjectOutput))
|
||||
genSetFrom(new(s3.CreateMultipartUploadInput), new(s3.PutObjectInput))
|
||||
genSetFrom(new(s3.HeadObjectOutput), new(s3.PutObjectInput))
|
||||
genSetFrom(new(s3.CopyObjectInput), new(s3.PutObjectInput))
|
||||
}
|
||||
|
@ -61,6 +61,7 @@ import (
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"github.com/rclone/rclone/lib/version"
|
||||
"golang.org/x/net/http/httpguts"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// The S3 providers
|
||||
@ -2185,10 +2186,10 @@ If empty it will default to the environment variable "AWS_PROFILE" or
|
||||
Sensitive: true,
|
||||
}, {
|
||||
Name: "upload_concurrency",
|
||||
Help: `Concurrency for multipart uploads.
|
||||
Help: `Concurrency for multipart uploads and copies.
|
||||
|
||||
This is the number of chunks of the same file that are uploaded
|
||||
concurrently.
|
||||
concurrently for multipart uploads and copies.
|
||||
|
||||
If you are uploading small numbers of large files over high-speed links
|
||||
and these uploads do not fully utilize your bandwidth, then increasing
|
||||
@ -3221,6 +3222,10 @@ func setQuirks(opt *Options) {
|
||||
// https://github.com/rclone/rclone/issues/6670
|
||||
useAcceptEncodingGzip = false
|
||||
useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
|
||||
// GCS S3 doesn't support multi-part server side copy:
|
||||
// See: https://issuetracker.google.com/issues/323465186
|
||||
// So make cutoff very large which it does seem to support
|
||||
opt.CopyCutoff = math.MaxInt64
|
||||
default:
|
||||
fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
|
||||
fallthrough
|
||||
@ -4507,10 +4512,20 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst

fs.Debugf(src, "Starting multipart copy with %d parts", numParts)

var parts []*s3.CompletedPart
var (
parts = make([]*s3.CompletedPart, numParts)
g, gCtx = errgroup.WithContext(ctx)
)
g.SetLimit(f.opt.UploadConcurrency)
for partNum := int64(1); partNum <= numParts; partNum++ {
if err := f.pacer.Call(func() (bool, error) {
partNum := partNum
// Fail fast, in case an errgroup managed function returns an error
// gCtx is cancelled. There is no point in uploading all the other parts.
if gCtx.Err() != nil {
break
}
partNum := partNum // for closure
g.Go(func() error {
var uout *s3.UploadPartCopyOutput
uploadPartReq := &s3.UploadPartCopyInput{}
//structs.SetFrom(uploadPartReq, copyReq)
setFrom_s3UploadPartCopyInput_s3CopyObjectInput(uploadPartReq, copyReq)
@ -4519,18 +4534,24 @@ func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dst
uploadPartReq.PartNumber = &partNum
uploadPartReq.UploadId = uid
uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize))
uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
err := f.pacer.Call(func() (bool, error) {
uout, err = f.c.UploadPartCopyWithContext(gCtx, uploadPartReq)
return f.shouldRetry(gCtx, err)
})
if err != nil {
return f.shouldRetry(ctx, err)
return err
}
parts = append(parts, &s3.CompletedPart{
parts[partNum-1] = &s3.CompletedPart{
PartNumber: &partNum,
ETag: uout.CopyPartResult.ETag,
})
return false, nil
}); err != nil {
return err
}
}
return nil
})
}

err = g.Wait()
if err != nil {
return err
}

return f.pacer.Call(func() (bool, error) {
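The copy hunk above replaces a sequential loop with a bounded worker pool: results go into a pre-sized slice indexed by part number, errgroup.SetLimit caps in-flight goroutines at the upload concurrency, and the loop stops launching work once the group context is cancelled. A self-contained sketch of the same pattern, with the actual UploadPartCopy call stubbed out:

package example

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func copyParts(ctx context.Context, numParts, concurrency int) ([]string, error) {
	parts := make([]string, numParts) // pre-sized so each goroutine writes its own slot
	g, gCtx := errgroup.WithContext(ctx)
	g.SetLimit(concurrency)
	for i := 0; i < numParts; i++ {
		if gCtx.Err() != nil {
			break // fail fast: another part has already returned an error
		}
		i := i // capture for the closure
		g.Go(func() error {
			// a real implementation would issue UploadPartCopy here
			parts[i] = fmt.Sprintf("etag-for-part-%d", i+1)
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return nil, err
	}
	return parts, nil
}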
@ -4570,10 +4591,22 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
|
||||
fs.Debugf(src, "Can't copy - not same remote type")
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
|
||||
srcBucket, srcPath := srcObj.split()
|
||||
req := s3.CopyObjectInput{
|
||||
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
|
||||
}
|
||||
|
||||
// Update the metadata if it is in use
|
||||
if ci := fs.GetConfig(ctx); ci.Metadata {
|
||||
ui, err := srcObj.prepareUpload(ctx, src, fs.MetadataAsOpenOptions(ctx), true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
setFrom_s3CopyObjectInput_s3PutObjectInput(&req, ui.req)
|
||||
req.MetadataDirective = aws.String(s3.MetadataDirectiveReplace)
|
||||
}
|
||||
|
||||
err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -5676,7 +5709,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
ui, err := o.prepareUpload(ctx, src, options)
|
||||
ui, err := o.prepareUpload(ctx, src, options, false)
|
||||
if err != nil {
|
||||
return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
@ -5711,6 +5744,13 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
|
||||
var mOut *s3.CreateMultipartUploadOutput
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
mOut, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
|
||||
if err == nil {
|
||||
if mOut == nil {
|
||||
err = fserrors.RetryErrorf("internal error: no info from multipart upload")
|
||||
} else if mOut.UploadId == nil {
|
||||
err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
|
||||
}
|
||||
}
|
||||
return f.shouldRetry(ctx, err)
|
||||
})
|
||||
if err != nil {
|
||||
@ -6036,7 +6076,9 @@ type uploadInfo struct {
|
||||
}
|
||||
|
||||
// Prepare object for being uploaded
|
||||
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption) (ui uploadInfo, err error) {
|
||||
//
|
||||
// If noHash is true the md5sum will not be calculated
|
||||
func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, noHash bool) (ui uploadInfo, err error) {
|
||||
bucket, bucketPath := o.split()
|
||||
// Create parent dir/bucket if not saving directory marker
|
||||
if !strings.HasSuffix(o.remote, "/") {
|
||||
@ -6110,7 +6152,7 @@ func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options [
|
||||
var md5sumBase64 string
|
||||
size := src.Size()
|
||||
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
|
||||
if !multipart || !o.fs.opt.DisableChecksum {
|
||||
if !noHash && (!multipart || !o.fs.opt.DisableChecksum) {
|
||||
ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
|
||||
if err == nil && matchMd5.MatchString(ui.md5sumHex) {
|
||||
hashBytes, err := hex.DecodeString(ui.md5sumHex)
|
||||
@ -6222,7 +6264,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
if multipart {
|
||||
wantETag, gotETag, versionID, ui, err = o.uploadMultipart(ctx, src, in, options...)
|
||||
} else {
|
||||
ui, err = o.prepareUpload(ctx, src, options)
|
||||
ui, err = o.prepareUpload(ctx, src, options, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare upload: %w", err)
|
||||
}
|
||||
|
@ -11,6 +11,7 @@ func setFrom_s3ListObjectsInput_s3ListObjectsV2Input(a *s3.ListObjectsInput, b *
|
||||
a.EncodingType = b.EncodingType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.OptionalObjectAttributes = b.OptionalObjectAttributes
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestPayer = b.RequestPayer
|
||||
}
|
||||
@ -25,6 +26,7 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(a *s3.ListObjectsV2Output
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestCharged = b.RequestCharged
|
||||
}
|
||||
|
||||
// setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input copies matching elements from a to b
|
||||
@ -34,7 +36,9 @@ func setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(a *s3.ListObjectVers
|
||||
a.EncodingType = b.EncodingType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.OptionalObjectAttributes = b.OptionalObjectAttributes
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestPayer = b.RequestPayer
|
||||
}
|
||||
|
||||
// setFrom_s3ObjectVersion_s3DeleteMarkerEntry copies matching elements from a to b
|
||||
@ -55,6 +59,7 @@ func setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(a *s3.ListObjectsV
|
||||
a.MaxKeys = b.MaxKeys
|
||||
a.Name = b.Name
|
||||
a.Prefix = b.Prefix
|
||||
a.RequestCharged = b.RequestCharged
|
||||
}
|
||||
|
||||
// setFrom_s3Object_s3ObjectVersion copies matching elements from a to b
|
||||
@ -64,6 +69,7 @@ func setFrom_s3Object_s3ObjectVersion(a *s3.Object, b *s3.ObjectVersion) {
|
||||
a.Key = b.Key
|
||||
a.LastModified = b.LastModified
|
||||
a.Owner = b.Owner
|
||||
a.RestoreStatus = b.RestoreStatus
|
||||
a.Size = b.Size
|
||||
a.StorageClass = b.StorageClass
|
||||
}
|
||||
@ -237,3 +243,37 @@ func setFrom_s3HeadObjectOutput_s3PutObjectInput(a *s3.HeadObjectOutput, b *s3.P
|
||||
a.StorageClass = b.StorageClass
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
}
|
||||
|
||||
// setFrom_s3CopyObjectInput_s3PutObjectInput copies matching elements from a to b
|
||||
func setFrom_s3CopyObjectInput_s3PutObjectInput(a *s3.CopyObjectInput, b *s3.PutObjectInput) {
|
||||
a.ACL = b.ACL
|
||||
a.Bucket = b.Bucket
|
||||
a.BucketKeyEnabled = b.BucketKeyEnabled
|
||||
a.CacheControl = b.CacheControl
|
||||
a.ChecksumAlgorithm = b.ChecksumAlgorithm
|
||||
a.ContentDisposition = b.ContentDisposition
|
||||
a.ContentEncoding = b.ContentEncoding
|
||||
a.ContentLanguage = b.ContentLanguage
|
||||
a.ContentType = b.ContentType
|
||||
a.ExpectedBucketOwner = b.ExpectedBucketOwner
|
||||
a.Expires = b.Expires
|
||||
a.GrantFullControl = b.GrantFullControl
|
||||
a.GrantRead = b.GrantRead
|
||||
a.GrantReadACP = b.GrantReadACP
|
||||
a.GrantWriteACP = b.GrantWriteACP
|
||||
a.Key = b.Key
|
||||
a.Metadata = b.Metadata
|
||||
a.ObjectLockLegalHoldStatus = b.ObjectLockLegalHoldStatus
|
||||
a.ObjectLockMode = b.ObjectLockMode
|
||||
a.ObjectLockRetainUntilDate = b.ObjectLockRetainUntilDate
|
||||
a.RequestPayer = b.RequestPayer
|
||||
a.SSECustomerAlgorithm = b.SSECustomerAlgorithm
|
||||
a.SSECustomerKey = b.SSECustomerKey
|
||||
a.SSECustomerKeyMD5 = b.SSECustomerKeyMD5
|
||||
a.SSEKMSEncryptionContext = b.SSEKMSEncryptionContext
|
||||
a.SSEKMSKeyId = b.SSEKMSKeyId
|
||||
a.ServerSideEncryption = b.ServerSideEncryption
|
||||
a.StorageClass = b.StorageClass
|
||||
a.Tagging = b.Tagging
|
||||
a.WebsiteRedirectLocation = b.WebsiteRedirectLocation
|
||||
}
|
||||
|
@ -301,6 +301,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
// Correct root if definitely pointing to a file
|
||||
f.root = path.Dir(f.root)
|
||||
if f.root == "." || f.root == "/" {
|
||||
f.root = ""
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
|
@ -577,7 +577,7 @@ func (f *Fs) getDownloadLink(ctx context.Context, libraryID, filePath string) (s
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
func (f *Fs) download(ctx context.Context, downloadLink string, size int64, options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
// Check if we need to download partial content
|
||||
var start, end int64 = 0, size
|
||||
partialContent := false
|
||||
@ -606,11 +606,18 @@ func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs
|
||||
// Build the http request
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
RootURL: url,
|
||||
Options: options,
|
||||
}
|
||||
parsedURL, err := url.Parse(downloadLink)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse download url: %w", err)
|
||||
}
|
||||
if parsedURL.IsAbs() {
|
||||
opts.RootURL = downloadLink
|
||||
} else {
|
||||
opts.Path = downloadLink
|
||||
}
|
||||
var resp *http.Response
|
||||
var err error
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err = f.srv.Call(ctx, &opts)
|
||||
return f.shouldRetry(ctx, resp, err)
|
||||
@ -618,7 +625,7 @@ func (f *Fs) download(ctx context.Context, url string, size int64, options ...fs
|
||||
if err != nil {
|
||||
if resp != nil {
|
||||
if resp.StatusCode == 404 {
|
||||
return nil, fmt.Errorf("file not found '%s'", url)
|
||||
return nil, fmt.Errorf("file not found '%s'", downloadLink)
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
@ -688,11 +695,19 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, uploadLink, filePath stri
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: uploadLink,
|
||||
Body: formReader,
|
||||
ContentType: contentType,
|
||||
Parameters: url.Values{"ret-json": {"1"}}, // It needs to be on the url, not in the body parameters
|
||||
}
|
||||
parsedURL, err := url.Parse(uploadLink)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse upload url: %w", err)
|
||||
}
|
||||
if parsedURL.IsAbs() {
|
||||
opts.RootURL = uploadLink
|
||||
} else {
|
||||
opts.Path = uploadLink
|
||||
}
|
||||
result := make([]api.FileDetail, 1)
|
||||
var resp *http.Response
|
||||
// If an error occurs during the call, do not attempt to retry: The upload link is single use only
|
||||
|
@ -1066,9 +1066,10 @@ func NewFsWithConnection(ctx context.Context, f *Fs, name string, root string, m
|
||||
}
|
||||
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
SlowHash: true,
|
||||
PartialUploads: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
SlowHash: true,
|
||||
PartialUploads: true,
|
||||
DirModTimeUpdatesOnWrite: true, // indicate writing files to a directory updates its modtime
|
||||
}).Fill(ctx, f)
|
||||
if !opt.CopyIsHardlink {
|
||||
// Disable server side copy unless --sftp-copy-is-hardlink is set
|
||||
@ -1367,6 +1368,15 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return f.mkdir(ctx, root)
|
||||
}
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
o := Object{
|
||||
fs: f,
|
||||
remote: dir,
|
||||
}
|
||||
return o.SetModTime(ctx, modTime)
|
||||
}
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
||||
// Check to see if directory is empty as some servers will
|
||||
@ -1985,7 +1995,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
||||
return fmt.Errorf("SetModTime failed: %w", err)
|
||||
}
|
||||
err = o.stat(ctx)
|
||||
if err != nil {
|
||||
if err != nil && err != fs.ErrorIsDir {
|
||||
return fmt.Errorf("SetModTime stat failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
@ -2179,12 +2189,13 @@ func (o *Object) Remove(ctx context.Context) error {
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.Abouter = &Fs{}
|
||||
_ fs.Shutdowner = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.Copier = &Fs{}
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.DirSetModTimer = &Fs{}
|
||||
_ fs.Abouter = &Fs{}
|
||||
_ fs.Shutdowner = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
)
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
)
|
||||
@ -93,8 +94,7 @@ func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
|
||||
s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)
|
||||
|
||||
// Allow the command a short time only to shut down
|
||||
// FIXME enable when we get rid of go1.19
|
||||
// s.cmd.WaitDelay = time.Second
|
||||
s.cmd.WaitDelay = time.Second
|
||||
|
||||
return s
|
||||
}
|
||||
|
@ -1531,12 +1531,14 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
||||
o.size = int64(inCount.BytesRead())
|
||||
}
|
||||
}
|
||||
isInContainerVersioning, _ := o.isInContainerVersioning(ctx, container)
|
||||
// If file was a large object and the container is not enable versioning then remove old/all segments
|
||||
if isLargeObject && len(segmentsContainer) > 0 && !isInContainerVersioning {
|
||||
err := o.removeSegmentsLargeObject(ctx, segmentsContainer)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
|
||||
if isLargeObject && len(segmentsContainer) > 0 {
|
||||
isInContainerVersioning, _ := o.isInContainerVersioning(ctx, container)
|
||||
if !isInContainerVersioning {
|
||||
err := o.removeSegmentsLargeObject(ctx, segmentsContainer)
|
||||
if err != nil {
|
||||
fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -27,6 +27,7 @@ type Object struct {
|
||||
// This is a wrapped object contains all candidates
|
||||
type Directory struct {
|
||||
*upstream.Directory
|
||||
fs *Fs // what this directory is part of
|
||||
cd []upstream.Entry
|
||||
}
|
||||
|
||||
@ -227,7 +228,56 @@ func (d *Directory) Size() (s int64) {
|
||||
return s
|
||||
}
|
||||
|
||||
// SetMetadata sets metadata for an DirEntry
|
||||
//
|
||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
||||
func (d *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
||||
entries, err := d.fs.actionEntries(d.candidates()...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
errs := Errors(make([]error, len(entries)))
|
||||
multithread(len(entries), func(i int) {
|
||||
if d, ok := entries[i].(*upstream.Directory); ok {
|
||||
err := d.SetMetadata(ctx, metadata)
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", d.UpstreamFs().Name(), err)
|
||||
}
|
||||
} else {
|
||||
errs[i] = fs.ErrorIsFile
|
||||
}
|
||||
})
|
||||
wg.Wait()
|
||||
return errs.Err()
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the DirEntry to set the modification date
|
||||
//
|
||||
// If there is any other metadata it does not overwrite it.
|
||||
func (d *Directory) SetModTime(ctx context.Context, t time.Time) error {
|
||||
entries, err := d.fs.actionEntries(d.candidates()...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
errs := Errors(make([]error, len(entries)))
|
||||
multithread(len(entries), func(i int) {
|
||||
if d, ok := entries[i].(*upstream.Directory); ok {
|
||||
err := d.SetModTime(ctx, t)
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", d.UpstreamFs().Name(), err)
|
||||
}
|
||||
} else {
|
||||
errs[i] = fs.ErrorIsFile
|
||||
}
|
||||
})
|
||||
wg.Wait()
|
||||
return errs.Err()
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
_ fs.FullDirectory = (*Directory)(nil)
|
||||
)
|
||||
|
@ -95,6 +95,7 @@ func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
|
||||
case *upstream.Directory:
|
||||
return &Directory{
|
||||
Directory: e,
|
||||
fs: f,
|
||||
cd: entries,
|
||||
}, nil
|
||||
default:
|
||||
@ -182,6 +183,51 @@ func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// MkdirMetadata makes the root directory of the Fs object
|
||||
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
|
||||
upstreams, err := f.create(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
errs := Errors(make([]error, len(upstreams)))
|
||||
entries := make([]upstream.Entry, len(upstreams))
|
||||
multithread(len(upstreams), func(i int) {
|
||||
u := upstreams[i]
|
||||
if do := u.Features().MkdirMetadata; do != nil {
|
||||
newDir, err := do(ctx, dir, metadata)
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
|
||||
} else {
|
||||
entries[i], err = u.WrapEntry(newDir)
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
// Just do Mkdir on upstreams which don't support MkdirMetadata
|
||||
err := u.Mkdir(ctx, dir)
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
|
||||
}
|
||||
}
|
||||
})
|
||||
err = errs.Err()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
entry, err := f.wrapEntries(entries...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newDir, ok := entry.(fs.Directory)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("internal error: expecting %T to be an fs.Directory", entry)
|
||||
}
|
||||
return newDir, nil
|
||||
}
|
||||
|
||||
// Purge all files in the directory
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
@ -386,6 +432,26 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
// DirSetModTime sets the directory modtime for dir
|
||||
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
|
||||
upstreams, err := f.action(ctx, dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
errs := Errors(make([]error, len(upstreams)))
|
||||
multithread(len(upstreams), func(i int) {
|
||||
u := upstreams[i]
|
||||
// ignore DirSetModTime on upstreams which don't support it
|
||||
if do := u.Features().DirSetModTime; do != nil {
|
||||
err := do(ctx, dir, modTime)
|
||||
if err != nil {
|
||||
errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
|
||||
}
|
||||
}
|
||||
})
|
||||
return errs.Err()
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path
|
||||
// that has had changes. If the implementation
|
||||
// uses polling, it should adhere to the given interval.
|
||||
@ -902,18 +968,23 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
|
||||
}
|
||||
fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
|
||||
var features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
PartialUploads: true,
|
||||
CaseInsensitive: true,
|
||||
DuplicateFiles: false,
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
BucketBased: true,
|
||||
SetTier: true,
|
||||
GetTier: true,
|
||||
ReadMetadata: true,
|
||||
WriteMetadata: true,
|
||||
UserMetadata: true,
|
||||
ReadDirMetadata: true,
|
||||
WriteDirMetadata: true,
|
||||
WriteDirSetModTime: true,
|
||||
UserDirMetadata: true,
|
||||
DirModTimeUpdatesOnWrite: true,
|
||||
PartialUploads: true,
|
||||
}).Fill(ctx, f)
|
||||
canMove, slowHash := true, false
|
||||
for _, f := range upstreams {
|
||||
@ -988,6 +1059,8 @@ var (
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirSetModTimer = (*Fs)(nil)
|
||||
_ fs.MkdirMetadataer = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
|
@ -322,6 +322,39 @@ func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
return do.Metadata(ctx)
|
||||
}
|
||||
|
||||
// Metadata returns metadata for an DirEntry
|
||||
//
|
||||
// It should return nil if there is no Metadata
|
||||
func (e *Directory) Metadata(ctx context.Context) (fs.Metadata, error) {
|
||||
do, ok := e.Directory.(fs.Metadataer)
|
||||
if !ok {
|
||||
return nil, nil
|
||||
}
|
||||
return do.Metadata(ctx)
|
||||
}
|
||||
|
||||
// SetMetadata sets metadata for an DirEntry
|
||||
//
|
||||
// It should return fs.ErrorNotImplemented if it can't set metadata
|
||||
func (e *Directory) SetMetadata(ctx context.Context, metadata fs.Metadata) error {
|
||||
do, ok := e.Directory.(fs.SetMetadataer)
|
||||
if !ok {
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
return do.SetMetadata(ctx, metadata)
|
||||
}
|
||||
|
||||
// SetModTime sets the metadata on the DirEntry to set the modification date
|
||||
//
|
||||
// If there is any other metadata it does not overwrite it.
|
||||
func (e *Directory) SetModTime(ctx context.Context, t time.Time) error {
|
||||
do, ok := e.Directory.(fs.SetModTimer)
|
||||
if !ok {
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
return do.SetModTime(ctx, t)
|
||||
}
|
||||
|
||||
// Writeback writes the object back and returns a new object
|
||||
//
|
||||
// If it returns nil, nil then the original object is OK
|
||||
@ -457,5 +490,6 @@ func (f *Fs) updateUsageCore(lock bool) error {
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
_ fs.FullObject = (*Object)(nil)
|
||||
_ fs.FullDirectory = (*Directory)(nil)
|
||||
)
|
||||
|
@ -75,6 +75,7 @@ type Prop struct {
|
||||
Size int64 `xml:"DAV: prop>getcontentlength,omitempty"`
|
||||
Modified Time `xml:"DAV: prop>getlastmodified,omitempty"`
|
||||
Checksums []string `xml:"prop>checksums>checksum,omitempty"`
|
||||
Permissions string `xml:"prop>permissions,omitempty"`
|
||||
MESha1Hex *string `xml:"ME: prop>sha1hex,omitempty"` // Fastmail-specific sha1 checksum
|
||||
}
|
||||
|
||||
|
@ -149,6 +149,16 @@ Set to 0 to disable chunked uploading.
|
||||
`,
|
||||
Advanced: true,
|
||||
Default: 10 * fs.Mebi, // Default NextCloud `max_chunk_size` is `10 MiB`. See https://github.com/nextcloud/server/blob/0447b53bda9fe95ea0cbed765aa332584605d652/apps/files/lib/App.php#L57
|
||||
}, {
|
||||
Name: "owncloud_exclude_shares",
|
||||
Help: "Exclude ownCloud shares",
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}, {
|
||||
Name: "owncloud_exclude_mounts",
|
||||
Help: "Exclude ownCloud mounted storages",
|
||||
Advanced: true,
|
||||
Default: false,
|
||||
}},
|
||||
})
|
||||
}
|
||||
@ -165,6 +175,8 @@ type Options struct {
|
||||
Headers fs.CommaSepList `config:"headers"`
|
||||
PacerMinSleep fs.Duration `config:"pacer_min_sleep"`
|
||||
ChunkSize fs.SizeSuffix `config:"nextcloud_chunk_size"`
|
||||
ExcludeShares bool `config:"owncloud_exclude_shares"`
|
||||
ExcludeMounts bool `config:"owncloud_exclude_mounts"`
|
||||
}
|
||||
|
||||
// Fs represents a remote webdav
|
||||
@ -608,7 +620,7 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
|
||||
}
|
||||
|
||||
f.chunksUploadURL = chunksUploadURL
|
||||
fs.Logf(nil, "Chunks temporary upload directory: %s", f.chunksUploadURL)
|
||||
fs.Debugf(nil, "Chunks temporary upload directory: %s", f.chunksUploadURL)
|
||||
}
|
||||
case "sharepoint":
|
||||
// To mount sharepoint, two Cookies are required
|
||||
@ -702,6 +714,7 @@ var owncloudProps = []byte(`<?xml version="1.0"?>
|
||||
<d:resourcetype />
|
||||
<d:getcontenttype />
|
||||
<oc:checksums />
|
||||
<oc:permissions />
|
||||
</d:prop>
|
||||
</d:propfind>
|
||||
`)
|
||||
@ -797,6 +810,18 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
|
||||
continue
|
||||
}
|
||||
}
|
||||
if f.opt.ExcludeShares {
|
||||
// https: //owncloud.dev/apis/http/webdav/#supported-webdav-properties
|
||||
if strings.Contains(item.Props.Permissions, "S") {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if f.opt.ExcludeMounts {
|
||||
// https: //owncloud.dev/apis/http/webdav/#supported-webdav-properties
|
||||
if strings.Contains(item.Props.Permissions, "M") {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// item.Name = restoreReservedChars(item.Name)
|
||||
if fn(remote, isDir, &item.Props) {
|
||||
found = true
|
||||
|
@ -23,8 +23,6 @@ CATEGORY = re.compile(r"(^[\w/ ]+(?:, *[\w/ ]+)*):\s*(.*)$")
backends = [ x for x in os.listdir("backend") if x != "all"]

backend_aliases = {
    "amazon cloud drive" : "amazonclouddrive",
    "acd" : "amazonclouddrive",
    "google cloud storage" : "googlecloudstorage",
    "gcs" : "googlecloudstorage",
    "azblob" : "azureblob",
@ -34,7 +32,6 @@ backend_aliases = {
}

backend_titles = {
    "amazonclouddrive": "Amazon Cloud Drive",
    "googlecloudstorage": "Google Cloud Storage",
    "azureblob": "Azure Blob",
    "ftp": "FTP",

@ -30,7 +30,6 @@ docs = [
    # Keep these alphabetical by full name
    "fichier.md",
    "alias.md",
    "amazonclouddrive.md",
    "s3.md",
    "b2.md",
    "box.md",
@ -122,6 +121,7 @@ ignore_docs = [
    "downloads.md",
    "privacy.md",
    "sponsor.md",
    "amazonclouddrive.md",
]

def read_doc(doc):

14
bin/use-deadlock-detector
Executable file
@ -0,0 +1,14 @@
#!/bin/bash

if [[ ! -z $(git status --short --untracked-files=no) ]]; then
	echo "Detected uncommitted changes - commit before running this"
	exit 1
fi

echo "Installing deadlock detector - use 'git reset --hard HEAD' to undo"

go get -v github.com/sasha-s/go-deadlock/...
find . -type f -name "*.go" -print0 | xargs -0 sed -i~ 's/sync.RWMutex/deadlock.RWMutex/; s/sync.Mutex/deadlock.Mutex/;'
goimports -w .

echo "Done"

@ -2,16 +2,19 @@
package bilib

import (
	"context"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strings"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

// FsPath converts Fs to a suitable rclone argument
func FsPath(f fs.Fs) string {
func FsPath(f fs.Info) string {
	name, path, slash := f.Name(), f.Root(), "/"
	if name == "local" {
		slash = string(os.PathSeparator)

@ -38,5 +41,57 @@ var nonCanonicalChars = regexp.MustCompile(`[\s\\/:?*]`)

// SessionName makes a unique base name for the sync operation
func SessionName(fs1, fs2 fs.Fs) string {
	return CanonicalPath(FsPath(fs1)) + ".." + CanonicalPath(FsPath(fs2))
	return StripHexString(CanonicalPath(FsPath(fs1))) + ".." + StripHexString(CanonicalPath(FsPath(fs2)))
}

// StripHexString strips the (first) canonical {hexstring} suffix
func StripHexString(path string) string {
	open := strings.IndexRune(path, '{')
	close := strings.IndexRune(path, '}')
	if open >= 0 && close > open {
		return path[:open] + path[close+1:] // (trailing underscore)
	}
	return path
}

// HasHexString returns true if path contains at least one canonical {hexstring} suffix
func HasHexString(path string) bool {
	open := strings.IndexRune(path, '{')
	if open >= 0 && strings.IndexRune(path, '}') > open {
		return true
	}
	return false
}
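
For illustration, a minimal sketch of how these helpers fit together; the session name below is an assumed example (not taken from this change), standing in for what CanonicalPath might produce when a backend config is overridden:

package main

import (
	"fmt"

	"github.com/rclone/rclone/cmd/bisync/bilib"
)

func main() {
	// Assumed example: a canonical session name whose first half carries a
	// {hexstring} suffix, as overridden backend configs can produce.
	name := "remote{a1b2c3d4}_bucket..local_path2"
	fmt.Println(bilib.HasHexString(name))   // true
	fmt.Println(bilib.StripHexString(name)) // "remote_bucket..local_path2"
}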
|
||||
|
||||
// BasePath joins the workDir with the SessionName, stripping {hexstring} suffix if necessary
|
||||
func BasePath(ctx context.Context, workDir string, fs1, fs2 fs.Fs) string {
|
||||
suffixedSession := CanonicalPath(FsPath(fs1)) + ".." + CanonicalPath(FsPath(fs2))
|
||||
suffixedBasePath := filepath.Join(workDir, suffixedSession)
|
||||
listing1 := suffixedBasePath + ".path1.lst"
|
||||
listing2 := suffixedBasePath + ".path2.lst"
|
||||
|
||||
sessionName := SessionName(fs1, fs2)
|
||||
basePath := filepath.Join(workDir, sessionName)
|
||||
|
||||
// Normalize to non-canonical version for overridden configs
|
||||
// to ensure that backend-specific flags don't change the listing filename.
|
||||
// For backward-compatibility, we first check if we found a listing file with the suffixed version.
|
||||
// If so, we rename it (and overwrite non-suffixed version, if any.)
|
||||
// If not, we carry on with the non-suffixed version.
|
||||
// We should only find a suffixed version if bisync v1.66 or older created it.
|
||||
if HasHexString(suffixedSession) && FileExists(listing1) {
|
||||
fs.Infof(listing1, "renaming to: %s", basePath+".path1.lst")
|
||||
if !operations.SkipDestructive(ctx, listing1, "rename to "+basePath+".path1.lst") {
|
||||
_ = os.Rename(listing1, basePath+".path1.lst")
|
||||
}
|
||||
}
|
||||
if HasHexString(suffixedSession) && FileExists(listing2) {
|
||||
fs.Infof(listing2, "renaming to: %s", basePath+".path2.lst")
|
||||
if !operations.SkipDestructive(ctx, listing1, "rename to "+basePath+".path2.lst") {
|
||||
_ = os.Rename(listing2, basePath+".path2.lst")
|
||||
} else {
|
||||
return suffixedBasePath
|
||||
}
|
||||
}
|
||||
return basePath
|
||||
}
|
||||
|
@ -39,7 +39,7 @@ func FileExists(file string) bool {
|
||||
return !os.IsNotExist(err)
|
||||
}
|
||||
|
||||
// CopyFileIfExists is like CopyFile but does to fail if source does not exist
|
||||
// CopyFileIfExists is like CopyFile but does not fail if source does not exist
|
||||
func CopyFileIfExists(srcFile, dstFile string) error {
|
||||
if !FileExists(srcFile) {
|
||||
return nil
|
||||
|
@ -5,6 +5,8 @@ import (
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Names comprises a set of file names
|
||||
@ -59,3 +61,105 @@ func SaveList(list []string, path string) error {
|
||||
}
|
||||
return os.WriteFile(path, buf.Bytes(), PermSecure)
|
||||
}
|
||||
|
||||
// AliasMap comprises a pair of names that are not equal but treated as equal for comparison purposes
// For example, when normalizing unicode and casing
// This helps reduce repeated normalization functions, which really slow things down
type AliasMap map[string]string

// Add adds new pair to the set, in both directions
func (am AliasMap) Add(name1, name2 string) {
	if name1 != name2 {
		am[name1] = name2
		am[name2] = name1
	}
}

// Alias returns the alternate version, if any, else the original.
func (am AliasMap) Alias(name1 string) string {
	// note: we don't need to check normalization settings, because we already did it in March.
	// the AliasMap will only exist if March paired up two unequal filenames.
	name2, ok := am[name1]
	if ok {
		return name2
	}
	return name1
}
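
A minimal usage sketch (the filenames are assumed examples) showing why both directions are stored: either unicode form can be used as the lookup key:

package main

import (
	"fmt"

	"github.com/rclone/rclone/cmd/bisync/bilib"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// Assumed example: the same name in NFC and NFD form, unequal as raw
	// strings but meant to be treated as the same file.
	nfc := norm.NFC.String("café.txt")
	nfd := norm.NFD.String("café.txt")

	am := bilib.AliasMap{}
	am.Add(nfc, nfd)

	fmt.Println(am.Alias(nfd) == nfc)  // true - maps back to the other form
	fmt.Println(am.Alias("plain.txt")) // unknown names are returned unchanged
}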
|
||||
|
||||
// ParseGlobs determines whether a string contains {brackets}
// and returns the substring (including both brackets) for replacing
// substring is first opening bracket to last closing bracket --
// good for {{this}} but not {this}{this}
func ParseGlobs(s string) (hasGlobs bool, substring string) {
	open := strings.Index(s, "{")
	close := strings.LastIndex(s, "}")
	if open >= 0 && close > open {
		return true, s[open : close+1]
	}
	return false, ""
}

// TrimBrackets converts {{this}} to this
func TrimBrackets(s string) string {
	return strings.Trim(s, "{}")
}
|
||||
|
||||
// TimeFormat converts a user-supplied string to a Go time constant, if possible
|
||||
func TimeFormat(timeFormat string) string {
|
||||
switch timeFormat {
|
||||
case "Layout":
|
||||
timeFormat = time.Layout
|
||||
case "ANSIC":
|
||||
timeFormat = time.ANSIC
|
||||
case "UnixDate":
|
||||
timeFormat = time.UnixDate
|
||||
case "RubyDate":
|
||||
timeFormat = time.RubyDate
|
||||
case "RFC822":
|
||||
timeFormat = time.RFC822
|
||||
case "RFC822Z":
|
||||
timeFormat = time.RFC822Z
|
||||
case "RFC850":
|
||||
timeFormat = time.RFC850
|
||||
case "RFC1123":
|
||||
timeFormat = time.RFC1123
|
||||
case "RFC1123Z":
|
||||
timeFormat = time.RFC1123Z
|
||||
case "RFC3339":
|
||||
timeFormat = time.RFC3339
|
||||
case "RFC3339Nano":
|
||||
timeFormat = time.RFC3339Nano
|
||||
case "Kitchen":
|
||||
timeFormat = time.Kitchen
|
||||
case "Stamp":
|
||||
timeFormat = time.Stamp
|
||||
case "StampMilli":
|
||||
timeFormat = time.StampMilli
|
||||
case "StampMicro":
|
||||
timeFormat = time.StampMicro
|
||||
case "StampNano":
|
||||
timeFormat = time.StampNano
|
||||
case "DateTime":
|
||||
// timeFormat = time.DateTime // missing in go1.19
|
||||
timeFormat = "2006-01-02 15:04:05"
|
||||
case "DateOnly":
|
||||
// timeFormat = time.DateOnly // missing in go1.19
|
||||
timeFormat = "2006-01-02"
|
||||
case "TimeOnly":
|
||||
// timeFormat = time.TimeOnly // missing in go1.19
|
||||
timeFormat = "15:04:05"
|
||||
case "MacFriendlyTime", "macfriendlytime", "mac":
|
||||
timeFormat = "2006-01-02 0304PM" // not actually a Go constant -- but useful as macOS filenames can't have colons
|
||||
}
|
||||
return timeFormat
|
||||
}
|
||||
|
||||
// AppyTimeGlobs converts "myfile-{DateOnly}.txt" to "myfile-2006-01-02.txt"
func AppyTimeGlobs(s string, t time.Time) string {
	hasGlobs, substring := ParseGlobs(s)
	if !hasGlobs {
		return s
	}
	timeString := t.Local().Format(TimeFormat(TrimBrackets(substring)))
	return strings.ReplaceAll(s, substring, timeString)
}
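
A small usage sketch, assuming the helpers above are exported from bilib as shown; the filename and timestamp are illustrative values only:

package main

import (
	"fmt"
	"time"

	"github.com/rclone/rclone/cmd/bisync/bilib"
)

func main() {
	// "{DateOnly}" is resolved via TimeFormat to the "2006-01-02" layout.
	t := time.Date(2024, time.March, 1, 12, 0, 0, 0, time.UTC)
	fmt.Println(bilib.AppyTimeGlobs("myfile-{DateOnly}.txt", t))
	// prints e.g. "myfile-2024-03-01.txt" (formatted in local time)
}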
|
||||
|
@ -21,6 +21,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync"
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
@ -29,12 +30,17 @@ import (
|
||||
"github.com/rclone/rclone/fs/cache"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/fspath"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/sync"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/fstest"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
|
||||
"github.com/pmezard/go-difflib/difflib"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@ -53,6 +59,8 @@ const (
|
||||
fixSlash = (runtime.GOOS == "windows")
|
||||
)
|
||||
|
||||
var initDate = time.Date(2000, time.January, 1, 0, 0, 0, 0, bisync.TZ)
|
||||
|
||||
// logReplacements make modern test logs comparable with golden dir.
|
||||
// It is a string slice of even length with this structure:
|
||||
//
|
||||
@ -71,6 +79,20 @@ var logReplacements = []string{
|
||||
`^NOTICE: too_many_(requests|write_operations)/\.*: Too many requests or write operations.*$`, dropMe,
|
||||
`^NOTICE: Dropbox root .*?: Forced to upload files to set modification times on this backend.$`, dropMe,
|
||||
`^INFO : .*?: src and dst identical but can't set mod time without deleting and re-uploading$`, dropMe,
|
||||
// ignore crypt info messages
|
||||
`^INFO : .*?: Crypt detected! Using cryptcheck instead of check. \(Use --size-only or --ignore-checksum to disable\)$`, dropMe,
|
||||
// ignore drive info messages
|
||||
`^NOTICE:.*?Files of unknown size \(such as Google Docs\) do not sync reliably with --checksum or --size-only\. Consider using modtime instead \(the default\) or --drive-skip-gdocs.*?$`, dropMe,
|
||||
// ignore differences in backend features
|
||||
`^.*?"HashType1":.*?$`, dropMe,
|
||||
`^.*?"HashType2":.*?$`, dropMe,
|
||||
`^.*?"SlowHashDetected":.*?$`, dropMe,
|
||||
`^.*? for same-side diffs on .*?$`, dropMe,
|
||||
`^.*?Downloading hashes.*?$`, dropMe,
|
||||
// ignore timestamps in directory time updates
|
||||
`^(INFO : .*?: (Made directory with|Set directory) (metadata|modification time)).*$`, dropMe,
|
||||
// ignore sizes in directory time updates
|
||||
`^(NOTICE: .*?: Skipped set directory modification time as --dry-run is set).*$`, dropMe,
|
||||
}
|
||||
|
||||
// Some dry-run messages differ depending on the particular remote.
|
||||
@ -96,17 +118,26 @@ var logHoppers = []string{
|
||||
// subdirectories. The order inconsistency initially showed up in the
|
||||
// listings and triggered reordering of log messages, but the actual
|
||||
// files will in fact match.
|
||||
`ERROR : - +Access test failed: Path[12] file not found in Path[12] - .*`,
|
||||
`.* +.....Access test failed: Path[12] file not found in Path[12].*`,
|
||||
|
||||
// Test case `resync` suffered from the order of queued copies.
|
||||
`(?:INFO |NOTICE): - Path2 Resync will copy to Path1 +- .*`,
|
||||
|
||||
// Test case `normalization` can have random order of fix-case files.
|
||||
`(?:INFO |NOTICE): .*: Fixed case by renaming to: .*`,
|
||||
|
||||
// order of files re-checked prior to a conflict rename
|
||||
`ERROR : .*: md5 differ.*`,
|
||||
|
||||
// Directory modification time setting can happen in any order
|
||||
`INFO : .*: (Set directory modification time|Made directory with metadata).*`,
|
||||
}
|
||||
|
||||
// Some log lines can contain Windows path separator that must be
|
||||
// converted to "/" in every matching token to match golden logs.
|
||||
var logLinesWithSlash = []string{
|
||||
`\(\d\d\) : (touch-glob|touch-copy|copy-file|copy-as|copy-dir|delete-file) `,
|
||||
`INFO : - Path[12] +Queue copy to Path[12] `,
|
||||
`.*\(\d\d\) :.*(fix-names|touch-glob|touch-copy|copy-file|copy-as|copy-dir|delete-file) `,
|
||||
`INFO : - .*Path[12].* +.*Queue copy to.* Path[12].*`,
|
||||
`INFO : Synching Path1 .*? with Path2 `,
|
||||
`INFO : Validating listings for `,
|
||||
}
|
||||
@ -165,8 +196,11 @@ type bisyncTest struct {
|
||||
golden bool
|
||||
debug bool
|
||||
stopAt int
|
||||
TestFn bisync.TestFunc
|
||||
}
|
||||
|
||||
var color = bisync.Color
|
||||
|
||||
// TestBisync is a test engine for bisync test cases.
|
||||
func TestBisync(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
@ -180,6 +214,8 @@ func TestBisync(t *testing.T) {
|
||||
if *argRefreshTimes {
|
||||
ci.RefreshTimes = true
|
||||
}
|
||||
bisync.Colors = true
|
||||
time.Local, _ = time.LoadLocation("America/New_York")
|
||||
|
||||
baseDir, err := os.Getwd()
|
||||
require.NoError(t, err, "get current directory")
|
||||
@ -234,6 +270,10 @@ func TestBisync(t *testing.T) {
|
||||
testList = nil
|
||||
for _, testCase := range b.listDir(b.dataRoot) {
|
||||
if strings.HasPrefix(testCase, "test_") {
|
||||
// if dir is empty, skip it (can happen due to gitignored files/dirs when checking out branch)
|
||||
if len(b.listDir(filepath.Join(b.dataRoot, testCase))) == 0 {
|
||||
continue
|
||||
}
|
||||
testList = append(testList, testCase)
|
||||
}
|
||||
}
|
||||
@ -277,9 +317,12 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
|
||||
b.goldenDir = b.ensureDir(b.testDir, "golden", false)
|
||||
b.dataDir = b.ensureDir(b.testDir, "modfiles", true) // optional
|
||||
|
||||
// normalize unicode so tests are runnable on macOS
|
||||
b.sessionName = norm.NFC.String(b.sessionName)
|
||||
b.goldenDir = norm.NFC.String(b.goldenDir)
|
||||
|
||||
// For test stability, jam initial dates to a fixed past date.
|
||||
// Test cases that change files will touch specific files to fixed new dates.
|
||||
initDate := time.Date(2000, time.January, 1, 0, 0, 0, 0, bisync.TZ)
|
||||
err = filepath.Walk(b.initDir, func(path string, info os.FileInfo, err error) error {
|
||||
if err == nil && !info.IsDir() {
|
||||
return os.Chtimes(path, initDate, initDate)
|
||||
@ -373,16 +416,16 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
|
||||
var passed bool
|
||||
switch errorCount {
|
||||
case 0:
|
||||
msg = fmt.Sprintf("TEST %s PASSED", b.testCase)
|
||||
msg = color(terminal.GreenFg, fmt.Sprintf("TEST %s PASSED", b.testCase))
|
||||
passed = true
|
||||
case -2:
|
||||
msg = fmt.Sprintf("TEST %s SKIPPED", b.testCase)
|
||||
msg = color(terminal.YellowFg, fmt.Sprintf("TEST %s SKIPPED", b.testCase))
|
||||
passed = true
|
||||
case -1:
|
||||
msg = fmt.Sprintf("TEST %s FAILED - WRONG NUMBER OF FILES", b.testCase)
|
||||
msg = color(terminal.RedFg, fmt.Sprintf("TEST %s FAILED - WRONG NUMBER OF FILES", b.testCase))
|
||||
passed = false
|
||||
default:
|
||||
msg = fmt.Sprintf("TEST %s FAILED - %d MISCOMPARED FILES", b.testCase, errorCount)
|
||||
msg = color(terminal.RedFg, fmt.Sprintf("TEST %s FAILED - %d MISCOMPARED FILES", b.testCase, errorCount))
|
||||
buckets := b.fs1.Features().BucketBased || b.fs2.Features().BucketBased
|
||||
passed = false
|
||||
if b.testCase == "rmdirs" && buckets {
|
||||
@ -449,7 +492,7 @@ func (b *bisyncTest) cleanupCase(ctx context.Context) {
|
||||
func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
var fsrc, fdst fs.Fs
|
||||
accounting.Stats(ctx).ResetErrors()
|
||||
b.logPrintf("%s %s", b.stepStr, line)
|
||||
b.logPrintf("%s %s", color(terminal.CyanFg, b.stepStr), color(terminal.BlueFg, line))
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
ciSave := *ci
|
||||
@ -461,6 +504,23 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
ci.LogLevel = fs.LogLevelDebug
|
||||
}
|
||||
|
||||
testFunc := func() {
|
||||
src := filepath.Join(b.dataDir, "file7.txt")
|
||||
|
||||
for i := 0; i < 50; i++ {
|
||||
dst := "file" + fmt.Sprint(i) + ".txt"
|
||||
err := b.copyFile(ctx, src, b.path2, dst)
|
||||
if err != nil {
|
||||
fs.Errorf(src, "error copying file: %v", err)
|
||||
}
|
||||
dst = "file" + fmt.Sprint(100-i) + ".txt"
|
||||
err = b.copyFile(ctx, src, b.path1, dst)
|
||||
if err != nil {
|
||||
fs.Errorf(dst, "error copying file: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
args := splitLine(line)
|
||||
switch args[0] {
|
||||
case "test":
|
||||
@ -477,7 +537,11 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
if fsrc, err = fs.NewFs(ctx, args[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
return purgeChildren(ctx, fsrc, "")
|
||||
err = purgeChildren(ctx, fsrc, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return
|
||||
case "delete-file":
|
||||
b.checkArgs(args, 1, 1)
|
||||
dir, file := filepath.Split(args[1])
|
||||
@ -520,6 +584,16 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
case "copy-as":
|
||||
b.checkArgs(args, 3, 3)
|
||||
return b.copyFile(ctx, args[1], args[2], args[3])
|
||||
case "copy-as-NFC":
|
||||
b.checkArgs(args, 3, 3)
|
||||
ci.NoUnicodeNormalization = true
|
||||
ci.FixCase = true
|
||||
return b.copyFile(ctx, args[1], norm.NFC.String(args[2]), norm.NFC.String(args[3]))
|
||||
case "copy-as-NFD":
|
||||
b.checkArgs(args, 3, 3)
|
||||
ci.NoUnicodeNormalization = true
|
||||
ci.FixCase = true
|
||||
return b.copyFile(ctx, args[1], norm.NFD.String(args[2]), norm.NFD.String(args[3]))
|
||||
case "copy-dir", "sync-dir":
|
||||
b.checkArgs(args, 2, 2)
|
||||
if fsrc, err = cache.Get(ctx, args[1]); err != nil {
|
||||
@ -537,9 +611,131 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
|
||||
return err
|
||||
case "list-dirs":
|
||||
b.checkArgs(args, 1, 1)
|
||||
return b.listSubdirs(ctx, args[1])
|
||||
return b.listSubdirs(ctx, args[1], true)
|
||||
case "list-files":
|
||||
b.checkArgs(args, 1, 1)
|
||||
return b.listSubdirs(ctx, args[1], false)
|
||||
case "bisync":
|
||||
ci.NoUnicodeNormalization = false
|
||||
ci.IgnoreCaseSync = false
|
||||
// ci.FixCase = true
|
||||
return b.runBisync(ctx, args[1:])
|
||||
case "test-func":
|
||||
b.TestFn = testFunc
|
||||
return
|
||||
case "fix-names":
|
||||
// in case the local os converted any filenames
|
||||
ci.NoUnicodeNormalization = true
|
||||
ci.FixCase = true
|
||||
ci.IgnoreTimes = true
|
||||
reset := func() {
|
||||
ci.NoUnicodeNormalization = false
|
||||
ci.FixCase = false
|
||||
ci.IgnoreTimes = false
|
||||
}
|
||||
defer reset()
|
||||
b.checkArgs(args, 1, 1)
|
||||
var ok bool
|
||||
var remoteName string
|
||||
var remotePath string
|
||||
remoteName, remotePath, err = fspath.SplitFs(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if remoteName == "" {
|
||||
remoteName = "/"
|
||||
}
|
||||
|
||||
fsrc, err = fs.NewFs(ctx, remoteName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// DEBUG
|
||||
fs.Debugf(remotePath, "is NFC: %v", norm.NFC.IsNormalString(remotePath))
|
||||
fs.Debugf(remotePath, "is NFD: %v", norm.NFD.IsNormalString(remotePath))
|
||||
fs.Debugf(remotePath, "is valid UTF8: %v", utf8.ValidString(remotePath))
|
||||
|
||||
// check if it's a dir, try moving it
|
||||
var leaf string
|
||||
_, leaf, err = fspath.Split(remotePath)
|
||||
if err == nil && leaf == "" {
|
||||
remotePath = args[1]
|
||||
fs.Debugf(remotePath, "attempting to fix directory")
|
||||
|
||||
fixDirname := func(old, new string) {
|
||||
if new != old {
|
||||
oldName, err := fs.NewFs(ctx, old)
|
||||
if err != nil {
|
||||
fs.Logf(old, "error getting Fs: %v", err)
|
||||
}
|
||||
fs.Debugf(nil, "Attempting to move %s to %s", oldName.Root(), new)
|
||||
// Create random name to temporarily move dir to
|
||||
tmpDirName := strings.TrimSuffix(new, slash) + "-rclone-move-" + random.String(8)
|
||||
var tmpDirFs fs.Fs
|
||||
tmpDirFs, _ = fs.NewFs(ctx, tmpDirName)
|
||||
err = sync.MoveDir(ctx, tmpDirFs, oldName, true, true)
|
||||
if err != nil {
|
||||
fs.Debugf(oldName, "error attempting to move folder: %v", err)
|
||||
}
|
||||
// now move the temp dir to real name
|
||||
fsrc, _ = fs.NewFs(ctx, new)
|
||||
err = sync.MoveDir(ctx, fsrc, tmpDirFs, true, true)
|
||||
if err != nil {
|
||||
fs.Debugf(tmpDirFs, "error attempting to move folder to %s: %v", fsrc.Root(), err)
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(nil, "old and new are equal. Skipping. %s (%s) %s (%s)", old, stringToHash(old), new, stringToHash(new))
|
||||
}
|
||||
}
|
||||
|
||||
if norm.NFC.String(remotePath) != remotePath && norm.NFD.String(remotePath) != remotePath {
|
||||
fs.Debugf(remotePath, "This is neither fully NFD or NFC -- can't fix reliably!")
|
||||
}
|
||||
fixDirname(norm.NFC.String(remotePath), remotePath)
|
||||
fixDirname(norm.NFD.String(remotePath), remotePath)
|
||||
return
|
||||
}
|
||||
|
||||
// if it's a file
|
||||
fs.Debugf(remotePath, "attempting to fix file -- filename hash: %s", stringToHash(leaf))
|
||||
fixFilename := func(old, new string) {
|
||||
ok, err := fs.FileExists(ctx, fsrc, old)
|
||||
if err != nil {
|
||||
fs.Debugf(remotePath, "error checking if file exists: %v", err)
|
||||
}
|
||||
fs.Debugf(old, "file exists: %v %s", ok, stringToHash(old))
|
||||
fs.Debugf(nil, "FILE old: %s new: %s equal: %v", old, new, old == new)
|
||||
fs.Debugf(nil, "HASH old: %s new: %s equal: %v", stringToHash(old), stringToHash(new), stringToHash(old) == stringToHash(new))
|
||||
if ok && new != old {
|
||||
fs.Debugf(new, "attempting to rename %s to %s", old, new)
|
||||
err = operations.MoveFile(ctx, fsrc, fsrc, new, old)
|
||||
if err != nil {
|
||||
fs.Errorf(new, "error trying to rename %s to %s - %v", old, new, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// look for NFC version
|
||||
fixFilename(norm.NFC.String(remotePath), remotePath)
|
||||
// if it's in a subdir we just moved, the file and directory might have different encodings. Check for that.
|
||||
mixed := strings.TrimSuffix(norm.NFD.String(remotePath), norm.NFD.String(leaf)) + norm.NFC.String(leaf)
|
||||
fixFilename(mixed, remotePath)
|
||||
// Try NFD
|
||||
fixFilename(norm.NFD.String(remotePath), remotePath)
|
||||
// Try mixed in reverse
|
||||
mixed = strings.TrimSuffix(norm.NFC.String(remotePath), norm.NFC.String(leaf)) + norm.NFD.String(leaf)
|
||||
fixFilename(mixed, remotePath)
|
||||
// check if it's right now, error if not
|
||||
ok, err = fs.FileExists(ctx, fsrc, remotePath)
|
||||
if !ok || err != nil {
|
||||
fs.Logf(remotePath, "Can't find expected file %s (was it renamed by the os?) %v", args[1], err)
|
||||
return
|
||||
} else {
|
||||
// include hash of filename to make unicode form differences easier to see in logs
|
||||
fs.Debugf(remotePath, "verified file exists at correct path. filename hash: %s", stringToHash(leaf))
|
||||
}
|
||||
return
|
||||
default:
|
||||
return fmt.Errorf("unknown command: %q", args[0])
|
||||
}
|
||||
@ -589,6 +785,7 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
|
||||
MaxDelete: bisync.DefaultMaxDelete,
|
||||
CheckFilename: bisync.DefaultCheckFilename,
|
||||
CheckSync: bisync.CheckSyncTrue,
|
||||
TestFn: b.TestFn,
|
||||
}
|
||||
octx, ci := fs.AddConfig(ctx)
|
||||
fs1, fs2 := b.fs1, b.fs2
|
||||
@ -633,14 +830,60 @@ func (b *bisyncTest) runBisync(ctx context.Context, args []string) (err error) {
|
||||
require.NoError(b.t, err, "parsing max-delete=%q", val)
|
||||
case "size-only":
|
||||
ci.SizeOnly = true
|
||||
case "ignore-size":
|
||||
ci.IgnoreSize = true
|
||||
case "checksum":
|
||||
ci.CheckSum = true
|
||||
opt.Compare.DownloadHash = true // allows us to test crypt and the like
|
||||
case "compare-all":
|
||||
opt.CompareFlag = "size,modtime,checksum"
|
||||
opt.Compare.DownloadHash = true // allows us to test crypt and the like
|
||||
case "subdir":
|
||||
fs1 = addSubdir(b.path1, val)
|
||||
fs2 = addSubdir(b.path2, val)
|
||||
case "backupdir1":
|
||||
opt.BackupDir1 = val
|
||||
case "backupdir2":
|
||||
opt.BackupDir2 = val
|
||||
case "ignore-listing-checksum":
|
||||
opt.IgnoreListingChecksum = true
|
||||
case "no-norm":
|
||||
ci.NoUnicodeNormalization = true
|
||||
ci.IgnoreCaseSync = false
|
||||
case "norm":
|
||||
ci.NoUnicodeNormalization = false
|
||||
ci.IgnoreCaseSync = true
|
||||
case "fix-case":
|
||||
ci.NoUnicodeNormalization = false
|
||||
ci.IgnoreCaseSync = true
|
||||
ci.FixCase = true
|
||||
case "conflict-resolve":
|
||||
_ = opt.ConflictResolve.Set(val)
|
||||
case "conflict-loser":
|
||||
_ = opt.ConflictLoser.Set(val)
|
||||
case "conflict-suffix":
|
||||
opt.ConflictSuffixFlag = val
|
||||
case "resync-mode":
|
||||
_ = opt.ResyncMode.Set(val)
|
||||
default:
|
||||
return fmt.Errorf("invalid bisync option %q", arg)
|
||||
}
|
||||
}
|
||||
|
||||
// set all dirs to a fixed date for test stability, as they are considered as of v1.66.
|
||||
jamDirTimes := func(f fs.Fs) {
|
||||
err := walk.ListR(ctx, f, "", true, -1, walk.ListDirs, func(entries fs.DirEntries) error {
|
||||
var err error
|
||||
entries.ForDir(func(dir fs.Directory) {
|
||||
_, err = operations.SetDirModTime(ctx, f, dir, "", initDate)
|
||||
})
|
||||
return err
|
||||
})
|
||||
assert.NoError(b.t, err, "error jamming dirtimes")
|
||||
}
|
||||
jamDirTimes(fs1)
|
||||
jamDirTimes(fs2)
|
||||
|
||||
output := bilib.CaptureOutput(func() {
|
||||
err = bisync.Bisync(octx, fs1, fs2, opt)
|
||||
})
|
||||
@ -689,7 +932,7 @@ func (b *bisyncTest) copyFile(ctx context.Context, src, dst, asName string) (err
|
||||
var fsrc, fdst fs.Fs
|
||||
var srcPath, srcFile, dstPath, dstFile string
|
||||
|
||||
switch fsrc, err = cache.Get(ctx, src); err {
|
||||
switch fsrc, err = fs.NewFs(ctx, src); err {
|
||||
case fs.ErrorIsFile:
|
||||
// ok
|
||||
case nil:
|
||||
@ -712,7 +955,7 @@ func (b *bisyncTest) copyFile(ctx context.Context, src, dst, asName string) (err
|
||||
if dstFile != "" {
|
||||
dstPath = dst // force directory
|
||||
}
|
||||
if fdst, err = cache.Get(ctx, dstPath); err != nil {
|
||||
if fdst, err = fs.NewFs(ctx, dstPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -729,23 +972,24 @@ func (b *bisyncTest) copyFile(ctx context.Context, src, dst, asName string) (err
|
||||
return operations.CopyFile(fctx, fdst, fsrc, dstFile, srcFile)
|
||||
}
|
||||
|
||||
// listSubdirs is equivalent to `rclone lsf -R --dirs-only`
|
||||
func (b *bisyncTest) listSubdirs(ctx context.Context, remote string) error {
|
||||
// listSubdirs is equivalent to `rclone lsf -R [--dirs-only]`
|
||||
func (b *bisyncTest) listSubdirs(ctx context.Context, remote string, DirsOnly bool) error {
|
||||
f, err := fs.NewFs(ctx, remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opt := operations.ListJSONOpt{
|
||||
NoModTime: true,
|
||||
NoMimeType: true,
|
||||
DirsOnly: true,
|
||||
DirsOnly: DirsOnly,
|
||||
Recurse: true,
|
||||
}
|
||||
fmt := operations.ListFormat{}
|
||||
fmt.SetDirSlash(true)
|
||||
fmt.AddPath()
|
||||
printItem := func(item *operations.ListJSONItem) error {
|
||||
b.logPrintf("%s", fmt.Format(item))
|
||||
b.logPrintf("%s - filename hash: %s", fmt.Format(item), stringToHash(item.Name))
|
||||
return nil
|
||||
}
|
||||
return operations.ListJSON(ctx, f, "", &opt, printItem)
|
||||
@ -873,7 +1117,7 @@ func (b *bisyncTest) compareResults() int {
|
||||
|
||||
if goldenNum != resultNum {
|
||||
log.Print(divider)
|
||||
log.Printf("MISCOMPARE - Number of Golden and Results files do not match:")
|
||||
log.Print(color(terminal.RedFg, "MISCOMPARE - Number of Golden and Results files do not match:"))
|
||||
log.Printf(" Golden count: %d", goldenNum)
|
||||
log.Printf(" Result count: %d", resultNum)
|
||||
log.Printf(" Golden files: %s", strings.Join(goldenFiles, ", "))
|
||||
@ -909,7 +1153,7 @@ func (b *bisyncTest) compareResults() int {
|
||||
require.NoError(b.t, os.WriteFile(resultFile, []byte(resultText), bilib.PermSecure))
|
||||
}
|
||||
|
||||
if goldenText == resultText {
|
||||
if goldenText == resultText || strings.Contains(resultText, ".DS_Store") {
|
||||
continue
|
||||
}
|
||||
errorCount++
|
||||
@ -923,7 +1167,7 @@ func (b *bisyncTest) compareResults() int {
|
||||
require.NoError(b.t, err, "diff failed")
|
||||
|
||||
log.Print(divider)
|
||||
log.Printf("| MISCOMPARE -Golden vs +Results for %s", file)
|
||||
log.Printf(color(terminal.RedFg, "| MISCOMPARE -Golden vs +Results for %s"), file)
|
||||
for _, line := range strings.Split(strings.TrimSpace(text), "\n") {
|
||||
log.Printf("| %s", strings.TrimSpace(line))
|
||||
}
|
||||
@ -951,6 +1195,10 @@ func (b *bisyncTest) storeGolden() {
|
||||
if fileType(fileName) == "lock" {
|
||||
continue
|
||||
}
|
||||
if fileName == "backupdirs" {
|
||||
log.Printf("skipping: %v", fileName)
|
||||
continue
|
||||
}
|
||||
goldName := b.toGolden(fileName)
|
||||
if goldName != fileName {
|
||||
targetPath := filepath.Join(b.workDir, goldName)
|
||||
@ -972,6 +1220,10 @@ func (b *bisyncTest) storeGolden() {
|
||||
if fileType(fileName) == "lock" {
|
||||
continue
|
||||
}
|
||||
if fileName == "backupdirs" {
|
||||
log.Printf("skipping: %v", fileName)
|
||||
continue
|
||||
}
|
||||
text := b.mangleResult(b.goldenDir, fileName, true)
|
||||
|
||||
goldName := b.toGolden(fileName)
|
||||
@ -988,17 +1240,27 @@ func (b *bisyncTest) storeGolden() {
|
||||
|
||||
// mangleResult prepares test logs or listings for comparison
|
||||
func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
|
||||
if file == "backupdirs" {
|
||||
return "skipping backupdirs"
|
||||
}
|
||||
buf, err := os.ReadFile(filepath.Join(dir, file))
|
||||
require.NoError(b.t, err)
|
||||
|
||||
// normalize unicode so tests are runnable on macOS
|
||||
buf = norm.NFC.Bytes(buf)
|
||||
|
||||
text := string(buf)
|
||||
|
||||
switch fileType(strings.TrimSuffix(file, ".sav")) {
|
||||
case "queue":
|
||||
lines := strings.Split(text, eol)
|
||||
sort.Strings(lines)
|
||||
for i, line := range lines {
|
||||
lines[i] = normalizeEncoding(line)
|
||||
}
|
||||
return joinLines(lines)
|
||||
case "listing":
|
||||
return mangleListing(text, golden)
|
||||
return b.mangleListing(text, golden, file)
|
||||
case "log":
|
||||
// fall thru
|
||||
default:
|
||||
@ -1006,7 +1268,16 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
|
||||
}
|
||||
|
||||
// Adapt log lines to the golden way.
|
||||
lines := strings.Split(string(buf), eol)
|
||||
// First replace filenames with whitespace
|
||||
// some backends (such as crypt) log them on multiple lines due to encoding differences, while others (local) do not
|
||||
wsrep := []string{
|
||||
"subdir with" + eol + "white space.txt/file2 with" + eol + "white space.txt",
|
||||
"subdir with white space.txt/file2 with white space.txt",
|
||||
}
|
||||
whitespaceJoiner := strings.NewReplacer(wsrep...)
|
||||
s := whitespaceJoiner.Replace(string(buf))
|
||||
|
||||
lines := strings.Split(s, eol)
|
||||
pathReplacer := b.newReplacer(true)
|
||||
|
||||
rep := logReplacements
|
||||
@ -1090,7 +1361,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
|
||||
}
|
||||
|
||||
// mangleListing sorts listing lines before comparing.
|
||||
func mangleListing(text string, golden bool) string {
|
||||
func (b *bisyncTest) mangleListing(text string, golden bool, file string) string {
|
||||
lines := strings.Split(text, eol)
|
||||
|
||||
hasHeader := len(lines) > 0 && strings.HasPrefix(lines[0], bisync.ListingHeader)
|
||||
@ -1114,12 +1385,43 @@ func mangleListing(text string, golden bool) string {
|
||||
return getFile(lines[i]) < getFile(lines[j])
|
||||
})
|
||||
|
||||
// Store hash as golden but ignore when comparing.
|
||||
// parse whether this is Path1 or Path2 (so we can apply per-Fs precision/hash settings)
|
||||
isPath1 := strings.Contains(file, ".path1.lst")
|
||||
f := b.fs2
|
||||
if isPath1 {
|
||||
f = b.fs1
|
||||
}
|
||||
|
||||
// account for differences in backend features when comparing
|
||||
if !golden {
|
||||
for i, s := range lines {
|
||||
// Store hash as golden but ignore when comparing (only if no md5 support).
|
||||
match := regex.FindStringSubmatch(strings.TrimSpace(s))
|
||||
if match != nil && match[2] != "-" {
|
||||
lines[i] = match[1] + "-" + match[3] + match[4]
|
||||
if match != nil && match[2] != "-" && (!b.fs1.Hashes().Contains(hash.MD5) || !b.fs2.Hashes().Contains(hash.MD5)) { // if hash is not empty and either side lacks md5
|
||||
lines[i] = match[1] + "-" + match[3] + match[4] // replace it with "-" for comparison purposes (see #5679)
|
||||
}
|
||||
// account for modtime precision
|
||||
lineRegex := regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`)
|
||||
const timeFormat = "2006-01-02T15:04:05.000000000-0700"
|
||||
const lineFormat = "%s %8d %s %s %s %q\n"
|
||||
TZ := time.UTC
|
||||
fields := lineRegex.FindStringSubmatch(strings.TrimSuffix(lines[i], "\n"))
|
||||
if fields != nil {
|
||||
sizeVal, sizeErr := strconv.ParseInt(fields[2], 10, 64)
|
||||
if sizeErr == nil {
|
||||
// account for filename encoding differences by normalizing to OS encoding
|
||||
fields[6] = normalizeEncoding(fields[6])
|
||||
timeStr := fields[5]
|
||||
if f.Precision() == fs.ModTimeNotSupported {
|
||||
lines[i] = fmt.Sprintf(lineFormat, fields[1], sizeVal, fields[3], fields[4], "-", fields[6])
|
||||
continue
|
||||
}
|
||||
timeVal, timeErr := time.ParseInLocation(timeFormat, timeStr, TZ)
|
||||
if timeErr == nil {
|
||||
timeRound := timeVal.Round(f.Precision() * 2)
|
||||
lines[i] = fmt.Sprintf(lineFormat, fields[1], sizeVal, fields[3], fields[4], timeRound, fields[6])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1163,12 +1465,15 @@ func (b *bisyncTest) newReplacer(mangle bool) *strings.Replacer {
|
||||
b.dataDir + slash, "{datadir/}",
|
||||
b.testDir + slash, "{testdir/}",
|
||||
b.workDir + slash, "{workdir/}",
|
||||
b.fs1.String(), "{path1String}",
|
||||
b.fs2.String(), "{path2String}",
|
||||
b.path1, "{path1/}",
|
||||
b.path2, "{path2/}",
|
||||
"//?/" + strings.TrimSuffix(strings.Replace(b.path1, slash, "/", -1), "/"), "{path1}", // fix windows-specific issue
|
||||
"//?/" + strings.TrimSuffix(strings.Replace(b.path2, slash, "/", -1), "/"), "{path2}",
|
||||
strings.TrimSuffix(b.path1, slash), "{path1}", // ensure it's still recognized without trailing slash
|
||||
strings.TrimSuffix(b.path2, slash), "{path2}",
|
||||
b.workDir, "{workdir}",
|
||||
b.sessionName, "{session}",
|
||||
}
|
||||
if fixSlash {
|
||||
@ -1193,6 +1498,10 @@ func (b *bisyncTest) toGolden(name string) string {
|
||||
name = strings.ReplaceAll(name, b.canonPath1, goldenCanonBase)
|
||||
name = strings.ReplaceAll(name, b.canonPath2, goldenCanonBase)
|
||||
name = strings.TrimSuffix(name, ".sav")
|
||||
|
||||
// normalize unicode so tests are runnable on macOS
|
||||
name = norm.NFC.String(name)
|
||||
|
||||
return name
|
||||
}
|
||||
|
||||
@ -1213,8 +1522,23 @@ func (b *bisyncTest) ensureDir(parent, dir string, optional bool) string {
|
||||
func (b *bisyncTest) listDir(dir string) (names []string) {
|
||||
files, err := os.ReadDir(dir)
|
||||
require.NoError(b.t, err)
|
||||
ignoreIt := func(file string) bool {
|
||||
ignoreList := []string{
|
||||
// ".lst-control", ".lst-dry-control", ".lst-old", ".lst-dry-old",
|
||||
".DS_Store",
|
||||
}
|
||||
for _, s := range ignoreList {
|
||||
if strings.Contains(file, s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
for _, file := range files {
|
||||
names = append(names, filepath.Base(file.Name()))
|
||||
if ignoreIt(file.Name()) {
|
||||
continue
|
||||
}
|
||||
names = append(names, filepath.Base(norm.NFC.String(file.Name())))
|
||||
}
|
||||
// Sort files to ensure comparability.
|
||||
sort.Strings(names)
|
||||
@ -1230,7 +1554,7 @@ func fileType(fileName string) string {
|
||||
return "log"
|
||||
}
|
||||
switch filepath.Ext(fileName) {
|
||||
case ".lst", ".lst-new", ".lst-err", ".lst-dry", ".lst-dry-new":
|
||||
case ".lst", ".lst-new", ".lst-err", ".lst-dry", ".lst-dry-new", ".lst-old", ".lst-dry-old", ".lst-control", ".lst-dry-control":
|
||||
return "listing"
|
||||
case ".que":
|
||||
return "queue"
|
||||
@ -1254,3 +1578,36 @@ func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
|
||||
require.NoError(b.t, err, "writing log file")
|
||||
}
|
||||
}
|
||||
|
||||
// account for filename encoding differences between remotes by normalizing to OS encoding
|
||||
func normalizeEncoding(s string) string {
|
||||
if s == "" || s == "." {
|
||||
return s
|
||||
}
|
||||
nameVal, err := strconv.Unquote(s)
|
||||
if err != nil {
|
||||
nameVal = s
|
||||
}
|
||||
nameVal = filepath.Clean(nameVal)
|
||||
nameVal = encoder.OS.FromStandardPath(nameVal)
|
||||
return strconv.Quote(encoder.OS.ToStandardPath(filepath.ToSlash(nameVal)))
|
||||
}
|
||||
|
||||
func stringToHash(s string) string {
|
||||
ht := hash.MD5
|
||||
hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||
if err != nil {
|
||||
fs.Errorf(s, "hash unsupported: %v", err)
|
||||
}
|
||||
|
||||
_, err = hasher.Write([]byte(s))
|
||||
if err != nil {
|
||||
fs.Errorf(s, "failed to write to hasher: %v", err)
|
||||
}
|
||||
|
||||
sum, err := hasher.SumString(ht, false)
|
||||
if err != nil {
|
||||
fs.Errorf(s, "hasher returned an error: %v", err)
|
||||
}
|
||||
return sum
|
||||
}
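
A rough standalone sketch of what this helper is for (the filename is an assumed example): hashing the raw bytes makes NFC and NFD spellings of the same name easy to tell apart in test logs:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/hash"
	"golang.org/x/text/unicode/norm"
)

// md5Of mirrors the helper above: MD5 the raw string bytes.
func md5Of(s string) string {
	h, err := hash.NewMultiHasherTypes(hash.NewHashSet(hash.MD5))
	if err != nil {
		return err.Error()
	}
	_, _ = h.Write([]byte(s))
	sum, _ := h.SumString(hash.MD5, false)
	return sum
}

func main() {
	nfc := norm.NFC.String("café.txt")
	nfd := norm.NFD.String("café.txt")
	fmt.Println(md5Of(nfc) == md5Of(nfd)) // false: the two unicode forms hash differently
}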
|
||||
|
271
cmd/bisync/checkfn.go
Normal file
271
cmd/bisync/checkfn.go
Normal file
@ -0,0 +1,271 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/backend/crypt"
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/cmd/check"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
)
|
||||
|
||||
var hashType hash.Type
|
||||
var fsrc, fdst fs.Fs
|
||||
var fcrypt *crypt.Fs
|
||||
|
||||
// WhichCheck determines which CheckFn we should use based on the Fs types
|
||||
// It is more robust and accurate than Check because
|
||||
// it will fallback to CryptCheck or DownloadCheck instead of --size-only!
|
||||
// it returns the *operations.CheckOpt with the CheckFn set.
|
||||
func WhichCheck(ctx context.Context, opt *operations.CheckOpt) *operations.CheckOpt {
|
||||
ci := fs.GetConfig(ctx)
|
||||
common := opt.Fsrc.Hashes().Overlap(opt.Fdst.Hashes())
|
||||
|
||||
// note that ci.IgnoreChecksum doesn't change the behavior of Check -- it's just a way to opt-out of cryptcheck/download
|
||||
if common.Count() > 0 || ci.SizeOnly || ci.IgnoreChecksum {
|
||||
// use normal check
|
||||
opt.Check = CheckFn
|
||||
return opt
|
||||
}
|
||||
|
||||
FsrcCrypt, srcIsCrypt := opt.Fsrc.(*crypt.Fs)
|
||||
FdstCrypt, dstIsCrypt := opt.Fdst.(*crypt.Fs)
|
||||
|
||||
if (srcIsCrypt && dstIsCrypt) || (!srcIsCrypt && dstIsCrypt) {
|
||||
// if both are crypt or only dst is crypt
|
||||
hashType = FdstCrypt.UnWrap().Hashes().GetOne()
|
||||
if hashType != hash.None {
|
||||
// use cryptcheck
|
||||
fsrc = opt.Fsrc
|
||||
fdst = opt.Fdst
|
||||
fcrypt = FdstCrypt
|
||||
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
|
||||
opt.Check = CryptCheckFn
|
||||
return opt
|
||||
}
|
||||
} else if srcIsCrypt && !dstIsCrypt {
|
||||
// if only src is crypt
|
||||
hashType = FsrcCrypt.UnWrap().Hashes().GetOne()
|
||||
if hashType != hash.None {
|
||||
// use reverse cryptcheck
|
||||
fsrc = opt.Fdst
|
||||
fdst = opt.Fsrc
|
||||
fcrypt = FsrcCrypt
|
||||
fs.Infof(fdst, "Crypt detected! Using cryptcheck instead of check. (Use --size-only or --ignore-checksum to disable)")
|
||||
opt.Check = ReverseCryptCheckFn
|
||||
return opt
|
||||
}
|
||||
}
|
||||
|
||||
// if we've gotten this far, neither check nor cryptcheck will work, so use --download
|
||||
fs.Infof(fdst, "Can't compare hashes, so using check --download for safety. (Use --size-only or --ignore-checksum to disable)")
|
||||
opt.Check = DownloadCheckFn
|
||||
return opt
|
||||
}
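
A rough sketch of the intended call pattern, based on how checkconflicts (further down) wires this up; the wrapper function and its name are assumptions for illustration:

package example

import (
	"context"

	"github.com/rclone/rclone/cmd/bisync"
	"github.com/rclone/rclone/cmd/check"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

// checkBothSides is a hypothetical helper: build a CheckOpt, let WhichCheck
// choose the safest CheckFn (plain hash check, cryptcheck, or download check),
// then run the check with it.
func checkBothSides(ctx context.Context, f1, f2 fs.Fs) error {
	opt, close, err := check.GetCheckOpt(f1, f2)
	if err != nil {
		return err
	}
	defer close()

	opt = bisync.WhichCheck(ctx, opt)
	return operations.CheckFn(ctx, opt)
}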
|
||||
|
||||
// CheckFn is a slightly modified version of Check
|
||||
func CheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
|
||||
same, ht, err := operations.CheckHashes(ctx, src, dst)
|
||||
if err != nil {
|
||||
return true, false, err
|
||||
}
|
||||
if ht == hash.None {
|
||||
return false, true, nil
|
||||
}
|
||||
if !same {
|
||||
err = fmt.Errorf("%v differ", ht)
|
||||
fs.Errorf(src, "%v", err)
|
||||
return true, false, nil
|
||||
}
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// CryptCheckFn is a slightly modified version of CryptCheck
|
||||
func CryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
|
||||
cryptDst := dst.(*crypt.Object)
|
||||
underlyingDst := cryptDst.UnWrap()
|
||||
underlyingHash, err := underlyingDst.Hash(ctx, hashType)
|
||||
if err != nil {
|
||||
return true, false, fmt.Errorf("error reading hash from underlying %v: %w", underlyingDst, err)
|
||||
}
|
||||
if underlyingHash == "" {
|
||||
return false, true, nil
|
||||
}
|
||||
cryptHash, err := fcrypt.ComputeHash(ctx, cryptDst, src, hashType)
|
||||
if err != nil {
|
||||
return true, false, fmt.Errorf("error computing hash: %w", err)
|
||||
}
|
||||
if cryptHash == "" {
|
||||
return false, true, nil
|
||||
}
|
||||
if cryptHash != underlyingHash {
|
||||
err = fmt.Errorf("hashes differ (%s:%s) %q vs (%s:%s) %q", fdst.Name(), fdst.Root(), cryptHash, fsrc.Name(), fsrc.Root(), underlyingHash)
|
||||
fs.Debugf(src, err.Error())
|
||||
// using same error msg as CheckFn so integration tests match
|
||||
err = fmt.Errorf("%v differ", hashType)
|
||||
fs.Errorf(src, err.Error())
|
||||
return true, false, nil
|
||||
}
|
||||
return false, false, nil
|
||||
}
|
||||
|
||||
// ReverseCryptCheckFn is like CryptCheckFn except src and dst are switched
|
||||
// result: src is crypt, dst is non-crypt
|
||||
func ReverseCryptCheckFn(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
|
||||
return CryptCheckFn(ctx, src, dst)
|
||||
}
|
||||
|
||||
// DownloadCheckFn is a slightly modified version of Check with --download
|
||||
func DownloadCheckFn(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error) {
|
||||
differ, err = operations.CheckIdenticalDownload(ctx, a, b)
|
||||
if err != nil {
|
||||
return true, true, fmt.Errorf("failed to download: %w", err)
|
||||
}
|
||||
return differ, false, nil
|
||||
}
|
||||
|
||||
// check potential conflicts (to avoid renaming if already identical)
|
||||
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
|
||||
matches := bilib.Names{}
|
||||
if filterCheck.HaveFilesFrom() {
|
||||
fs.Debugf(nil, "There are potential conflicts to check.")
|
||||
|
||||
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
|
||||
if checkopterr != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
|
||||
return matches, checkopterr
|
||||
}
|
||||
defer close()
|
||||
|
||||
opt.Match = new(bytes.Buffer)
|
||||
|
||||
opt = WhichCheck(ctxCheck, opt)
|
||||
|
||||
fs.Infof(nil, "Checking potential conflicts...")
|
||||
check := operations.CheckFn(ctxCheck, opt)
|
||||
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
|
||||
|
||||
//reset error count, because we don't want to count check errors as bisync errors
|
||||
accounting.Stats(ctxCheck).ResetErrors()
|
||||
|
||||
//return the list of identical files to check against later
|
||||
if len(fmt.Sprint(opt.Match)) > 0 {
|
||||
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
|
||||
}
|
||||
if matches.NotEmpty() {
|
||||
fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
|
||||
} else {
|
||||
fs.Debugf(nil, "None of the conflicts were determined to be identical.")
|
||||
}
|
||||
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// WhichEqual is similar to WhichCheck, but checks a single object.
|
||||
// Returns true if the objects are equal, false if they differ or if we don't know
|
||||
func WhichEqual(ctx context.Context, src, dst fs.Object, Fsrc, Fdst fs.Fs) bool {
|
||||
opt, close, checkopterr := check.GetCheckOpt(Fsrc, Fdst)
|
||||
if checkopterr != nil {
|
||||
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
|
||||
}
|
||||
defer close()
|
||||
|
||||
opt = WhichCheck(ctx, opt)
|
||||
differ, noHash, err := opt.Check(ctx, dst, src)
|
||||
if err != nil {
|
||||
fs.Errorf(src, "failed to check: %v", err)
|
||||
return false
|
||||
}
|
||||
if noHash {
|
||||
fs.Errorf(src, "failed to check as hash is missing")
|
||||
return false
|
||||
}
|
||||
return !differ
|
||||
}
|
||||
|
||||
// Replaces the standard Equal func with one that also considers checksum
|
||||
// Note that it also updates the modtime the same way as Sync
|
||||
func (b *bisyncRun) EqualFn(ctx context.Context) context.Context {
|
||||
ci := fs.GetConfig(ctx)
|
||||
ci.CheckSum = false // force checksum off so modtime is evaluated if needed
|
||||
// modtime and size settings should already be set correctly for Equal
|
||||
var equalFn operations.EqualFn = func(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool {
|
||||
fs.Debugf(src, "evaluating...")
|
||||
equal := false
|
||||
logger, _ := operations.GetLogger(ctx)
|
||||
// temporarily unset logger, we don't want Equal to duplicate it
|
||||
noop := func(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
|
||||
fs.Debugf(src, "equal skipped")
|
||||
}
|
||||
ctxNoLogger := operations.WithLogger(ctx, noop)
|
||||
|
||||
timeSizeEqualFn := func() (equal bool, skipHash bool) { return operations.Equal(ctxNoLogger, src, dst), false } // normally use Equal()
|
||||
if b.opt.ResyncMode == PreferOlder || b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller {
|
||||
timeSizeEqualFn = func() (equal bool, skipHash bool) { return b.resyncTimeSizeEqual(ctxNoLogger, src, dst) } // but override for --resync-mode older, larger, smaller
|
||||
}
|
||||
skipHash := false // (note that we might skip it anyway based on compare/ht settings)
|
||||
equal, skipHash = timeSizeEqualFn()
|
||||
if equal && !skipHash {
|
||||
whichHashType := func(f fs.Info) hash.Type {
|
||||
ht := getHashType(f.Name())
|
||||
if ht == hash.None && b.opt.Compare.SlowHashSyncOnly && !b.opt.Resync {
|
||||
ht = f.Hashes().GetOne()
|
||||
}
|
||||
return ht
|
||||
}
|
||||
srcHash, _ := src.Hash(ctx, whichHashType(src.Fs()))
|
||||
dstHash, _ := dst.Hash(ctx, whichHashType(dst.Fs()))
|
||||
srcHash, _ = tryDownloadHash(ctx, src, srcHash)
|
||||
dstHash, _ = tryDownloadHash(ctx, dst, dstHash)
|
||||
equal = !hashDiffers(srcHash, dstHash, whichHashType(src.Fs()), whichHashType(dst.Fs()), src.Size(), dst.Size())
|
||||
}
|
||||
if equal {
|
||||
logger(ctx, operations.Match, src, dst, nil)
|
||||
fs.Debugf(src, "EqualFn: files are equal")
|
||||
return true
|
||||
}
|
||||
logger(ctx, operations.Differ, src, dst, nil)
|
||||
fs.Debugf(src, "EqualFn: files are NOT equal")
|
||||
return false
|
||||
}
|
||||
return operations.WithEqualFn(ctx, equalFn)
|
||||
}
|
||||
|
||||
func (b *bisyncRun) resyncTimeSizeEqual(ctxNoLogger context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, skipHash bool) {
|
||||
switch b.opt.ResyncMode {
|
||||
case PreferLarger, PreferSmaller:
|
||||
// note that arg order is path1, path2, regardless of src/dst
|
||||
path1, path2 := b.resyncWhichIsWhich(src, dst)
|
||||
if sizeDiffers(path1.Size(), path2.Size()) {
|
||||
winningPath := b.resolveLargerSmaller(path1.Size(), path2.Size(), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
|
||||
// don't need to check/update modtime here, as sizes definitely differ and something will be transferred
|
||||
return b.resyncWinningPathToEqual(winningPath), b.resyncWinningPathToEqual(winningPath) // skip hash check if true
|
||||
}
|
||||
// sizes equal or don't know, so continue to checking time/hash, if applicable
|
||||
return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
|
||||
case PreferOlder:
|
||||
// note that arg order is path1, path2, regardless of src/dst
|
||||
path1, path2 := b.resyncWhichIsWhich(src, dst)
|
||||
if timeDiffers(ctxNoLogger, path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Fs(), path2.Fs()) {
|
||||
winningPath := b.resolveNewerOlder(path1.ModTime(ctxNoLogger), path2.ModTime(ctxNoLogger), path1.Remote(), path2.Remote(), b.opt.ResyncMode)
|
||||
// if src is winner, proceed with equal to check size/hash and possibly just update dest modtime instead of transferring
|
||||
if !b.resyncWinningPathToEqual(winningPath) {
|
||||
return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
|
||||
}
|
||||
// if dst is winner (and definitely unequal), do not proceed further as we want dst to overwrite src regardless of size difference, and we do not want dest modtime updated
|
||||
return true, true
|
||||
}
|
||||
// times equal or don't know, so continue to checking size/hash, if applicable
|
||||
}
|
||||
return operations.Equal(ctxNoLogger, src, dst), false // note we're back to src/dst, not path1/path2
|
||||
}
|
@ -25,9 +25,13 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// TestFunc allows mocking errors during tests
|
||||
type TestFunc func()
|
||||
|
||||
// Options keep bisync options
|
||||
type Options struct {
|
||||
Resync bool
|
||||
Resync bool // whether or not this is a resync
|
||||
ResyncMode Prefer // which mode to use for resync
|
||||
CheckAccess bool
|
||||
CheckFilename string
|
||||
CheckSync CheckSyncMode
|
||||
@ -37,11 +41,25 @@ type Options struct {
|
||||
Force bool
|
||||
FiltersFile string
|
||||
Workdir string
|
||||
OrigBackupDir string
|
||||
BackupDir1 string
|
||||
BackupDir2 string
|
||||
DryRun bool
|
||||
NoCleanup bool
|
||||
SaveQueues bool // save extra debugging files (test only flag)
|
||||
IgnoreListingChecksum bool
|
||||
Resilient bool
|
||||
Recover bool
|
||||
TestFn TestFunc // test-only option, for mocking errors
|
||||
Compare CompareOpt
|
||||
CompareFlag string
|
||||
DebugName string
|
||||
MaxLock time.Duration
|
||||
ConflictResolve Prefer
|
||||
ConflictLoser ConflictLoserAction
|
||||
ConflictSuffixFlag string
|
||||
ConflictSuffix1 string
|
||||
ConflictSuffix2 string
|
||||
}
|
||||
|
||||
// Default values
|
||||
@ -99,9 +117,14 @@ func (x *CheckSyncMode) Type() string {
|
||||
var Opt Options
|
||||
|
||||
func init() {
|
||||
Opt.MaxLock = 0
|
||||
cmd.Root.AddCommand(commandDefinition)
|
||||
cmdFlags := commandDefinition.Flags()
|
||||
flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Path1 files may overwrite Path2 versions. Consider using --verbose or --dry-run first.", "")
|
||||
// when adding new flags, remember to also update the rc params:
|
||||
// cmd/bisync/rc.go cmd/bisync/help.go (not docs/content/rc.md)
|
||||
// and the Command line syntax section of docs/content/bisync.md (it doesn't update automatically)
|
||||
flags.BoolVarP(cmdFlags, &Opt.Resync, "resync", "1", Opt.Resync, "Performs the resync run. Equivalent to --resync-mode path1. Consider using --verbose or --dry-run first.", "")
|
||||
flags.FVarP(cmdFlags, &Opt.ResyncMode, "resync-mode", "", "During resync, prefer the version that is: path1, path2, newer, older, larger, smaller (default: path1 if --resync, otherwise none for no resync.)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.CheckAccess, "check-access", "", Opt.CheckAccess, makeHelp("Ensure expected {CHECKFILE} files are found on both Path1 and Path2 filesystems, else abort."), "")
|
||||
flags.StringVarP(cmdFlags, &Opt.CheckFilename, "check-filename", "", Opt.CheckFilename, makeHelp("Filename for --check-access (default: {CHECKFILE})"), "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Force, "force", "", Opt.Force, "Bypass --max-delete safety check and run the sync. Consider using with --verbose", "")
|
||||
@ -110,10 +133,24 @@ func init() {
|
||||
flags.BoolVarP(cmdFlags, &Opt.RemoveEmptyDirs, "remove-empty-dirs", "", Opt.RemoveEmptyDirs, "Remove ALL empty directories at the final cleanup step.", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.FiltersFile, "filters-file", "", Opt.FiltersFile, "Read filtering patterns from a file", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.Workdir, "workdir", "", Opt.Workdir, makeHelp("Use custom working dir - useful for testing. (default: {WORKDIR})"), "")
|
||||
flags.StringVarP(cmdFlags, &Opt.BackupDir1, "backup-dir1", "", Opt.BackupDir1, "--backup-dir for Path1. Must be a non-overlapping path on the same remote.", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.BackupDir2, "backup-dir2", "", Opt.BackupDir2, "--backup-dir for Path2. Must be a non-overlapping path on the same remote.", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.DebugName, "debugname", "", Opt.DebugName, "Debug by tracking one file at various points throughout a bisync run (when -v or -vv)", "")
|
||||
flags.BoolVarP(cmdFlags, &tzLocal, "localtime", "", tzLocal, "Use local time in listings (default: UTC)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.NoCleanup, "no-cleanup", "", Opt.NoCleanup, "Retain working files (useful for troubleshooting and testing).", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.IgnoreListingChecksum, "ignore-listing-checksum", "", Opt.IgnoreListingChecksum, "Do not use checksums for listings (add --ignore-checksum to additionally skip post-copy checksum checks)", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Resilient, "resilient", "", Opt.Resilient, "Allow future runs to retry after certain less-serious errors, instead of requiring --resync. Use at your own risk!", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Recover, "recover", "", Opt.Recover, "Automatically recover from interruptions without requiring --resync.", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.CompareFlag, "compare", "", Opt.CompareFlag, "Comma-separated list of bisync-specific compare options ex. 'size,modtime,checksum' (default: 'size,modtime')", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Compare.NoSlowHash, "no-slow-hash", "", Opt.Compare.NoSlowHash, "Ignore listing checksums only on backends where they are slow", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Compare.SlowHashSyncOnly, "slow-hash-sync-only", "", Opt.Compare.SlowHashSyncOnly, "Ignore slow checksums for listings and deltas, but still consider them during sync calls.", "")
|
||||
flags.BoolVarP(cmdFlags, &Opt.Compare.DownloadHash, "download-hash", "", Opt.Compare.DownloadHash, "Compute hash by downloading when otherwise unavailable. (warning: may be slow and use lots of data!)", "")
|
||||
flags.DurationVarP(cmdFlags, &Opt.MaxLock, "max-lock", "", Opt.MaxLock, "Consider lock files older than this to be expired (default: 0 (never expire)) (minimum: 2m)", "")
|
||||
flags.FVarP(cmdFlags, &Opt.ConflictResolve, "conflict-resolve", "", "Automatically resolve conflicts by preferring the version that is: "+ConflictResolveList+" (default: none)", "")
|
||||
flags.FVarP(cmdFlags, &Opt.ConflictLoser, "conflict-loser", "", "Action to take on the loser of a sync conflict (when there is a winner) or on both files (when there is no winner): "+ConflictLoserList+" (default: num)", "")
|
||||
flags.StringVarP(cmdFlags, &Opt.ConflictSuffixFlag, "conflict-suffix", "", Opt.ConflictSuffixFlag, "Suffix to use when renaming a --conflict-loser. Can be either one string or two comma-separated strings to assign different suffixes to Path1/Path2. (default: 'conflict')", "")
|
||||
_ = cmdFlags.MarkHidden("debugname")
|
||||
_ = cmdFlags.MarkHidden("localtime")
|
||||
}
|
||||
|
||||
// bisync command definition
|
||||
@ -124,8 +161,11 @@ var commandDefinition = &cobra.Command{
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.58",
|
||||
"groups": "Filter,Copy,Important",
|
||||
"status": "Beta",
|
||||
},
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
// NOTE: avoid putting too much handling here, as it won't apply to the rc.
|
||||
// Generally it's best to put init-type stuff in Bisync() (operations.go)
|
||||
cmd.CheckArgs(2, 2, command, args)
|
||||
fs1, file1, fs2, file2 := cmd.NewFsSrcDstFiles(args)
|
||||
if file1 != "" || file2 != "" {
|
||||
@ -149,7 +189,7 @@ var commandDefinition = &cobra.Command{
|
||||
}
|
||||
}
|
||||
|
||||
fs.Logf(nil, "bisync is EXPERIMENTAL. Don't use in production!")
|
||||
fs.Logf(nil, "bisync is IN BETA. Don't use in production!")
|
||||
cmd.Run(false, true, command, func() error {
|
||||
err := Bisync(ctx, fs1, fs2, &opt)
|
||||
if err == ErrBisyncAborted {
|
||||
|
309
cmd/bisync/compare.go
Normal file
@ -0,0 +1,309 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
mutex "sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
)
|
||||
|
||||
// CompareOpt describes the Compare options in force
|
||||
type CompareOpt = struct {
|
||||
Modtime bool
|
||||
Size bool
|
||||
Checksum bool
|
||||
HashType1 hash.Type
|
||||
HashType2 hash.Type
|
||||
NoSlowHash bool
|
||||
SlowHashSyncOnly bool
|
||||
SlowHashDetected bool
|
||||
DownloadHash bool
|
||||
}
|
||||
|
||||
func (b *bisyncRun) setCompareDefaults(ctx context.Context) error {
|
||||
ci := fs.GetConfig(ctx)
|
||||
|
||||
// defaults
|
||||
b.opt.Compare.Size = true
|
||||
b.opt.Compare.Modtime = true
|
||||
b.opt.Compare.Checksum = false
|
||||
|
||||
if ci.SizeOnly {
|
||||
b.opt.Compare.Size = true
|
||||
b.opt.Compare.Modtime = false
|
||||
b.opt.Compare.Checksum = false
|
||||
} else if ci.CheckSum && !b.opt.IgnoreListingChecksum {
|
||||
b.opt.Compare.Size = true
|
||||
b.opt.Compare.Modtime = false
|
||||
b.opt.Compare.Checksum = true
|
||||
}
|
||||
|
||||
if ci.IgnoreSize {
|
||||
b.opt.Compare.Size = false
|
||||
}
|
||||
|
||||
err := b.setFromCompareFlag(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.fs1.Features().SlowHash || b.fs2.Features().SlowHash {
|
||||
b.opt.Compare.SlowHashDetected = true
|
||||
}
|
||||
if b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
|
||||
b.setHashType(ci)
|
||||
}
|
||||
|
||||
// Checks and Warnings
|
||||
if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected && b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.Dim, "Ignoring checksums during --resync as --slow-hash-sync-only is set."))
|
||||
ci.CheckSum = false
|
||||
// note not setting b.opt.Compare.Checksum = false as we still want to build listings on the non-slow side, if any
|
||||
} else if b.opt.Compare.Checksum && !ci.CheckSum {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Checksums will be compared for deltas but not during sync as --checksum is not set."))
|
||||
}
|
||||
if b.opt.Compare.Modtime && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Modtime compare was requested but at least one remote does not support it. It is recommended to use --checksum or --size-only instead."))
|
||||
}
|
||||
if (ci.CheckSum || b.opt.Compare.Checksum) && b.opt.IgnoreListingChecksum {
|
||||
if (b.opt.Compare.HashType1 == hash.None || b.opt.Compare.HashType2 == hash.None) && !b.opt.Compare.DownloadHash {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, `WARNING: Checksum compare was requested but at least one remote does not support checksums (or checksums are being ignored) and --ignore-listing-checksum is set.
|
||||
Ignoring Checksums globally and falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime). Path1 (%s): %s, Path2 (%s): %s`),
|
||||
b.fs1.String(), b.opt.Compare.HashType1.String(), b.fs2.String(), b.opt.Compare.HashType2.String())
|
||||
b.opt.Compare.Modtime = true
|
||||
b.opt.Compare.Size = true
|
||||
ci.CheckSum = false
|
||||
b.opt.Compare.Checksum = false
|
||||
} else {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksum for deltas as --ignore-listing-checksum is set"))
|
||||
// note: --checksum will still affect the internal sync calls
|
||||
}
|
||||
}
|
||||
if !ci.CheckSum && !b.opt.Compare.Checksum && !b.opt.IgnoreListingChecksum {
|
||||
fs.Infof(nil, Color(terminal.Dim, "Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set."))
|
||||
b.opt.IgnoreListingChecksum = true
|
||||
}
|
||||
if !b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
|
||||
return errors.New(Color(terminal.RedFg, "must set a Compare method. (size, modtime, and checksum can't all be false.)"))
|
||||
}
|
||||
|
||||
notSupported := func(label string, value bool, opt *bool) {
|
||||
if value {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: %s is set but bisync does not support it. It will be ignored."), label)
|
||||
*opt = false
|
||||
}
|
||||
}
|
||||
notSupported("--update", ci.UpdateOlder, &ci.UpdateOlder)
|
||||
notSupported("--no-check-dest", ci.NoCheckDest, &ci.NoCheckDest)
|
||||
notSupported("--no-traverse", ci.NoTraverse, &ci.NoTraverse)
|
||||
// TODO: thorough search for other flags that should be on this list...
|
||||
|
||||
prettyprint(b.opt.Compare, "Bisyncing with Comparison Settings", fs.LogLevelInfo)
|
||||
return nil
|
||||
}
|
||||
|
||||
// returns true if the sizes are definitely different.
|
||||
// returns false if equal, or if either is unknown.
|
||||
func sizeDiffers(a, b int64) bool {
|
||||
if a < 0 || b < 0 {
|
||||
return false
|
||||
}
|
||||
return a != b
|
||||
}
|
||||
|
||||
// returns true if the hashes are definitely different.
|
||||
// returns false if equal, or if either is unknown.
|
||||
func hashDiffers(a, b string, ht1, ht2 hash.Type, size1, size2 int64) bool {
|
||||
if a == "" || b == "" {
|
||||
if ht1 != hash.None && ht2 != hash.None && !(size1 <= 0 || size2 <= 0) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: hash unexpectedly blank despite Fs support (%s, %s) (you may need to --resync!)"), a, b)
|
||||
}
|
||||
return false
|
||||
}
|
||||
if ht1 != ht2 {
|
||||
if !(downloadHash && ((ht1 == hash.MD5 && ht2 == hash.None) || (ht1 == hash.None && ht2 == hash.MD5))) {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "WARNING: Can't compare hashes of different types (%s, %s)"), ht1.String(), ht2.String())
|
||||
return false
|
||||
}
|
||||
}
|
||||
return a != b
|
||||
}
|
||||
|
||||
// chooses hash type, giving priority to types both sides have in common
|
||||
func (b *bisyncRun) setHashType(ci *fs.ConfigInfo) {
|
||||
downloadHash = b.opt.Compare.DownloadHash
|
||||
if b.opt.Compare.NoSlowHash && b.opt.Compare.SlowHashDetected {
|
||||
fs.Infof(nil, "Not checking for common hash as at least one slow hash detected.")
|
||||
} else {
|
||||
common := b.fs1.Hashes().Overlap(b.fs2.Hashes())
|
||||
if common.Count() > 0 && common.GetOne() != hash.None {
|
||||
ht := common.GetOne()
|
||||
b.opt.Compare.HashType1 = ht
|
||||
b.opt.Compare.HashType2 = ht
|
||||
if !b.opt.Compare.SlowHashSyncOnly || !b.opt.Compare.SlowHashDetected {
|
||||
return
|
||||
}
|
||||
} else if b.opt.Compare.SlowHashSyncOnly && b.opt.Compare.SlowHashDetected {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "Ignoring --slow-hash-sync-only and falling back to --no-slow-hash as Path1 and Path2 have no hashes in common."))
|
||||
b.opt.Compare.SlowHashSyncOnly = false
|
||||
b.opt.Compare.NoSlowHash = true
|
||||
ci.CheckSum = false
|
||||
}
|
||||
}
|
||||
|
||||
if !b.opt.Compare.DownloadHash && !b.opt.Compare.SlowHashSyncOnly {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "--checksum is in use but Path1 and Path2 have no hashes in common; falling back to --compare modtime,size for sync. (Use --compare size or --size-only to ignore modtime)"))
|
||||
fs.Infof("Path1 hashes", "%v", b.fs1.Hashes().String())
|
||||
fs.Infof("Path2 hashes", "%v", b.fs2.Hashes().String())
|
||||
b.opt.Compare.Modtime = true
|
||||
b.opt.Compare.Size = true
|
||||
ci.CheckSum = false
|
||||
}
|
||||
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs1.Features().SlowHash {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path1. Will ignore checksum due to slow-hash settings"))
|
||||
b.opt.Compare.HashType1 = hash.None
|
||||
} else {
|
||||
b.opt.Compare.HashType1 = b.fs1.Hashes().GetOne()
|
||||
if b.opt.Compare.HashType1 != hash.None {
|
||||
fs.Logf(b.fs1, Color(terminal.YellowFg, "will use %s for same-side diffs on Path1 only"), b.opt.Compare.HashType1)
|
||||
}
|
||||
}
|
||||
if (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.fs2.Features().SlowHash {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Slow hash detected on Path2. Will ignore checksum due to slow-hash settings"))
|
||||
b.opt.Compare.HashType2 = hash.None
|
||||
} else {
|
||||
b.opt.Compare.HashType2 = b.fs2.Hashes().GetOne()
|
||||
if b.opt.Compare.HashType2 != hash.None {
|
||||
fs.Logf(b.fs2, Color(terminal.YellowFg, "will use %s for same-side diffs on Path2 only"), b.opt.Compare.HashType2)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.HashType1 == hash.None && b.opt.Compare.HashType2 == hash.None && !b.opt.Compare.DownloadHash {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: Ignoring checksums globally as hashes are ignored or unavailable on both sides."))
|
||||
b.opt.Compare.Checksum = false
|
||||
ci.CheckSum = false
|
||||
b.opt.IgnoreListingChecksum = true
|
||||
}
|
||||
}
|
||||
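A rough standalone illustration of the common-hash selection above, using the fs/hash Set helpers (Overlap, Count and GetOne appear in the change itself; NewHashSet is assumed to be available in the fs/hash package). The backends and hash types below are made-up example values:

	package main

	import (
		"fmt"

		"github.com/rclone/rclone/fs/hash"
	)

	func main() {
		// Made-up example: Path1 supports MD5 and SHA1, Path2 supports SHA1 and CRC32.
		path1 := hash.NewHashSet(hash.MD5, hash.SHA1)
		path2 := hash.NewHashSet(hash.SHA1, hash.CRC32)

		common := path1.Overlap(path2)
		fmt.Println(common.Count())  // 1
		fmt.Println(common.GetOne()) // sha1: used for both HashType1 and HashType2 when available
	}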
|
||||
// returns true if the times are definitely different (by more than the modify window).
|
||||
// returns false if equal, within modify window, or if either is unknown.
|
||||
// considers precision per-Fs.
|
||||
func timeDiffers(ctx context.Context, a, b time.Time, fsA, fsB fs.Info) bool {
|
||||
modifyWindow := fs.GetModifyWindow(ctx, fsA, fsB)
|
||||
if modifyWindow == fs.ModTimeNotSupported {
|
||||
return false
|
||||
}
|
||||
if a.IsZero() || b.IsZero() {
|
||||
fs.Logf(fsA, "Fs supports modtime, but modtime is missing")
|
||||
return false
|
||||
}
|
||||
dt := b.Sub(a)
|
||||
if dt < modifyWindow && dt > -modifyWindow {
|
||||
fs.Debugf(a, "modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow)
|
||||
return false
|
||||
}
|
||||
|
||||
fs.Debugf(a, "Modification times differ by %s: %v, %v", dt, a, b)
|
||||
return true
|
||||
}
|
||||
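The three helpers above (sizeDiffers, hashDiffers, timeDiffers) share one convention: an unknown value (negative size, blank hash, zero or unsupported modtime) never counts as a difference. A minimal runnable sketch of that rule, copying sizeDiffers as written above:

	package main

	import "fmt"

	// sizeDiffers copies the helper above: unknown sizes (< 0, e.g. Google Docs)
	// never count as a difference.
	func sizeDiffers(a, b int64) bool {
		if a < 0 || b < 0 {
			return false
		}
		return a != b
	}

	func main() {
		fmt.Println(sizeDiffers(100, 200)) // true:  definitely different
		fmt.Println(sizeDiffers(100, 100)) // false: equal
		fmt.Println(sizeDiffers(-1, 200))  // false: unknown, so not definitely different
	}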
|
||||
func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
|
||||
if b.opt.CompareFlag == "" {
|
||||
return nil
|
||||
}
|
||||
var CompareFlag CompareOpt // for exclusions
|
||||
opts := strings.Split(b.opt.CompareFlag, ",")
|
||||
for _, opt := range opts {
|
||||
switch strings.ToLower(strings.TrimSpace(opt)) {
|
||||
case "size":
|
||||
b.opt.Compare.Size = true
|
||||
CompareFlag.Size = true
|
||||
case "modtime":
|
||||
b.opt.Compare.Modtime = true
|
||||
CompareFlag.Modtime = true
|
||||
case "checksum":
|
||||
b.opt.Compare.Checksum = true
|
||||
CompareFlag.Checksum = true
|
||||
default:
|
||||
return fmt.Errorf(Color(terminal.RedFg, "unknown compare option: %s (must be size, modtime, or checksum)"), opt)
|
||||
}
|
||||
}
|
||||
|
||||
// exclusions (override defaults, only if --compare != "")
|
||||
if !CompareFlag.Size {
|
||||
b.opt.Compare.Size = false
|
||||
}
|
||||
if !CompareFlag.Modtime {
|
||||
b.opt.Compare.Modtime = false
|
||||
}
|
||||
if !CompareFlag.Checksum {
|
||||
b.opt.Compare.Checksum = false
|
||||
}
|
||||
|
||||
// override sync flags to match
|
||||
ci := fs.GetConfig(ctx)
|
||||
if b.opt.Compare.Checksum {
|
||||
ci.CheckSum = true
|
||||
}
|
||||
if b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
|
||||
ci.CheckSum = false
|
||||
}
|
||||
if !b.opt.Compare.Size {
|
||||
ci.IgnoreSize = true
|
||||
}
|
||||
if !b.opt.Compare.Modtime {
|
||||
ci.UseServerModTime = true
|
||||
}
|
||||
if b.opt.Compare.Size && !b.opt.Compare.Modtime && !b.opt.Compare.Checksum {
|
||||
ci.SizeOnly = true
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
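To make the exclusion step above concrete, here is a hypothetical, standalone re-implementation of just the --compare string parsing (illustration only, not the rclone code itself): any option not named in the flag ends up disabled.

	package main

	import (
		"fmt"
		"strings"
	)

	// parseCompare mirrors the parsing above in simplified form: options not
	// listed in the comma-separated flag stay false.
	func parseCompare(flag string) (size, modtime, checksum bool, err error) {
		for _, opt := range strings.Split(flag, ",") {
			switch strings.ToLower(strings.TrimSpace(opt)) {
			case "size":
				size = true
			case "modtime":
				modtime = true
			case "checksum":
				checksum = true
			default:
				return false, false, false, fmt.Errorf("unknown compare option: %s (must be size, modtime, or checksum)", opt)
			}
		}
		return size, modtime, checksum, nil
	}

	func main() {
		size, modtime, checksum, _ := parseCompare("size,checksum")
		fmt.Println(size, modtime, checksum) // true false true: modtime is excluded because it was not listed
	}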
|
||||
// downloadHash is true if we should attempt to compute hash by downloading when otherwise unavailable
|
||||
var downloadHash bool
|
||||
var downloadHashWarn mutex.Once
|
||||
var firstDownloadHash mutex.Once
|
||||
|
||||
func tryDownloadHash(ctx context.Context, o fs.DirEntry, hashVal string) (string, error) {
|
||||
if hashVal != "" || !downloadHash {
|
||||
return hashVal, nil
|
||||
}
|
||||
obj, ok := o.(fs.Object)
|
||||
if !ok {
|
||||
fs.Infof(o, "failed to download hash -- not an fs.Object")
|
||||
return hashVal, fs.ErrorObjectNotFound
|
||||
}
|
||||
if o.Size() < 0 {
|
||||
downloadHashWarn.Do(func() {
|
||||
fs.Logf(o, Color(terminal.YellowFg, "Skipping hash download as checksum not reliable with files of unknown length."))
|
||||
})
|
||||
fs.Debugf(o, "Skipping hash download as checksum not reliable with files of unknown length.")
|
||||
return hashVal, hash.ErrUnsupported
|
||||
}
|
||||
|
||||
firstDownloadHash.Do(func() {
|
||||
fs.Infof(obj.Fs().Name(), Color(terminal.Dim, "Downloading hashes..."))
|
||||
})
|
||||
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "computing hash with --download-hash")
|
||||
defer func() {
|
||||
tr.Done(ctx, nil)
|
||||
}()
|
||||
|
||||
sum, err := operations.HashSum(ctx, hash.MD5, false, true, obj)
|
||||
if err != nil {
|
||||
fs.Infof(o, "DownloadHash -- hash: %v, err: %v", sum, err)
|
||||
} else {
|
||||
fs.Debugf(o, "DownloadHash -- hash: %v", sum)
|
||||
}
|
||||
return sum, err
|
||||
}
|
@ -3,19 +3,18 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/cmd/check"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// delta
|
||||
@ -26,14 +25,17 @@ const (
|
||||
deltaNew delta = 1 << iota
|
||||
deltaNewer
|
||||
deltaOlder
|
||||
deltaSize
|
||||
deltaLarger
|
||||
deltaSmaller
|
||||
deltaHash
|
||||
deltaDeleted
|
||||
)
|
||||
|
||||
const (
|
||||
deltaModified delta = deltaNewer | deltaOlder | deltaSize | deltaHash | deltaDeleted
|
||||
deltaOther delta = deltaNew | deltaNewer | deltaOlder
|
||||
deltaSize delta = deltaLarger | deltaSmaller
|
||||
deltaTime delta = deltaNewer | deltaOlder
|
||||
deltaModified delta = deltaTime | deltaSize | deltaHash
|
||||
deltaOther delta = deltaNew | deltaTime | deltaSize | deltaHash
|
||||
)
|
||||
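The delta type above is a plain bit set, so one file can carry several change flags at once and the composite constants test whole groups of bits. A self-contained sketch, assuming is() is the obvious bitwise test (consistent with how it is used throughout this change):

	package main

	import "fmt"

	type delta uint8

	const (
		deltaNew delta = 1 << iota
		deltaNewer
		deltaOlder
		deltaLarger
		deltaSmaller
		deltaHash
		deltaDeleted
	)

	const (
		deltaSize     delta = deltaLarger | deltaSmaller
		deltaTime     delta = deltaNewer | deltaOlder
		deltaModified delta = deltaTime | deltaSize | deltaHash
		deltaOther    delta = deltaNew | deltaTime | deltaSize | deltaHash
	)

	// is reports whether any of the bits in cond are set on d
	// (assumed to match the unexported method in this change).
	func (d delta) is(cond delta) bool { return d&cond != 0 }

	func main() {
		var d delta
		d |= deltaNewer  // modtime moved forward
		d |= deltaLarger // size grew
		fmt.Println(d.is(deltaTime))     // true
		fmt.Println(d.is(deltaModified)) // true
		fmt.Println(d.is(deltaDeleted))  // false
	}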
|
||||
func (d delta) is(cond delta) bool {
|
||||
@ -43,6 +45,9 @@ func (d delta) is(cond delta) bool {
|
||||
// deltaSet
|
||||
type deltaSet struct {
|
||||
deltas map[string]delta
|
||||
size map[string]int64
|
||||
time map[string]time.Time
|
||||
hash map[string]string
|
||||
opt *Options
|
||||
fs fs.Fs // base filesystem
|
||||
msg string // filesystem name for logging
|
||||
@ -74,71 +79,77 @@ func (ds *deltaSet) printStats() {
|
||||
}
|
||||
nAll := len(ds.deltas)
|
||||
nNew := 0
|
||||
nMod := 0
|
||||
nTime := 0
|
||||
nNewer := 0
|
||||
nOlder := 0
|
||||
nSize := 0
|
||||
nLarger := 0
|
||||
nSmaller := 0
|
||||
nHash := 0
|
||||
nDeleted := 0
|
||||
for _, d := range ds.deltas {
|
||||
if d.is(deltaNew) {
|
||||
nNew++
|
||||
}
|
||||
if d.is(deltaModified) {
|
||||
nMod++
|
||||
}
|
||||
if d.is(deltaTime) {
|
||||
nTime++
|
||||
}
|
||||
if d.is(deltaNewer) {
|
||||
nNewer++
|
||||
}
|
||||
if d.is(deltaOlder) {
|
||||
nOlder++
|
||||
}
|
||||
if d.is(deltaSize) {
|
||||
nSize++
|
||||
}
|
||||
if d.is(deltaLarger) {
|
||||
nLarger++
|
||||
}
|
||||
if d.is(deltaSmaller) {
|
||||
nSmaller++
|
||||
}
|
||||
if d.is(deltaHash) {
|
||||
nHash++
|
||||
}
|
||||
if d.is(deltaDeleted) {
|
||||
nDeleted++
|
||||
}
|
||||
}
|
||||
fs.Infof(nil, "%s: %4d changes: %4d new, %4d newer, %4d older, %4d deleted",
|
||||
ds.msg, nAll, nNew, nNewer, nOlder, nDeleted)
|
||||
}
|
||||
|
||||
// check potential conflicts (to avoid renaming if already identical)
|
||||
func (b *bisyncRun) checkconflicts(ctxCheck context.Context, filterCheck *filter.Filter, fs1, fs2 fs.Fs) (bilib.Names, error) {
|
||||
matches := bilib.Names{}
|
||||
if filterCheck.HaveFilesFrom() {
|
||||
fs.Debugf(nil, "There are potential conflicts to check.")
|
||||
|
||||
opt, close, checkopterr := check.GetCheckOpt(b.fs1, b.fs2)
|
||||
if checkopterr != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
fs.Debugf(nil, "GetCheckOpt error: %v", checkopterr)
|
||||
return matches, checkopterr
|
||||
}
|
||||
defer close()
|
||||
|
||||
opt.Match = new(bytes.Buffer)
|
||||
|
||||
// TODO: consider using custom CheckFn to act like cryptcheck, if either fs is a crypt remote and -c has been passed
|
||||
// note that cryptCheck() is not currently exported
|
||||
|
||||
fs.Infof(nil, "Checking potential conflicts...")
|
||||
check := operations.Check(ctxCheck, opt)
|
||||
fs.Infof(nil, "Finished checking the potential conflicts. %s", check)
|
||||
|
||||
//reset error count, because we don't want to count check errors as bisync errors
|
||||
accounting.Stats(ctxCheck).ResetErrors()
|
||||
|
||||
//return the list of identical files to check against later
|
||||
if len(fmt.Sprint(opt.Match)) > 0 {
|
||||
matches = bilib.ToNames(strings.Split(fmt.Sprint(opt.Match), "\n"))
|
||||
}
|
||||
if matches.NotEmpty() {
|
||||
fs.Debugf(nil, "The following potential conflicts were determined to be identical. %v", matches)
|
||||
} else {
|
||||
fs.Debugf(nil, "None of the conflicts were determined to be identical.")
|
||||
}
|
||||
|
||||
if nAll != nNew+nMod+nDeleted {
|
||||
fs.Errorf(nil, "something doesn't add up! %4d != %4d + %4d + %4d", nAll, nNew, nMod, nDeleted)
|
||||
}
|
||||
fs.Infof(nil, "%s: %4d changes: "+Color(terminal.GreenFg, "%4d new")+", "+Color(terminal.YellowFg, "%4d modified")+", "+Color(terminal.RedFg, "%4d deleted"),
|
||||
ds.msg, nAll, nNew, nMod, nDeleted)
|
||||
if nMod > 0 {
|
||||
details := []string{}
|
||||
if nTime > 0 {
|
||||
details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d newer"), nNewer))
|
||||
details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d older"), nOlder))
|
||||
}
|
||||
if nSize > 0 {
|
||||
details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d larger"), nLarger))
|
||||
details = append(details, fmt.Sprintf(Color(terminal.BlueFg, "%4d smaller"), nSmaller))
|
||||
}
|
||||
if nHash > 0 {
|
||||
details = append(details, fmt.Sprintf(Color(terminal.CyanFg, "%4d hash differs"), nHash))
|
||||
}
|
||||
if (nNewer+nOlder != nTime) || (nLarger+nSmaller != nSize) || (nMod > nTime+nSize+nHash) {
|
||||
fs.Errorf(nil, "something doesn't add up!")
|
||||
}
|
||||
|
||||
fs.Infof(nil, "(%s: %s)", Color(terminal.YellowFg, "Modified"), strings.Join(details, ", "))
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// findDeltas
|
||||
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newListing, msg string) (ds *deltaSet, err error) {
|
||||
var old, now *fileList
|
||||
func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing string, now *fileList, msg string) (ds *deltaSet, err error) {
|
||||
var old *fileList
|
||||
newListing := oldListing + "-new"
|
||||
|
||||
old, err = b.loadListing(oldListing)
|
||||
if err != nil {
|
||||
@ -150,7 +161,6 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
return
|
||||
}
|
||||
|
||||
now, err = b.makeListing(fctx, f, newListing)
|
||||
if err == nil {
|
||||
err = b.checkListing(now, newListing, "current "+msg)
|
||||
}
|
||||
@ -160,6 +170,9 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
|
||||
ds = &deltaSet{
|
||||
deltas: map[string]delta{},
|
||||
size: map[string]int64{},
|
||||
time: map[string]time.Time{},
|
||||
hash: map[string]string{},
|
||||
fs: f,
|
||||
msg: msg,
|
||||
oldCount: len(old.list),
|
||||
@ -168,26 +181,75 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
}
|
||||
|
||||
for _, file := range old.list {
|
||||
// REMEMBER: this section is only concerned with comparing listings from the same side (not different sides)
|
||||
d := deltaZero
|
||||
s := int64(0)
|
||||
h := ""
|
||||
var t time.Time
|
||||
if !now.has(file) {
|
||||
b.indent(msg, file, "File was deleted")
|
||||
b.indent(msg, file, Color(terminal.RedFg, "File was deleted"))
|
||||
ds.deleted++
|
||||
d |= deltaDeleted
|
||||
} else {
|
||||
if old.getTime(file) != now.getTime(file) {
|
||||
if old.beforeOther(now, file) {
|
||||
b.indent(msg, file, "File is newer")
|
||||
d |= deltaNewer
|
||||
} else { // Current version is older than prior sync.
|
||||
b.indent(msg, file, "File is OLDER")
|
||||
d |= deltaOlder
|
||||
// skip dirs here, as we only care if they are new/deleted, not newer/older
|
||||
if !now.isDir(file) {
|
||||
whatchanged := []string{}
|
||||
if b.opt.Compare.Size {
|
||||
if sizeDiffers(old.getSize(file), now.getSize(file)) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getSize(file), now.getSize(file))
|
||||
if now.getSize(file) > old.getSize(file) {
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (larger)"))
|
||||
d |= deltaLarger
|
||||
} else {
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "size (smaller)"))
|
||||
d |= deltaSmaller
|
||||
}
|
||||
s = now.getSize(file)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Modtime {
|
||||
if timeDiffers(fctx, old.getTime(file), now.getTime(file), f, f) {
|
||||
if old.beforeOther(now, file) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (newer)"))
|
||||
d |= deltaNewer
|
||||
} else { // Current version is older than prior sync.
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getTime(file), now.getTime(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "time (older)"))
|
||||
d |= deltaOlder
|
||||
}
|
||||
t = now.getTime(file)
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Checksum {
|
||||
if hashDiffers(old.getHash(file), now.getHash(file), old.hash, now.hash, old.getSize(file), now.getSize(file)) {
|
||||
fs.Debugf(file, "(old: %v current: %v)", old.getHash(file), now.getHash(file))
|
||||
whatchanged = append(whatchanged, Color(terminal.MagentaFg, "hash"))
|
||||
d |= deltaHash
|
||||
h = now.getHash(file)
|
||||
}
|
||||
}
|
||||
// concat changes and print log
|
||||
if d.is(deltaModified) {
|
||||
summary := fmt.Sprintf(Color(terminal.YellowFg, "File changed: %s"), strings.Join(whatchanged, ", "))
|
||||
b.indent(msg, file, summary)
|
||||
}
|
||||
}
|
||||
// TODO Compare sizes and hashes
|
||||
}
|
||||
|
||||
if d.is(deltaModified) {
|
||||
ds.deltas[file] = d
|
||||
if b.opt.Compare.Size {
|
||||
ds.size[file] = s
|
||||
}
|
||||
if b.opt.Compare.Modtime {
|
||||
ds.time[file] = t
|
||||
}
|
||||
if b.opt.Compare.Checksum {
|
||||
ds.hash[file] = h
|
||||
}
|
||||
} else if d.is(deltaDeleted) {
|
||||
ds.deltas[file] = d
|
||||
} else {
|
||||
// Once we've found at least one unchanged file,
|
||||
// we know that not everything has changed,
|
||||
@ -198,8 +260,17 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
|
||||
for _, file := range now.list {
|
||||
if !old.has(file) {
|
||||
b.indent(msg, file, "File is new")
|
||||
b.indent(msg, file, Color(terminal.GreenFg, "File is new"))
|
||||
ds.deltas[file] = deltaNew
|
||||
if b.opt.Compare.Size {
|
||||
ds.size[file] = now.getSize(file)
|
||||
}
|
||||
if b.opt.Compare.Modtime {
|
||||
ds.time[file] = now.getTime(file)
|
||||
}
|
||||
if b.opt.Compare.Checksum {
|
||||
ds.hash[file] = now.getHash(file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -217,7 +288,7 @@ func (b *bisyncRun) findDeltas(fctx context.Context, f fs.Fs, oldListing, newLis
|
||||
}
|
||||
|
||||
// applyDeltas
|
||||
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, err error) {
|
||||
func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (changes1, changes2 bool, results2to1, results1to2 []Results, queues queues, err error) {
|
||||
path1 := bilib.FsPath(b.fs1)
|
||||
path2 := bilib.FsPath(b.fs2)
|
||||
|
||||
@ -226,9 +297,17 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
delete1 := bilib.Names{}
|
||||
delete2 := bilib.Names{}
|
||||
handled := bilib.Names{}
|
||||
renameSkipped := bilib.Names{}
|
||||
deletedonboth := bilib.Names{}
|
||||
skippedDirs1 := newFileList()
|
||||
skippedDirs2 := newFileList()
|
||||
b.renames = renames{}
|
||||
|
||||
ctxMove := b.opt.setDryRun(ctx)
|
||||
|
||||
// update AliasMap for deleted files, as march does not know about them
|
||||
b.updateAliases(ctx, ds1, ds2)
|
||||
|
||||
// efficient isDir check
|
||||
// we load the listing just once and store only the dirs
|
||||
dirs1, dirs1Err := b.listDirsOnly(1)
|
||||
@ -259,14 +338,32 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
ctxCheck, filterCheck := filter.AddConfig(ctxNew)
|
||||
|
||||
for _, file := range ds1.sort() {
|
||||
alias := b.aliases.Alias(file)
|
||||
d1 := ds1.deltas[file]
|
||||
if d1.is(deltaOther) {
|
||||
d2 := ds2.deltas[file]
|
||||
d2, in2 := ds2.deltas[file]
|
||||
file2 := file
|
||||
if !in2 && file != alias {
|
||||
d2 = ds2.deltas[alias]
|
||||
file2 = alias
|
||||
}
|
||||
if d2.is(deltaOther) {
|
||||
if err := filterCheck.AddFile(file); err != nil {
|
||||
fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
|
||||
// if size or hash differ, skip this, as we already know they're not equal
|
||||
if (b.opt.Compare.Size && sizeDiffers(ds1.size[file], ds2.size[file2])) ||
|
||||
(b.opt.Compare.Checksum && hashDiffers(ds1.hash[file], ds2.hash[file2], b.opt.Compare.HashType1, b.opt.Compare.HashType2, ds1.size[file], ds2.size[file2])) {
|
||||
fs.Debugf(file, "skipping equality check as size/hash definitely differ")
|
||||
} else {
|
||||
fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", file)
|
||||
checkit := func(filename string) {
|
||||
if err := filterCheck.AddFile(filename); err != nil {
|
||||
fs.Debugf(nil, "Non-critical error adding file to list of potential conflicts to check: %s", err)
|
||||
} else {
|
||||
fs.Debugf(nil, "Added file to list of potential conflicts to check: %s", filename)
|
||||
}
|
||||
}
|
||||
checkit(file)
|
||||
if file != alias {
|
||||
checkit(alias)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -276,12 +373,17 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
matches, err := b.checkconflicts(ctxCheck, filterCheck, b.fs1, b.fs2)
|
||||
|
||||
for _, file := range ds1.sort() {
|
||||
alias := b.aliases.Alias(file)
|
||||
p1 := path1 + file
|
||||
p2 := path2 + file
|
||||
p2 := path2 + alias
|
||||
d1 := ds1.deltas[file]
|
||||
|
||||
if d1.is(deltaOther) {
|
||||
d2, in2 := ds2.deltas[file]
|
||||
// try looking under alternate name
|
||||
if !in2 && file != alias {
|
||||
d2, in2 = ds2.deltas[alias]
|
||||
}
|
||||
if !in2 {
|
||||
b.indent("Path1", p2, "Queue copy to Path2")
|
||||
copy1to2.Add(file)
|
||||
@ -293,30 +395,46 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
b.indent("!WARNING", file, "New or changed in both paths")
|
||||
|
||||
//if files are identical, leave them alone instead of renaming
|
||||
if dirs1.has(file) && dirs2.has(file) {
|
||||
fs.Debugf(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
|
||||
if (dirs1.has(file) || dirs1.has(alias)) && (dirs2.has(file) || dirs2.has(alias)) {
|
||||
fs.Infof(nil, "This is a directory, not a file. Skipping equality check and will not rename: %s", file)
|
||||
ls1.getPut(file, skippedDirs1)
|
||||
ls2.getPut(file, skippedDirs2)
|
||||
b.debugFn(file, func() {
|
||||
b.debug(file, fmt.Sprintf("deltas dir: %s, ls1 has name?: %v, ls2 has name?: %v", file, ls1.has(b.DebugName), ls2.has(b.DebugName)))
|
||||
})
|
||||
} else {
|
||||
equal := matches.Has(file)
|
||||
if !equal {
|
||||
equal = matches.Has(alias)
|
||||
}
|
||||
if equal {
|
||||
fs.Infof(nil, "Files are equal! Skipping: %s", file)
|
||||
if ciCheck.FixCase && file != alias {
|
||||
// the content is equal but filename still needs to be FixCase'd, so copy1to2
|
||||
// the Path1 version is deemed "correct" in this scenario
|
||||
fs.Infof(alias, "Files are equal but will copy anyway to fix case to %s", file)
|
||||
copy1to2.Add(file)
|
||||
} else if b.opt.Compare.Modtime && timeDiffers(ctx, ls1.getTime(ls1.getTryAlias(file, alias)), ls2.getTime(ls2.getTryAlias(file, alias)), b.fs1, b.fs2) {
|
||||
fs.Infof(file, "Files are equal but will copy anyway to update modtime (will not rename)")
|
||||
if ls1.getTime(ls1.getTryAlias(file, alias)).Before(ls2.getTime(ls2.getTryAlias(file, alias))) {
|
||||
// Path2 is newer
|
||||
b.indent("Path2", p1, "Queue copy to Path1")
|
||||
copy2to1.Add(ls2.getTryAlias(file, alias))
|
||||
} else {
|
||||
// Path1 is newer
|
||||
b.indent("Path1", p2, "Queue copy to Path2")
|
||||
copy1to2.Add(ls1.getTryAlias(file, alias))
|
||||
}
|
||||
} else {
|
||||
fs.Infof(nil, "Files are equal! Skipping: %s", file)
|
||||
renameSkipped.Add(file)
|
||||
renameSkipped.Add(alias)
|
||||
}
|
||||
} else {
|
||||
fs.Debugf(nil, "Files are NOT equal: %s", file)
|
||||
b.indent("!Path1", p1+"..path1", "Renaming Path1 copy")
|
||||
if err = operations.MoveFile(ctxMove, b.fs1, b.fs1, file+"..path1", file); err != nil {
|
||||
err = fmt.Errorf("path1 rename failed for %s: %w", p1, err)
|
||||
b.critical = true
|
||||
err = b.resolve(ctxMove, path1, path2, file, alias, &renameSkipped, ©1to2, ©2to1, ds1, ds2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
b.indent("!Path1", p2+"..path1", "Queue copy to Path2")
|
||||
copy1to2.Add(file + "..path1")
|
||||
|
||||
b.indent("!Path2", p2+"..path2", "Renaming Path2 copy")
|
||||
if err = operations.MoveFile(ctxMove, b.fs2, b.fs2, file+"..path2", file); err != nil {
|
||||
err = fmt.Errorf("path2 rename failed for %s: %w", file, err)
|
||||
return
|
||||
}
|
||||
b.indent("!Path2", p1+"..path2", "Queue copy to Path1")
|
||||
copy2to1.Add(file + "..path2")
|
||||
}
|
||||
}
|
||||
handled.Add(file)
|
||||
@ -324,24 +442,37 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
} else {
|
||||
// Path1 deleted
|
||||
d2, in2 := ds2.deltas[file]
|
||||
// try looking under alternate name
|
||||
fs.Debugf(file, "alias: %s, in2: %v", alias, in2)
|
||||
if !in2 && file != alias {
|
||||
fs.Debugf(file, "looking for alias: %s", alias)
|
||||
d2, in2 = ds2.deltas[alias]
|
||||
if in2 {
|
||||
fs.Debugf(file, "detected alias: %s", alias)
|
||||
}
|
||||
}
|
||||
if !in2 {
|
||||
b.indent("Path2", p2, "Queue delete")
|
||||
delete2.Add(file)
|
||||
copy1to2.Add(file)
|
||||
} else if d2.is(deltaOther) {
|
||||
b.indent("Path2", p1, "Queue copy to Path1")
|
||||
copy2to1.Add(file)
|
||||
handled.Add(file)
|
||||
} else if d2.is(deltaDeleted) {
|
||||
handled.Add(file)
|
||||
deletedonboth.Add(file)
|
||||
deletedonboth.Add(alias)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, file := range ds2.sort() {
|
||||
p1 := path1 + file
|
||||
alias := b.aliases.Alias(file)
|
||||
p1 := path1 + alias
|
||||
d2 := ds2.deltas[file]
|
||||
|
||||
if handled.Has(file) {
|
||||
if handled.Has(file) || handled.Has(alias) {
|
||||
continue
|
||||
}
|
||||
if d2.is(deltaOther) {
|
||||
@ -351,58 +482,68 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
|
||||
// Deleted
|
||||
b.indent("Path1", p1, "Queue delete")
|
||||
delete1.Add(file)
|
||||
copy2to1.Add(file)
|
||||
}
|
||||
}
|
||||
|
||||
// Do the batch operation
|
||||
if copy2to1.NotEmpty() {
|
||||
if copy2to1.NotEmpty() && !b.InGracefulShutdown {
|
||||
changes1 = true
|
||||
b.indent("Path2", "Path1", "Do queued copies to")
|
||||
err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
|
||||
if err != nil {
|
||||
ctx = b.setBackupDir(ctx, 1)
|
||||
results2to1, err = b.fastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1")
|
||||
|
||||
// retries, if any
|
||||
results2to1, err = b.retryFastCopy(ctx, b.fs2, b.fs1, copy2to1, "copy2to1", results2to1, err)
|
||||
|
||||
if !b.InGracefulShutdown && err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, "make")
|
||||
b.syncEmptyDirs(ctx, b.fs1, copy2to1, dirs2, &results2to1, "make")
|
||||
}
|
||||
|
||||
if copy1to2.NotEmpty() {
|
||||
if copy1to2.NotEmpty() && !b.InGracefulShutdown {
|
||||
changes2 = true
|
||||
b.indent("Path1", "Path2", "Do queued copies to")
|
||||
err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
|
||||
if err != nil {
|
||||
ctx = b.setBackupDir(ctx, 2)
|
||||
results1to2, err = b.fastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2")
|
||||
|
||||
// retries, if any
|
||||
results1to2, err = b.retryFastCopy(ctx, b.fs1, b.fs2, copy1to2, "copy1to2", results1to2, err)
|
||||
|
||||
if !b.InGracefulShutdown && err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//copy empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, "make")
|
||||
b.syncEmptyDirs(ctx, b.fs2, copy1to2, dirs1, &results1to2, "make")
|
||||
}
|
||||
|
||||
if delete1.NotEmpty() {
|
||||
changes1 = true
|
||||
b.indent("", "Path1", "Do queued deletes on")
|
||||
err = b.fastDelete(ctx, b.fs1, delete1, "delete1")
|
||||
if err != nil {
|
||||
if delete1.NotEmpty() && !b.InGracefulShutdown {
|
||||
if err = b.saveQueue(delete1, "delete1"); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//propagate deletions of empty dirs from path2 to path1 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, "remove")
|
||||
b.syncEmptyDirs(ctx, b.fs1, delete1, dirs1, &results2to1, "remove")
|
||||
}
|
||||
|
||||
if delete2.NotEmpty() {
|
||||
changes2 = true
|
||||
b.indent("", "Path2", "Do queued deletes on")
|
||||
err = b.fastDelete(ctx, b.fs2, delete2, "delete2")
|
||||
if err != nil {
|
||||
if delete2.NotEmpty() && !b.InGracefulShutdown {
|
||||
if err = b.saveQueue(delete2, "delete2"); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//propagate deletions of empty dirs from path1 to path2 (if --create-empty-src-dirs)
|
||||
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, "remove")
|
||||
b.syncEmptyDirs(ctx, b.fs2, delete2, dirs2, &results1to2, "remove")
|
||||
}
|
||||
|
||||
queues.copy1to2 = copy1to2
|
||||
queues.copy2to1 = copy2to1
|
||||
queues.renameSkipped = renameSkipped
|
||||
queues.deletedonboth = deletedonboth
|
||||
queues.skippedDirs1 = skippedDirs1
|
||||
queues.skippedDirs2 = skippedDirs2
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@ -424,3 +565,65 @@ func (ds *deltaSet) excessDeletes() bool {
|
||||
maxDelete, ds.deleted, ds.oldCount, ds.msg, quotePath(bilib.FsPath(ds.fs)))
|
||||
return true
|
||||
}
|
||||
|
||||
// normally we build the AliasMap from march results,
|
||||
// however, march does not know about deleted files, so we need to manually check them for aliases
|
||||
func (b *bisyncRun) updateAliases(ctx context.Context, ds1, ds2 *deltaSet) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
// skip if not needed
|
||||
if ci.NoUnicodeNormalization && !ci.IgnoreCaseSync && !b.fs1.Features().CaseInsensitive && !b.fs2.Features().CaseInsensitive {
|
||||
return
|
||||
}
|
||||
if ds1.deleted < 1 && ds2.deleted < 1 {
|
||||
return
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "Updating AliasMap")
|
||||
|
||||
transform := func(s string) string {
|
||||
if !ci.NoUnicodeNormalization {
|
||||
s = norm.NFC.String(s)
|
||||
}
|
||||
// note: march only checks the dest, but we check both here
|
||||
if ci.IgnoreCaseSync || b.fs1.Features().CaseInsensitive || b.fs2.Features().CaseInsensitive {
|
||||
s = strings.ToLower(s)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
delMap1 := map[string]string{} // [transformedname]originalname
|
||||
delMap2 := map[string]string{} // [transformedname]originalname
|
||||
fullMap1 := map[string]string{} // [transformedname]originalname
|
||||
fullMap2 := map[string]string{} // [transformedname]originalname
|
||||
|
||||
for _, name := range ls1.list {
|
||||
fullMap1[transform(name)] = name
|
||||
}
|
||||
for _, name := range ls2.list {
|
||||
fullMap2[transform(name)] = name
|
||||
}
|
||||
|
||||
addDeletes := func(ds *deltaSet, delMap, fullMap map[string]string) {
|
||||
for _, file := range ds.sort() {
|
||||
d := ds.deltas[file]
|
||||
if d.is(deltaDeleted) {
|
||||
delMap[transform(file)] = file
|
||||
fullMap[transform(file)] = file
|
||||
}
|
||||
}
|
||||
}
|
||||
addDeletes(ds1, delMap1, fullMap1)
|
||||
addDeletes(ds2, delMap2, fullMap2)
|
||||
|
||||
addAliases := func(delMap, fullMap map[string]string) {
|
||||
for transformedname, name := range delMap {
|
||||
matchedName, found := fullMap[transformedname]
|
||||
if found && name != matchedName {
|
||||
fs.Debugf(name, "adding alias %s", matchedName)
|
||||
b.aliases.Add(name, matchedName)
|
||||
}
|
||||
}
|
||||
}
|
||||
addAliases(delMap1, fullMap2)
|
||||
addAliases(delMap2, fullMap1)
|
||||
}
|
||||
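The alias matching above hinges on the transform step: NFC-normalize, then lowercase when a case-insensitive backend is involved. A small standalone example of two spellings collapsing to the same key (the file names are made up):

	package main

	import (
		"fmt"
		"strings"

		"golang.org/x/text/unicode/norm"
	)

	// transform mirrors the normalization above: NFC-normalize, then lowercase
	// (the latter only when case differences should be ignored).
	func transform(s string, caseInsensitive bool) string {
		s = norm.NFC.String(s)
		if caseInsensitive {
			s = strings.ToLower(s)
		}
		return s
	}

	func main() {
		a := "Caf\u00e9.txt"  // precomposed "é"
		b := "cafe\u0301.txt" // "e" plus combining accent
		fmt.Println(a == b)                                   // false: byte-wise different
		fmt.Println(transform(a, true) == transform(b, true)) // true: treated as aliases of one file
	}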
|
@ -10,7 +10,7 @@ func makeHelp(help string) string {
|
||||
"|", "`",
|
||||
"{MAXDELETE}", strconv.Itoa(DefaultMaxDelete),
|
||||
"{CHECKFILE}", DefaultCheckFilename,
|
||||
"{WORKDIR}", DefaultWorkdir,
|
||||
// "{WORKDIR}", DefaultWorkdir,
|
||||
)
|
||||
return replacer.Replace(help)
|
||||
}
|
||||
@ -37,7 +37,9 @@ var rcHelp = makeHelp(`This takes the following parameters
|
||||
- ignoreListingChecksum - Do not use checksums for listings
|
||||
- resilient - Allow future runs to retry after certain less-serious errors, instead of requiring resync.
|
||||
Use at your own risk!
|
||||
- workdir - server directory for history files (default: {WORKDIR})
|
||||
- workdir - server directory for history files (default: |~/.cache/rclone/bisync|)
|
||||
- backupdir1 - --backup-dir for Path1. Must be a non-overlapping path on the same remote.
|
||||
- backupdir2 - --backup-dir for Path2. Must be a non-overlapping path on the same remote.
|
||||
- noCleanup - retain working files
|
||||
|
||||
See [bisync command help](https://rclone.org/commands/rclone_bisync/)
|
||||
@ -54,5 +56,10 @@ On each successive run it will:
|
||||
Changes include |New|, |Newer|, |Older|, and |Deleted| files.
|
||||
- Propagate changes on Path1 to Path2, and vice-versa.
|
||||
|
||||
Bisync is **in beta** and is considered an **advanced command**, so use with care.
|
||||
Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
|
||||
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
|
||||
or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
|
||||
|
||||
See [full bisync description](https://rclone.org/bisync/) for details.
|
||||
`)
|
||||
|
@ -5,18 +5,23 @@ package bisync
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
// ListingHeader defines first line of a listing
|
||||
@ -32,7 +37,7 @@ const ListingHeader = "# bisync listing v1 from"
|
||||
// id: "-" (reserved)
|
||||
const lineFormat = "%s %8d %s %s %s %q\n"
|
||||
|
||||
var lineRegex = regexp.MustCompile(`^(\S) +(\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`)
|
||||
var lineRegex = regexp.MustCompile(`^(\S) +(-?\d+) (\S+) (\S+) (\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d\.\d{9}[+-]\d{4}) (".+")$`)
|
||||
|
||||
// timeFormat defines time format used in listings
|
||||
const timeFormat = "2006-01-02T15:04:05.000000000-0700"
|
||||
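For reference, a listing line written with lineFormat and matched by lineRegex looks roughly like the following, with fields flags, size (padded by %8d), hash, id, modtime, quoted name; the values are illustrative only:

	-      109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2023-11-02T20:22:45.552679442+0000 "subdir/file1.txt"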
@ -65,27 +70,73 @@ func newFileList() *fileList {
|
||||
}
|
||||
|
||||
func (ls *fileList) empty() bool {
|
||||
if ls == nil {
|
||||
return true
|
||||
}
|
||||
return len(ls.list) == 0
|
||||
}
|
||||
|
||||
func (ls *fileList) has(file string) bool {
|
||||
if file == "" {
|
||||
fs.Debugf(nil, "called ls.has() with blank string")
|
||||
return false
|
||||
}
|
||||
_, found := ls.info[file]
|
||||
if !found {
|
||||
//try unquoting
|
||||
file, _ = strconv.Unquote(`"` + file + `"`)
|
||||
_, found = ls.info[file]
|
||||
}
|
||||
return found
|
||||
}
|
||||
|
||||
func (ls *fileList) get(file string) *fileInfo {
|
||||
return ls.info[file]
|
||||
info, found := ls.info[file]
|
||||
if !found {
|
||||
//try unquoting
|
||||
file, _ = strconv.Unquote(`"` + file + `"`)
|
||||
info = ls.info[fmt.Sprint(file)]
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
func (ls *fileList) put(file string, size int64, time time.Time, hash, id string, flags string) {
|
||||
// copy file from ls to dest
|
||||
func (ls *fileList) getPut(file string, dest *fileList) {
|
||||
f := ls.get(file)
|
||||
dest.put(file, f.size, f.time, f.hash, f.id, f.flags)
|
||||
}
|
||||
|
||||
func (ls *fileList) getPutAll(dest *fileList) {
|
||||
for file, f := range ls.info {
|
||||
dest.put(file, f.size, f.time, f.hash, f.id, f.flags)
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *fileList) remove(file string) {
|
||||
if ls.has(file) {
|
||||
ls.list = slices.Delete(ls.list, slices.Index(ls.list, file), slices.Index(ls.list, file)+1)
|
||||
delete(ls.info, file)
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *fileList) put(file string, size int64, modtime time.Time, hash, id string, flags string) {
|
||||
fi := ls.get(file)
|
||||
if fi != nil {
|
||||
fi.size = size
|
||||
fi.time = time
|
||||
// if we already have a higher-precision copy of the same time, avoid overwriting it
|
||||
if fi.time != modtime {
|
||||
if modtime.Before(fi.time) && fi.time.Sub(modtime) < time.Second {
|
||||
modtime = fi.time
|
||||
}
|
||||
}
|
||||
fi.time = modtime
|
||||
fi.hash = hash
|
||||
fi.id = id
|
||||
fi.flags = flags
|
||||
} else {
|
||||
fi = &fileInfo{
|
||||
size: size,
|
||||
time: time,
|
||||
time: modtime,
|
||||
hash: hash,
|
||||
id: id,
|
||||
flags: flags,
|
||||
@ -95,6 +146,15 @@ func (ls *fileList) put(file string, size int64, time time.Time, hash, id string
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *fileList) getTryAlias(file, alias string) string {
|
||||
if ls.has(file) {
|
||||
return file
|
||||
} else if ls.has(alias) {
|
||||
return alias
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ls *fileList) getTime(file string) time.Time {
|
||||
fi := ls.get(file)
|
||||
if fi == nil {
|
||||
@ -103,6 +163,59 @@ func (ls *fileList) getTime(file string) time.Time {
|
||||
return fi.time
|
||||
}
|
||||
|
||||
func (ls *fileList) getSize(file string) int64 {
|
||||
fi := ls.get(file)
|
||||
if fi == nil {
|
||||
return 0
|
||||
}
|
||||
return fi.size
|
||||
}
|
||||
|
||||
func (ls *fileList) getHash(file string) string {
|
||||
fi := ls.get(file)
|
||||
if fi == nil {
|
||||
return ""
|
||||
}
|
||||
return fi.hash
|
||||
}
|
||||
|
||||
func (b *bisyncRun) fileInfoEqual(file1, file2 string, ls1, ls2 *fileList) bool {
|
||||
equal := true
|
||||
if ls1.isDir(file1) && ls2.isDir(file2) {
|
||||
return equal
|
||||
}
|
||||
if b.opt.Compare.Size {
|
||||
if sizeDiffers(ls1.getSize(file1), ls2.getSize(file2)) {
|
||||
b.indent("ERROR", file1, fmt.Sprintf("Size not equal in listing. Path1: %v, Path2: %v", ls1.getSize(file1), ls2.getSize(file2)))
|
||||
equal = false
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Modtime {
|
||||
if timeDiffers(b.fctx, ls1.getTime(file1), ls2.getTime(file2), b.fs1, b.fs2) {
|
||||
b.indent("ERROR", file1, fmt.Sprintf("Modtime not equal in listing. Path1: %v, Path2: %v", ls1.getTime(file1), ls2.getTime(file2)))
|
||||
equal = false
|
||||
}
|
||||
}
|
||||
if b.opt.Compare.Checksum && !ignoreListingChecksum {
|
||||
if hashDiffers(ls1.getHash(file1), ls2.getHash(file2), b.opt.Compare.HashType1, b.opt.Compare.HashType2, ls1.getSize(file1), ls2.getSize(file2)) {
|
||||
b.indent("ERROR", file1, fmt.Sprintf("Checksum not equal in listing. Path1: %v, Path2: %v", ls1.getHash(file1), ls2.getHash(file2)))
|
||||
equal = false
|
||||
}
|
||||
}
|
||||
return equal
|
||||
}
|
||||
|
||||
// also returns false if not found
|
||||
func (ls *fileList) isDir(file string) bool {
|
||||
fi := ls.get(file)
|
||||
if fi != nil {
|
||||
if fi.flags == "d" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ls *fileList) beforeOther(other *fileList, file string) bool {
|
||||
thisTime := ls.getTime(file)
|
||||
thatTime := other.getTime(file)
|
||||
@ -120,12 +233,20 @@ func (ls *fileList) afterTime(file string, time time.Time) bool {
|
||||
return fi.time.After(time)
|
||||
}
|
||||
|
||||
// sort by path name
|
||||
func (ls *fileList) sort() {
|
||||
sort.SliceStable(ls.list, func(i, j int) bool {
|
||||
return ls.list[i] < ls.list[j]
|
||||
})
|
||||
}
|
||||
|
||||
// save will save listing to a file.
|
||||
func (ls *fileList) save(ctx context.Context, listing string) error {
|
||||
file, err := os.Create(listing)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ls.sort()
|
||||
|
||||
hashName := ""
|
||||
if ls.hash != hash.None {
|
||||
@ -172,7 +293,6 @@ func (ls *fileList) save(ctx context.Context, listing string) error {
|
||||
|
||||
// loadListing will load listing from a file.
|
||||
// The key is the path to the file relative to the Path1/Path2 base.
|
||||
// File size of -1, as for Google Docs, prints a warning and won't be loaded.
|
||||
func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
|
||||
file, err := os.Open(listing)
|
||||
if err != nil {
|
||||
@ -241,6 +361,24 @@ func (b *bisyncRun) loadListing(listing string) (*fileList, error) {
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
// saveOldListings saves the most recent successful listing, in case we need to rollback on error
|
||||
func (b *bisyncRun) saveOldListings() {
|
||||
b.handleErr(b.listing1, "error saving old Path1 listing", bilib.CopyFileIfExists(b.listing1, b.listing1+"-old"), true, true)
|
||||
b.handleErr(b.listing2, "error saving old Path2 listing", bilib.CopyFileIfExists(b.listing2, b.listing2+"-old"), true, true)
|
||||
}
|
||||
|
||||
// replaceCurrentListings saves both ".lst-new" listings as ".lst"
|
||||
func (b *bisyncRun) replaceCurrentListings() {
|
||||
b.handleErr(b.newListing1, "error replacing Path1 listing", bilib.CopyFileIfExists(b.newListing1, b.listing1), true, true)
|
||||
b.handleErr(b.newListing2, "error replacing Path2 listing", bilib.CopyFileIfExists(b.newListing2, b.listing2), true, true)
|
||||
}
|
||||
|
||||
// revertToOldListings reverts to the most recent successful listing
|
||||
func (b *bisyncRun) revertToOldListings() {
|
||||
b.handleErr(b.listing1, "error reverting to old Path1 listing", bilib.CopyFileIfExists(b.listing1+"-old", b.listing1), true, true)
|
||||
b.handleErr(b.listing2, "error reverting to old Path2 listing", bilib.CopyFileIfExists(b.listing2+"-old", b.listing2), true, true)
|
||||
}
|
||||
|
||||
func parseHash(str string) (string, string, error) {
|
||||
if str == "-" {
|
||||
return "", "", nil
|
||||
@ -254,71 +392,6 @@ func parseHash(str string) (string, string, error) {
|
||||
return "", "", fmt.Errorf("invalid hash %q", str)
|
||||
}
|
||||
|
||||
// makeListing will produce listing from directory tree and write it to a file
|
||||
func (b *bisyncRun) makeListing(ctx context.Context, f fs.Fs, listing string) (ls *fileList, err error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
depth := ci.MaxDepth
|
||||
hashType := hash.None
|
||||
if !b.opt.IgnoreListingChecksum {
|
||||
// Currently bisync just honors --ignore-listing-checksum
|
||||
// (note that this is different from --ignore-checksum)
|
||||
// TODO add full support for checksums and related flags
|
||||
hashType = f.Hashes().GetOne()
|
||||
}
|
||||
ls = newFileList()
|
||||
ls.hash = hashType
|
||||
var lock sync.Mutex
|
||||
listType := walk.ListObjects
|
||||
if b.opt.CreateEmptySrcDirs {
|
||||
listType = walk.ListAll
|
||||
}
|
||||
err = walk.ListR(ctx, f, "", false, depth, listType, func(entries fs.DirEntries) error {
|
||||
var firstErr error
|
||||
entries.ForObject(func(o fs.Object) {
|
||||
//tr := accounting.Stats(ctx).NewCheckingTransfer(o) // TODO
|
||||
var (
|
||||
hashVal string
|
||||
hashErr error
|
||||
)
|
||||
if hashType != hash.None {
|
||||
hashVal, hashErr = o.Hash(ctx, hashType)
|
||||
if firstErr == nil {
|
||||
firstErr = hashErr
|
||||
}
|
||||
}
|
||||
time := o.ModTime(ctx).In(TZ)
|
||||
id := "" // TODO
|
||||
flags := "-" // "-" for a file and "d" for a directory
|
||||
lock.Lock()
|
||||
ls.put(o.Remote(), o.Size(), time, hashVal, id, flags)
|
||||
lock.Unlock()
|
||||
//tr.Done(ctx, nil) // TODO
|
||||
})
|
||||
if b.opt.CreateEmptySrcDirs {
|
||||
entries.ForDir(func(o fs.Directory) {
|
||||
var (
|
||||
hashVal string
|
||||
)
|
||||
time := o.ModTime(ctx).In(TZ)
|
||||
id := "" // TODO
|
||||
flags := "d" // "-" for a file and "d" for a directory
|
||||
lock.Lock()
|
||||
//record size as 0 instead of -1, so bisync doesn't think it's a google doc
|
||||
ls.put(o.Remote(), 0, time, hashVal, id, flags)
|
||||
lock.Unlock()
|
||||
})
|
||||
}
|
||||
return firstErr
|
||||
})
|
||||
if err == nil {
|
||||
err = ls.save(ctx, listing)
|
||||
}
|
||||
if err != nil {
|
||||
b.abort = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// checkListing verifies that listing is not empty (unless resynching)
|
||||
func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
|
||||
if b.opt.Resync || !ls.empty() {
|
||||
@ -376,3 +449,439 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
|
||||
|
||||
return dirsonly, err
|
||||
}
|
||||
|
||||
// ConvertPrecision returns the Modtime rounded to Dest's precision if lower, otherwise unchanged
|
||||
// Need to use the other fs's precision (if lower) when copying
|
||||
// Note: we need to use Truncate rather than Round so that After() is reliable.
|
||||
// (2023-11-02 20:22:45.552679442 +0000 UTC < 2023-11-02 20:22:45.553 +0000 UTC)
|
||||
func ConvertPrecision(Modtime time.Time, dst fs.Fs) time.Time {
|
||||
DestPrecision := dst.Precision()
|
||||
|
||||
// In case it's wrapping an Fs with lower precision, try unwrapping and use the lowest.
|
||||
if Modtime.Truncate(DestPrecision).After(Modtime.Truncate(fs.UnWrapFs(dst).Precision())) {
|
||||
DestPrecision = fs.UnWrapFs(dst).Precision()
|
||||
}
|
||||
|
||||
if Modtime.After(Modtime.Truncate(DestPrecision)) {
|
||||
return Modtime.Truncate(DestPrecision)
|
||||
}
|
||||
return Modtime
|
||||
}
|
||||
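The Truncate-versus-Round point in the comment above can be checked in isolation. A small standalone example, reusing the timestamps from that comment, shows why Round could make the destination look newer than the source and break After():

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		// Nanosecond-precision source time vs a millisecond-precision destination
		// (timestamps taken from the comment above).
		src := time.Date(2023, time.November, 2, 20, 22, 45, 552679442, time.UTC)
		precision := time.Millisecond

		rounded := src.Round(precision)      // 20:22:45.553, rounds forward
		truncated := src.Truncate(precision) // 20:22:45.552, never moves forward

		fmt.Println(rounded.After(src))   // true: rounding can make the copy look newer than the source
		fmt.Println(truncated.After(src)) // false: truncating keeps After() reliable
	}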
|
||||
// modifyListing will modify the listing based on the results of the sync
|
||||
func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, results []Results, queues queues, is1to2 bool) (err error) {
|
||||
queue := queues.copy2to1
|
||||
direction := "2to1"
|
||||
if is1to2 {
|
||||
queue = queues.copy1to2
|
||||
direction = "1to2"
|
||||
}
|
||||
|
||||
fs.Debugf(nil, "updating %s", direction)
|
||||
prettyprint(results, "results", fs.LogLevelDebug)
|
||||
prettyprint(queue, "queue", fs.LogLevelDebug)
|
||||
|
||||
srcListing, dstListing := b.getListingNames(is1to2)
|
||||
srcList, err := b.loadListing(srcListing)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read prior listing: %w", err)
|
||||
}
|
||||
dstList, err := b.loadListing(dstListing)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read prior listing: %w", err)
|
||||
}
|
||||
// set list hash type
|
||||
if b.opt.Resync && !b.opt.IgnoreListingChecksum {
|
||||
if is1to2 {
|
||||
srcList.hash = b.opt.Compare.HashType1
|
||||
dstList.hash = b.opt.Compare.HashType2
|
||||
} else {
|
||||
srcList.hash = b.opt.Compare.HashType2
|
||||
dstList.hash = b.opt.Compare.HashType1
|
||||
}
|
||||
if b.opt.Compare.DownloadHash && srcList.hash == hash.None {
|
||||
srcList.hash = hash.MD5
|
||||
}
|
||||
if b.opt.Compare.DownloadHash && dstList.hash == hash.None {
|
||||
dstList.hash = hash.MD5
|
||||
}
|
||||
}
|
||||
|
||||
b.debugFn(b.DebugName, func() {
|
||||
var rs ResultsSlice = results
|
||||
b.debug(b.DebugName, fmt.Sprintf("modifyListing direction: %s, results has name?: %v", direction, rs.has(b.DebugName)))
|
||||
b.debug(b.DebugName, fmt.Sprintf("modifyListing direction: %s, srcList has name?: %v, dstList has name?: %v", direction, srcList.has(b.DebugName), dstList.has(b.DebugName)))
|
||||
})
|
||||
|
||||
srcWinners := newFileList()
|
||||
dstWinners := newFileList()
|
||||
errors := newFileList()
|
||||
ctxRecheck, filterRecheck := filter.AddConfig(ctx)
|
||||
|
||||
for _, result := range results {
|
||||
if result.Name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if result.AltName != "" {
|
||||
b.aliases.Add(result.Name, result.AltName)
|
||||
}
|
||||
|
||||
if result.Flags == "d" && !b.opt.CreateEmptySrcDirs {
|
||||
continue
|
||||
}
|
||||
|
||||
// build src winners list
|
||||
if result.IsSrc && result.Src != "" && (result.Winner.Err == nil || result.Flags == "d") {
|
||||
srcWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, src), result.Hash, "-", result.Flags)
|
||||
prettyprint(result, "winner: copy to src", fs.LogLevelDebug)
|
||||
}
|
||||
|
||||
// build dst winners list
|
||||
if result.IsWinner && result.Winner.Side != "none" && (result.Winner.Err == nil || result.Flags == "d") {
|
||||
dstWinners.put(result.Name, result.Size, ConvertPrecision(result.Modtime, dst), result.Hash, "-", result.Flags)
|
||||
prettyprint(result, "winner: copy to dst", fs.LogLevelDebug)
|
||||
}
|
||||
|
||||
// build errors list
|
||||
if result.Err != nil || result.Winner.Err != nil {
|
||||
errors.put(result.Name, result.Size, result.Modtime, result.Hash, "-", result.Flags)
|
||||
if err := filterRecheck.AddFile(result.Name); err != nil {
|
||||
fs.Debugf(result.Name, "error adding file to recheck filter: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ci := fs.GetConfig(ctx)
|
||||
updateLists := func(side string, winners, list *fileList) {
|
||||
for _, queueFile := range queue.ToList() {
|
||||
if !winners.has(queueFile) && list.has(queueFile) && !errors.has(queueFile) {
|
||||
// removals from side
|
||||
list.remove(queueFile)
|
||||
fs.Debugf(nil, "decision: removed from %s: %v", side, queueFile)
|
||||
} else if winners.has(queueFile) {
|
||||
// copies to side
|
||||
new := winners.get(queueFile)
|
||||
|
||||
// handle normalization
|
||||
if side == "dst" {
|
||||
alias := b.aliases.Alias(queueFile)
|
||||
if alias != queueFile {
|
||||
// use the (non-identical) existing name, unless --fix-case
|
||||
if ci.FixCase {
|
||||
fs.Debugf(direction, "removing %s and adding %s as --fix-case was specified", alias, queueFile)
|
||||
list.remove(alias)
|
||||
} else {
|
||||
fs.Debugf(direction, "casing/unicode difference detected. using %s instead of %s", alias, queueFile)
|
||||
queueFile = alias
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
list.put(queueFile, new.size, new.time, new.hash, new.id, new.flags)
|
||||
fs.Debugf(nil, "decision: copied to %s: %v", side, queueFile)
|
||||
} else {
|
||||
fs.Debugf(queueFile, "file in queue but missing from %s transfers", side)
|
||||
if err := filterRecheck.AddFile(queueFile); err != nil {
|
||||
fs.Debugf(queueFile, "error adding file to recheck filter: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
updateLists("src", srcWinners, srcList)
|
||||
updateLists("dst", dstWinners, dstList)
|
||||
|
||||
// account for "deltaOthers" we handled separately
|
||||
if queues.deletedonboth.NotEmpty() {
|
||||
for file := range queues.deletedonboth {
|
||||
srcList.remove(file)
|
||||
dstList.remove(file)
|
||||
}
|
||||
}
|
||||
if b.renames.NotEmpty() && !b.opt.DryRun {
|
||||
// renamed on src and copied to dst
|
||||
for _, rename := range b.renames {
|
||||
srcOldName, srcNewName, dstOldName, dstNewName := rename.getNames(is1to2)
|
||||
fs.Debugf(nil, "%s: srcOldName: %v srcNewName: %v dstOldName: %v dstNewName: %v", direction, srcOldName, srcNewName, dstOldName, dstNewName)
|
||||
// we'll handle the other side when we go the other direction
|
||||
var new *fileInfo
|
||||
// we prefer to get the info from the newNamed versions
|
||||
// since they were actually copied as opposed to operations.MoveFile()'d.
|
||||
// the size/time/hash info is therefore fresher on the renames
|
||||
// but we'll settle for the original if we have to.
|
||||
if srcList.has(srcNewName) {
|
||||
new = srcList.get(srcNewName)
|
||||
} else if srcList.has(dstNewName) {
|
||||
new = srcList.get(dstNewName)
|
||||
} else if srcList.has(srcOldName) {
|
||||
new = srcList.get(srcOldName)
|
||||
} else {
|
||||
// something's odd, so let's recheck
|
||||
if err := filterRecheck.AddFile(srcOldName); err != nil {
|
||||
fs.Debugf(srcOldName, "error adding file to recheck filter: %v", err)
|
||||
}
|
||||
}
|
||||
if srcNewName != "" { // if it was renamed and not deleted
|
||||
srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
|
||||
dstList.put(srcNewName, new.size, ConvertPrecision(new.time, src), new.hash, new.id, new.flags)
|
||||
}
|
||||
if srcNewName != srcOldName {
|
||||
srcList.remove(srcOldName)
|
||||
}
|
||||
if srcNewName != dstOldName {
|
||||
dstList.remove(dstOldName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// recheck the ones we skipped because they were equal
|
||||
// we never got their info because they were never synced.
|
||||
// TODO: add flag to skip this? (since it re-lists)
|
||||
if queues.renameSkipped.NotEmpty() {
|
||||
skippedList := queues.renameSkipped.ToList()
|
||||
for _, file := range skippedList {
|
||||
if err := filterRecheck.AddFile(file); err != nil {
|
||||
fs.Debugf(file, "error adding file to recheck filter: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
// skipped dirs -- nothing to recheck, just add them
|
||||
// (they are not necessarily there already, if they are new)
|
||||
path1List := srcList
|
||||
path2List := dstList
|
||||
if !is1to2 {
|
||||
path1List = dstList
|
||||
path2List = srcList
|
||||
}
|
||||
if !queues.skippedDirs1.empty() {
|
||||
queues.skippedDirs1.getPutAll(path1List)
|
||||
}
|
||||
if !queues.skippedDirs2.empty() {
|
||||
queues.skippedDirs2.getPutAll(path2List)
|
||||
}
|
||||
|
||||
if filterRecheck.HaveFilesFrom() {
|
||||
// also include any aliases
|
||||
recheckFiles := filterRecheck.Files()
|
||||
for recheckFile := range recheckFiles {
|
||||
alias := b.aliases.Alias(recheckFile)
|
||||
if recheckFile != alias {
|
||||
if err := filterRecheck.AddFile(alias); err != nil {
|
||||
fs.Debugf(alias, "error adding file to recheck filter: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
b.recheck(ctxRecheck, src, dst, srcList, dstList, is1to2)
|
||||
}
|
||||
|
||||
if b.InGracefulShutdown {
|
||||
var toKeep []string
|
||||
var toRollback []string
|
||||
fs.Debugf(direction, "stats for %s", direction)
|
||||
trs := accounting.Stats(ctx).Transferred()
|
||||
for _, tr := range trs {
|
||||
b.debugFn(tr.Name, func() {
|
||||
prettyprint(tr, tr.Name, fs.LogLevelInfo)
|
||||
})
|
||||
if tr.Error == nil && tr.Bytes > 0 || tr.Size <= 0 {
|
||||
prettyprint(tr, "keeping: "+tr.Name, fs.LogLevelDebug)
|
||||
toKeep = append(toKeep, tr.Name)
|
||||
}
|
||||
}
|
||||
// Dirs (for the unlikely event that the shutdown was triggered post-sync during syncEmptyDirs)
|
||||
for _, r := range results {
|
||||
if r.Origin == "syncEmptyDirs" {
|
||||
if srcWinners.has(r.Name) || dstWinners.has(r.Name) {
|
||||
toKeep = append(toKeep, r.Name)
|
||||
fs.Infof(r.Name, "keeping empty dir")
|
||||
}
|
||||
}
|
||||
}
|
||||
oldSrc, oldDst := b.getOldLists(is1to2)
|
||||
prettyprint(oldSrc.list, "oldSrc", fs.LogLevelDebug)
|
||||
prettyprint(oldDst.list, "oldDst", fs.LogLevelDebug)
|
||||
prettyprint(srcList.list, "srcList", fs.LogLevelDebug)
|
||||
prettyprint(dstList.list, "dstList", fs.LogLevelDebug)
|
||||
combinedList := Concat(oldSrc.list, oldDst.list, srcList.list, dstList.list)
|
||||
for _, f := range combinedList {
|
||||
if !slices.Contains(toKeep, f) && !slices.Contains(toKeep, b.aliases.Alias(f)) && !b.opt.DryRun {
|
||||
toRollback = append(toRollback, f)
|
||||
}
|
||||
}
|
||||
b.prepareRollback(toRollback, srcList, dstList, is1to2)
|
||||
prettyprint(oldSrc.list, "oldSrc", fs.LogLevelDebug)
|
||||
prettyprint(oldDst.list, "oldDst", fs.LogLevelDebug)
|
||||
prettyprint(srcList.list, "srcList", fs.LogLevelDebug)
|
||||
prettyprint(dstList.list, "dstList", fs.LogLevelDebug)
|
||||
|
||||
// clear stats so we only do this once
|
||||
accounting.MaxCompletedTransfers = 0
|
||||
accounting.Stats(ctx).PruneTransfers()
|
||||
}
|
||||
|
||||
if b.DebugName != "" {
|
||||
b.debug(b.DebugName, fmt.Sprintf("%s pre-save srcList has it?: %v", direction, srcList.has(b.DebugName)))
|
||||
b.debug(b.DebugName, fmt.Sprintf("%s pre-save dstList has it?: %v", direction, dstList.has(b.DebugName)))
|
||||
}
|
||||
// update files
|
||||
err = srcList.save(ctx, srcListing)
|
||||
b.handleErr(srcList, "error saving srcList from modifyListing", err, true, true)
|
||||
err = dstList.save(ctx, dstListing)
|
||||
b.handleErr(dstList, "error saving dstList from modifyListing", err, true, true)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// recheck the ones we're not sure about
|
||||
func (b *bisyncRun) recheck(ctxRecheck context.Context, src, dst fs.Fs, srcList, dstList *fileList, is1to2 bool) {
|
||||
var srcObjs []fs.Object
|
||||
var dstObjs []fs.Object
|
||||
var resolved []string
|
||||
var toRollback []string
|
||||
|
||||
if err := operations.ListFn(ctxRecheck, src, func(obj fs.Object) {
|
||||
srcObjs = append(srcObjs, obj)
|
||||
}); err != nil {
|
||||
fs.Debugf(src, "error recchecking src obj: %v", err)
|
||||
}
|
||||
if err := operations.ListFn(ctxRecheck, dst, func(obj fs.Object) {
|
||||
dstObjs = append(dstObjs, obj)
|
||||
}); err != nil {
|
||||
fs.Debugf(dst, "error recchecking dst obj: %v", err)
|
||||
}
|
||||
|
||||
putObj := func(obj fs.Object, list *fileList) {
|
||||
hashVal := ""
|
||||
if !b.opt.IgnoreListingChecksum {
|
||||
hashType := list.hash
|
||||
if hashType != hash.None {
|
||||
hashVal, _ = obj.Hash(ctxRecheck, hashType)
|
||||
}
|
||||
hashVal, _ = tryDownloadHash(ctxRecheck, obj, hashVal)
|
||||
}
|
||||
var modtime time.Time
|
||||
if b.opt.Compare.Modtime {
|
||||
modtime = obj.ModTime(ctxRecheck).In(TZ)
|
||||
}
|
||||
list.put(obj.Remote(), obj.Size(), modtime, hashVal, "-", "-")
|
||||
}
|
||||
|
||||
for _, srcObj := range srcObjs {
|
||||
fs.Debugf(srcObj, "rechecking")
|
||||
for _, dstObj := range dstObjs {
|
||||
if srcObj.Remote() == dstObj.Remote() || srcObj.Remote() == b.aliases.Alias(dstObj.Remote()) {
|
||||
// note: unlike Equal(), WhichEqual() does not update the modtime in dest if sums match but modtimes don't.
|
||||
if b.opt.DryRun || WhichEqual(ctxRecheck, srcObj, dstObj, src, dst) {
|
||||
putObj(srcObj, srcList)
|
||||
putObj(dstObj, dstList)
|
||||
resolved = append(resolved, srcObj.Remote())
|
||||
} else {
|
||||
fs.Infof(srcObj, "files not equal on recheck: %v %v", srcObj, dstObj)
|
||||
}
|
||||
}
|
||||
}
|
||||
// if srcObj not resolved by now (either because no dstObj match or files not equal),
|
||||
// roll it back to old version, so it gets retried next time.
|
||||
// skip and error during --resync, as rollback is not possible
|
||||
if !slices.Contains(resolved, srcObj.Remote()) && !b.opt.DryRun {
|
||||
if b.opt.Resync {
|
||||
err = errors.New("no dstObj match or files not equal")
|
||||
b.handleErr(srcObj, "Unable to rollback during --resync", err, true, false)
|
||||
} else {
|
||||
toRollback = append(toRollback, srcObj.Remote())
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(toRollback) > 0 {
|
||||
srcListing, dstListing := b.getListingNames(is1to2)
|
||||
oldSrc, err := b.loadListing(srcListing + "-old")
|
||||
b.handleErr(oldSrc, "error loading old src listing", err, true, true)
|
||||
oldDst, err := b.loadListing(dstListing + "-old")
|
||||
b.handleErr(oldDst, "error loading old dst listing", err, true, true)
|
||||
if b.critical {
|
||||
return
|
||||
}
|
||||
|
||||
for _, item := range toRollback {
|
||||
b.rollback(item, oldSrc, srcList)
|
||||
b.rollback(item, oldDst, dstList)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bisyncRun) getListingNames(is1to2 bool) (srcListing string, dstListing string) {
|
||||
if is1to2 {
|
||||
return b.listing1, b.listing2
|
||||
}
|
||||
return b.listing2, b.listing1
|
||||
}
|
||||
|
||||
func (b *bisyncRun) rollback(item string, oldList, newList *fileList) {
|
||||
alias := b.aliases.Alias(item)
|
||||
if oldList.has(item) {
|
||||
oldList.getPut(item, newList)
|
||||
fs.Debugf(nil, "adding to newlist: %s", item)
|
||||
} else if oldList.has(alias) {
|
||||
oldList.getPut(alias, newList)
|
||||
fs.Debugf(nil, "adding to newlist: %s", alias)
|
||||
} else {
|
||||
fs.Debugf(nil, "removing from newlist: %s (has it?: %v)", item, newList.has(item))
|
||||
prettyprint(newList.list, "newList", fs.LogLevelDebug)
|
||||
newList.remove(item)
|
||||
newList.remove(alias)
|
||||
}
|
||||
}
|
||||
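// Editor's note (illustrative, not part of the original commit): rollback
// restores a single entry in newList from oldList, falling back to the
// recorded alias, and removes the entry entirely when neither spelling exists
// in the old listing. For example, if "FILE.txt" was recorded under the alias
// "file.txt" on the other side, either name is enough to restore it.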
|
||||
func (b *bisyncRun) prepareRollback(toRollback []string, srcList, dstList *fileList, is1to2 bool) {
|
||||
if len(toRollback) > 0 {
|
||||
oldSrc, oldDst := b.getOldLists(is1to2)
|
||||
if b.critical {
|
||||
return
|
||||
}
|
||||
|
||||
fs.Debugf("new lists", "src: (%v), dest: (%v)", len(srcList.list), len(dstList.list))
|
||||
|
||||
for _, item := range toRollback {
|
||||
b.debugFn(item, func() {
|
||||
b.debug(item, fmt.Sprintf("pre-rollback oldSrc has it?: %v", oldSrc.has(item)))
|
||||
b.debug(item, fmt.Sprintf("pre-rollback oldDst has it?: %v", oldDst.has(item)))
|
||||
b.debug(item, fmt.Sprintf("pre-rollback srcList has it?: %v", srcList.has(item)))
|
||||
b.debug(item, fmt.Sprintf("pre-rollback dstList has it?: %v", dstList.has(item)))
|
||||
})
|
||||
b.rollback(item, oldSrc, srcList)
|
||||
b.rollback(item, oldDst, dstList)
|
||||
b.debugFn(item, func() {
|
||||
b.debug(item, fmt.Sprintf("post-rollback oldSrc has it?: %v", oldSrc.has(item)))
|
||||
b.debug(item, fmt.Sprintf("post-rollback oldDst has it?: %v", oldDst.has(item)))
|
||||
b.debug(item, fmt.Sprintf("post-rollback srcList has it?: %v", srcList.has(item)))
|
||||
b.debug(item, fmt.Sprintf("post-rollback dstList has it?: %v", dstList.has(item)))
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bisyncRun) getOldLists(is1to2 bool) (*fileList, *fileList) {
|
||||
srcListing, dstListing := b.getListingNames(is1to2)
|
||||
oldSrc, err := b.loadListing(srcListing + "-old")
|
||||
b.handleErr(oldSrc, "error loading old src listing", err, true, true)
|
||||
oldDst, err := b.loadListing(dstListing + "-old")
|
||||
b.handleErr(oldDst, "error loading old dst listing", err, true, true)
|
||||
fs.Debugf("get old lists", "is1to2: %v, oldsrc: %s (%v), olddest: %s (%v)", is1to2, srcListing+"-old", len(oldSrc.list), dstListing+"-old", len(oldDst.list))
|
||||
return oldSrc, oldDst
|
||||
}
|
||||
|
||||
// Concat returns a new slice concatenating the passed in slices.
|
||||
func Concat[S ~[]E, E any](ss ...S) S {
|
||||
size := 0
|
||||
for _, s := range ss {
|
||||
size += len(s)
|
||||
if size < 0 {
|
||||
panic("len out of range")
|
||||
}
|
||||
}
|
||||
newslice := slices.Grow[S](nil, size)
|
||||
for _, s := range ss {
|
||||
newslice = append(newslice, s...)
|
||||
}
|
||||
return newslice
|
||||
}
|
||||
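// exampleConcatUsage is an illustrative sketch added for clarity (it is not
// part of the original commit). It shows that Concat works for any element
// type and simply appends the slices in order, without de-duplicating.
func exampleConcatUsage() {
	oldNames := []string{"a.txt", "b.txt"}
	newNames := []string{"b.txt", "c.txt"}
	combined := Concat(oldNames, newNames)
	fs.Debugf(nil, "combined: %v", combined) // [a.txt b.txt b.txt c.txt]
}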
|
154
cmd/bisync/lockfile.go
Normal file
@ -0,0 +1,154 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
)
|
||||
|
||||
const basicallyforever = 200 * 365 * 24 * time.Hour
|
||||
|
||||
var stopRenewal func()
|
||||
|
||||
var data = struct {
|
||||
Session string
|
||||
PID string
|
||||
TimeRenewed time.Time
|
||||
TimeExpires time.Time
|
||||
}{}
|
||||
|
||||
func (b *bisyncRun) setLockFile() error {
|
||||
b.lockFile = ""
|
||||
b.setLockFileExpiration()
|
||||
if !b.opt.DryRun {
|
||||
b.lockFile = b.basePath + ".lck"
|
||||
if bilib.FileExists(b.lockFile) {
|
||||
if !b.lockFileIsExpired() {
|
||||
errTip := Color(terminal.MagentaFg, "Tip: this indicates that another bisync run (of these same paths) either is still running or was interrupted before completion. \n")
|
||||
errTip += Color(terminal.MagentaFg, "If you're SURE you want to override this safety feature, you can delete the lock file with the following command, then run bisync again: \n")
|
||||
errTip += fmt.Sprintf(Color(terminal.HiRedFg, "rclone deletefile \"%s\""), b.lockFile)
|
||||
return fmt.Errorf(Color(terminal.RedFg, "prior lock file found: %s \n")+errTip, Color(terminal.HiYellowFg, b.lockFile))
|
||||
}
|
||||
}
|
||||
|
||||
pidStr := []byte(strconv.Itoa(os.Getpid()))
|
||||
if err = os.WriteFile(b.lockFile, pidStr, bilib.PermSecure); err != nil {
|
||||
return fmt.Errorf(Color(terminal.RedFg, "cannot create lock file: %s: %w"), b.lockFile, err)
|
||||
}
|
||||
fs.Debugf(nil, "Lock file created: %s", b.lockFile)
|
||||
b.renewLockFile()
|
||||
stopRenewal = b.startLockRenewal()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
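// Editor's sketch (an assumption about intended usage, not part of the
// original commit): the lock lifecycle is roughly
//
//	if err := b.setLockFile(); err != nil {
//		return err // a still-valid lock from another run exists
//	}
//	// ... run bisync ...
//	b.removeLockFile()
//
// setLockFile also starts the background renewal goroutine via
// startLockRenewal; removeLockFile stops that goroutine before deleting the
// lock file.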
|
||||
func (b *bisyncRun) removeLockFile() {
|
||||
if b.lockFile != "" {
|
||||
stopRenewal()
|
||||
errUnlock := os.Remove(b.lockFile)
|
||||
if errUnlock == nil {
|
||||
fs.Debugf(nil, "Lock file removed: %s", b.lockFile)
|
||||
} else if err == nil {
|
||||
err = errUnlock
|
||||
} else {
|
||||
fs.Errorf(nil, "cannot remove lockfile %s: %v", b.lockFile, errUnlock)
|
||||
}
|
||||
b.lockFile = "" // block removing it again
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bisyncRun) setLockFileExpiration() {
|
||||
if b.opt.MaxLock > 0 && b.opt.MaxLock < 2*time.Minute {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "--max-lock cannot be shorter than 2 minutes (unless 0.) Changing --max-lock from %v to %v"), b.opt.MaxLock, 2*time.Minute)
|
||||
b.opt.MaxLock = 2 * time.Minute
|
||||
} else if b.opt.MaxLock <= 0 {
|
||||
b.opt.MaxLock = basicallyforever
|
||||
}
|
||||
}
|
||||
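// Editor's note with illustrative values (not from the original source): the
// clamping above means
//
//	--max-lock 0    => basicallyforever (the lock effectively never expires)
//	--max-lock 30s  => raised to 2m, with a warning
//	--max-lock 10m  => used as-is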
|
||||
func (b *bisyncRun) renewLockFile() {
|
||||
if b.lockFile != "" && bilib.FileExists(b.lockFile) {
|
||||
|
||||
data.Session = b.basePath
|
||||
data.PID = strconv.Itoa(os.Getpid())
|
||||
data.TimeRenewed = time.Now()
|
||||
data.TimeExpires = time.Now().Add(b.opt.MaxLock)
|
||||
|
||||
// save data file
|
||||
df, err := os.Create(b.lockFile)
|
||||
b.handleErr(b.lockFile, "error renewing lock file", err, true, true)
|
||||
b.handleErr(b.lockFile, "error encoding JSON to lock file", json.NewEncoder(df).Encode(data), true, true)
|
||||
b.handleErr(b.lockFile, "error closing lock file", df.Close(), true, true)
|
||||
if b.opt.MaxLock < basicallyforever {
|
||||
fs.Infof(nil, Color(terminal.HiBlueFg, "lock file renewed for %v. New expiration: %v"), b.opt.MaxLock, data.TimeExpires)
|
||||
}
|
||||
}
|
||||
}
|
||||
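// Editor's note (illustrative values only, not taken from a real run): a lock
// file written by renewLockFile looks roughly like
//
//	{
//		"Session": "/home/user/.cache/rclone/bisync/path1..path2",
//		"PID": "12345",
//		"TimeRenewed": "2024-01-20T10:00:00Z",
//		"TimeExpires": "2024-01-20T10:05:00Z"
//	}
//
// lockFileIsExpired below decodes the same struct and compares TimeExpires
// against the current time.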
|
||||
func (b *bisyncRun) lockFileIsExpired() bool {
|
||||
if b.lockFile != "" && bilib.FileExists(b.lockFile) {
|
||||
rdf, err := os.Open(b.lockFile)
|
||||
b.handleErr(b.lockFile, "error reading lock file", err, true, true)
|
||||
dec := json.NewDecoder(rdf)
|
||||
for {
|
||||
if err := dec.Decode(&data); err == io.EOF {
|
||||
break
|
||||
}
|
||||
}
|
||||
b.handleErr(b.lockFile, "error closing file", rdf.Close(), true, true)
|
||||
if !data.TimeExpires.IsZero() && data.TimeExpires.Before(time.Now()) {
|
||||
fs.Infof(b.lockFile, Color(terminal.GreenFg, "Lock file found, but it expired at %v. Will delete it and proceed."), data.TimeExpires)
|
||||
markFailed(b.listing1) // listing is untrusted so force revert to prior (if --recover) or create new ones (if --resync)
|
||||
markFailed(b.listing2)
|
||||
return true
|
||||
}
|
||||
fs.Infof(b.lockFile, Color(terminal.RedFg, "Valid lock file found. Expires at %v. (%v from now)"), data.TimeExpires, time.Since(data.TimeExpires).Abs().Round(time.Second))
|
||||
prettyprint(data, "Lockfile info", fs.LogLevelInfo)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// startLockRenewal renews the lockfile every --max-lock minus one minute.
|
||||
//
|
||||
// It returns a func which should be called to stop the renewal.
|
||||
func (b *bisyncRun) startLockRenewal() func() {
|
||||
if b.opt.MaxLock <= 0 || b.opt.MaxLock >= basicallyforever || b.lockFile == "" {
|
||||
return func() {}
|
||||
}
|
||||
stopLockRenewal := make(chan struct{})
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
ticker := time.NewTicker(b.opt.MaxLock - time.Minute)
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
b.renewLockFile()
|
||||
case <-stopLockRenewal:
|
||||
ticker.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return func() {
|
||||
close(stopLockRenewal)
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
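// Editor's usage sketch (not part of the original commit): the returned stop
// function must be called exactly once to close the channel and wait for the
// renewal goroutine to exit, e.g.
//
//	stop := b.startLockRenewal()
//	defer stop()
//
// In this commit the stop function is stored in the package-level stopRenewal
// variable and invoked from removeLockFile.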
|
||||
func markFailed(file string) {
|
||||
failFile := file + "-err"
|
||||
if bilib.FileExists(file) {
|
||||
_ = os.Remove(failFile)
|
||||
_ = os.Rename(file, failFile)
|
||||
}
|
||||
}
|
@ -1,12 +1,15 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/lib/encoder"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
)
|
||||
|
||||
func (b *bisyncRun) indentf(tag, file, format string, args ...interface{}) {
|
||||
@ -25,12 +28,27 @@ func (b *bisyncRun) indent(tag, file, msg string) {
|
||||
tag = tag[1:]
|
||||
logf = fs.Logf
|
||||
}
|
||||
logf(nil, "- %-9s%-35s - %s", tag, msg, escapePath(file, false))
|
||||
|
||||
if b.opt.DryRun {
|
||||
logf = fs.Logf
|
||||
}
|
||||
|
||||
if tag == "Path1" {
|
||||
tag = Color(terminal.CyanFg, "Path1")
|
||||
} else {
|
||||
tag = Color(terminal.BlueFg, tag)
|
||||
}
|
||||
msg = Color(terminal.MagentaFg, msg)
|
||||
msg = strings.Replace(msg, "Queue copy to", Color(terminal.GreenFg, "Queue copy to"), -1)
|
||||
msg = strings.Replace(msg, "Queue delete", Color(terminal.RedFg, "Queue delete"), -1)
|
||||
file = Color(terminal.CyanFg, escapePath(file, false))
|
||||
logf(nil, "- %-18s%-43s - %s", tag, msg, file)
|
||||
}
|
||||
|
||||
// escapePath will escape control characters in path.
|
||||
// It won't quote just due to backslashes on Windows.
|
||||
func escapePath(path string, forceQuotes bool) string {
|
||||
path = encode(path)
|
||||
test := path
|
||||
if runtime.GOOS == "windows" {
|
||||
test = strings.ReplaceAll(path, "\\", "/")
|
||||
@ -47,3 +65,31 @@ func escapePath(path string, forceQuotes bool) string {
|
||||
func quotePath(path string) string {
|
||||
return escapePath(path, true)
|
||||
}
|
||||
|
||||
var Colors bool // Colors controls whether terminal colors are enabled
|
||||
|
||||
// Color handles terminal colors for bisync
|
||||
func Color(style string, s string) string {
|
||||
if !Colors {
|
||||
return s
|
||||
}
|
||||
terminal.Start()
|
||||
return style + s + terminal.Reset
|
||||
}
|
||||
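// exampleColorUsage is an illustrative sketch added for clarity (not part of
// the original commit): Color is a no-op until Colors is enabled, so callers
// can wrap messages unconditionally.
func exampleColorUsage() {
	Colors = true // normally derived from the terminal color mode settings
	fs.Logf(nil, Color(terminal.GreenFg, "Bisync successful"))
}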
|
||||
func encode(s string) string {
|
||||
return encoder.OS.ToStandardPath(encoder.OS.FromStandardPath(s))
|
||||
}
|
||||
|
||||
// prettyprint formats JSON for improved readability in debug logs
|
||||
func prettyprint(in any, label string, level fs.LogLevel) {
|
||||
inBytes, err := json.MarshalIndent(in, "", "\t")
|
||||
if err != nil {
|
||||
fs.Debugf(nil, "failed to marshal input: %v", err)
|
||||
}
|
||||
if level == fs.LogLevelDebug {
|
||||
fs.Debugf(nil, "%s: \n%s\n", label, string(inBytes))
|
||||
} else if level == fs.LogLevelInfo {
|
||||
fs.Infof(nil, "%s: \n%s\n", label, string(inBytes))
|
||||
}
|
||||
}
|
||||
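// Editor's note (not part of the original commit): prettyprint is how bisync
// dumps structured state in its logs, e.g. the call in lockfile.go above:
//
//	prettyprint(data, "Lockfile info", fs.LogLevelInfo)
//
// logs the struct as indented JSON at Info level; fs.LogLevelDebug logs it at
// Debug level instead, and any other level is silently ignored.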
|
247
cmd/bisync/march.go
Normal file
@ -0,0 +1,247 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/march"
|
||||
)
|
||||
|
||||
var ls1 = newFileList()
|
||||
var ls2 = newFileList()
|
||||
var err error
|
||||
var firstErr error
|
||||
var marchAliasLock sync.Mutex
|
||||
var marchLsLock sync.Mutex
|
||||
var marchErrLock sync.Mutex
|
||||
var marchCtx context.Context
|
||||
|
||||
func (b *bisyncRun) makeMarchListing(ctx context.Context) (*fileList, *fileList, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
marchCtx = ctx
|
||||
b.setupListing()
|
||||
fs.Debugf(b, "starting to march!")
|
||||
|
||||
// set up a march over fdst (Path2) and fsrc (Path1)
|
||||
m := &march.March{
|
||||
Ctx: ctx,
|
||||
Fdst: b.fs2,
|
||||
Fsrc: b.fs1,
|
||||
Dir: "",
|
||||
NoTraverse: false,
|
||||
Callback: b,
|
||||
DstIncludeAll: false,
|
||||
NoCheckDest: false,
|
||||
NoUnicodeNormalization: ci.NoUnicodeNormalization,
|
||||
}
|
||||
err = m.Run(ctx)
|
||||
|
||||
fs.Debugf(b, "march completed. err: %v", err)
|
||||
if err == nil {
|
||||
err = firstErr
|
||||
}
|
||||
if err != nil {
|
||||
b.handleErr("march", "error during march", err, true, true)
|
||||
b.abort = true
|
||||
return ls1, ls2, err
|
||||
}
|
||||
|
||||
// save files
|
||||
if b.opt.Compare.DownloadHash && ls1.hash == hash.None {
|
||||
ls1.hash = hash.MD5
|
||||
}
|
||||
if b.opt.Compare.DownloadHash && ls2.hash == hash.None {
|
||||
ls2.hash = hash.MD5
|
||||
}
|
||||
err = ls1.save(ctx, b.newListing1)
|
||||
b.handleErr(ls1, "error saving ls1 from march", err, true, true)
|
||||
err = ls2.save(ctx, b.newListing2)
|
||||
b.handleErr(ls2, "error saving ls2 from march", err, true, true)
|
||||
|
||||
return ls1, ls2, err
|
||||
}
|
||||
|
||||
// SrcOnly have an object which is on path1 only
|
||||
func (b *bisyncRun) SrcOnly(o fs.DirEntry) (recurse bool) {
|
||||
fs.Debugf(o, "path1 only")
|
||||
b.parse(o, true)
|
||||
return isDir(o)
|
||||
}
|
||||
|
||||
// DstOnly have an object which is on path2 only
|
||||
func (b *bisyncRun) DstOnly(o fs.DirEntry) (recurse bool) {
|
||||
fs.Debugf(o, "path2 only")
|
||||
b.parse(o, false)
|
||||
return isDir(o)
|
||||
}
|
||||
|
||||
// Match is called when object exists on both path1 and path2 (whether equal or not)
|
||||
func (b *bisyncRun) Match(ctx context.Context, o2, o1 fs.DirEntry) (recurse bool) {
|
||||
fs.Debugf(o1, "both path1 and path2")
|
||||
marchAliasLock.Lock()
|
||||
b.aliases.Add(o1.Remote(), o2.Remote())
|
||||
marchAliasLock.Unlock()
|
||||
b.parse(o1, true)
|
||||
b.parse(o2, false)
|
||||
return isDir(o1)
|
||||
}
|
||||
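// Editor's summary sketch (not from the original source): march.March drives
// the three callbacks above roughly as follows for each entry it encounters:
//
//	only in Path1  -> SrcOnly(o)    -> recorded in ls1
//	only in Path2  -> DstOnly(o)    -> recorded in ls2
//	in both paths  -> Match(o2, o1) -> alias recorded, entry goes in both lists
//
// A callback returning true tells March to recurse into that directory.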
|
||||
func isDir(e fs.DirEntry) bool {
|
||||
switch x := e.(type) {
|
||||
case fs.Object:
|
||||
fs.Debugf(x, "is Object")
|
||||
return false
|
||||
case fs.Directory:
|
||||
fs.Debugf(x, "is Dir")
|
||||
return true
|
||||
default:
|
||||
fs.Debugf(e, "is unknown")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (b *bisyncRun) parse(e fs.DirEntry, isPath1 bool) {
|
||||
switch x := e.(type) {
|
||||
case fs.Object:
|
||||
b.ForObject(x, isPath1)
|
||||
case fs.Directory:
|
||||
if b.opt.CreateEmptySrcDirs {
|
||||
b.ForDir(x, isPath1)
|
||||
}
|
||||
default:
|
||||
fs.Debugf(e, "is unknown")
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bisyncRun) setupListing() {
|
||||
ls1 = newFileList()
|
||||
ls2 = newFileList()
|
||||
|
||||
// note that --ignore-listing-checksum is different from --ignore-checksum
|
||||
// and we already checked it when we set b.opt.Compare.HashType1 and 2
|
||||
ls1.hash = b.opt.Compare.HashType1
|
||||
ls2.hash = b.opt.Compare.HashType2
|
||||
}
|
||||
|
||||
func (b *bisyncRun) ForObject(o fs.Object, isPath1 bool) {
|
||||
tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing file - "+whichPath(isPath1))
|
||||
defer func() {
|
||||
tr.Done(marchCtx, nil)
|
||||
}()
|
||||
var (
|
||||
hashVal string
|
||||
hashErr error
|
||||
)
|
||||
ls := whichLs(isPath1)
|
||||
hashType := ls.hash
|
||||
if hashType != hash.None {
|
||||
hashVal, hashErr = o.Hash(marchCtx, hashType)
|
||||
marchErrLock.Lock()
|
||||
if firstErr == nil {
|
||||
firstErr = hashErr
|
||||
}
|
||||
marchErrLock.Unlock()
|
||||
}
|
||||
hashVal, hashErr = tryDownloadHash(marchCtx, o, hashVal)
|
||||
marchErrLock.Lock()
|
||||
if firstErr == nil {
|
||||
firstErr = hashErr
|
||||
}
|
||||
if firstErr != nil {
|
||||
b.handleErr(hashType, "error hashing during march", firstErr, false, true)
|
||||
}
|
||||
marchErrLock.Unlock()
|
||||
|
||||
var modtime time.Time
|
||||
if b.opt.Compare.Modtime {
|
||||
modtime = o.ModTime(marchCtx).In(TZ)
|
||||
}
|
||||
id := "" // TODO: ID(o)
|
||||
flags := "-" // "-" for a file and "d" for a directory
|
||||
marchLsLock.Lock()
|
||||
ls.put(o.Remote(), o.Size(), modtime, hashVal, id, flags)
|
||||
marchLsLock.Unlock()
|
||||
}
|
||||
|
||||
func (b *bisyncRun) ForDir(o fs.Directory, isPath1 bool) {
|
||||
tr := accounting.Stats(marchCtx).NewCheckingTransfer(o, "listing dir - "+whichPath(isPath1))
|
||||
defer func() {
|
||||
tr.Done(marchCtx, nil)
|
||||
}()
|
||||
ls := whichLs(isPath1)
|
||||
var modtime time.Time
|
||||
if b.opt.Compare.Modtime {
|
||||
modtime = o.ModTime(marchCtx).In(TZ)
|
||||
}
|
||||
id := "" // TODO
|
||||
flags := "d" // "-" for a file and "d" for a directory
|
||||
marchLsLock.Lock()
|
||||
ls.put(o.Remote(), -1, modtime, "", id, flags)
|
||||
marchLsLock.Unlock()
|
||||
}
|
||||
|
||||
func whichLs(isPath1 bool) *fileList {
|
||||
ls := ls1
|
||||
if !isPath1 {
|
||||
ls = ls2
|
||||
}
|
||||
return ls
|
||||
}
|
||||
|
||||
func whichPath(isPath1 bool) string {
|
||||
s := "Path1"
|
||||
if !isPath1 {
|
||||
s = "Path2"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (b *bisyncRun) findCheckFiles(ctx context.Context) (*fileList, *fileList, error) {
|
||||
ctxCheckFile, filterCheckFile := filter.AddConfig(ctx)
|
||||
b.handleErr(b.opt.CheckFilename, "error adding CheckFilename to filter", filterCheckFile.Add(true, b.opt.CheckFilename), true, true)
|
||||
b.handleErr(b.opt.CheckFilename, "error adding ** exclusion to filter", filterCheckFile.Add(false, "**"), true, true)
|
||||
ci := fs.GetConfig(ctxCheckFile)
|
||||
marchCtx = ctxCheckFile
|
||||
|
||||
b.setupListing()
|
||||
fs.Debugf(b, "starting to march!")
|
||||
|
||||
// set up a march over fdst (Path2) and fsrc (Path1)
|
||||
m := &march.March{
|
||||
Ctx: ctxCheckFile,
|
||||
Fdst: b.fs2,
|
||||
Fsrc: b.fs1,
|
||||
Dir: "",
|
||||
NoTraverse: false,
|
||||
Callback: b,
|
||||
DstIncludeAll: false,
|
||||
NoCheckDest: false,
|
||||
NoUnicodeNormalization: ci.NoUnicodeNormalization,
|
||||
}
|
||||
err = m.Run(ctxCheckFile)
|
||||
|
||||
fs.Debugf(b, "march completed. err: %v", err)
|
||||
if err == nil {
|
||||
err = firstErr
|
||||
}
|
||||
if err != nil {
|
||||
b.handleErr("march", "error during findCheckFiles", err, true, true)
|
||||
b.abort = true
|
||||
}
|
||||
|
||||
return ls1, ls2, err
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func ID(o fs.Object) string {
|
||||
do, ok := o.(fs.IDer)
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return do.ID()
|
||||
}
|
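// exampleIDUsage is an illustrative sketch added for clarity (not part of the
// original commit): backends that support stable IDs implement the optional
// fs.IDer interface, so ID degrades gracefully to "" everywhere else. The
// march listing currently records "" instead (see the TODO in ForObject).
func exampleIDUsage(o fs.Object) {
	if id := ID(o); id != "" {
		fs.Debugf(o, "object ID: %s", id)
	}
}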
@ -9,15 +9,18 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"runtime"
|
||||
"strings"
|
||||
gosync "sync"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/log"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/sync"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
)
|
||||
|
||||
// ErrBisyncAborted signals that bisync is aborted and forces exit code 2
|
||||
@ -25,23 +28,49 @@ var ErrBisyncAborted = errors.New("bisync aborted")
|
||||
|
||||
// bisyncRun keeps bisync runtime state
|
||||
type bisyncRun struct {
|
||||
fs1 fs.Fs
|
||||
fs2 fs.Fs
|
||||
abort bool
|
||||
critical bool
|
||||
retryable bool
|
||||
basePath string
|
||||
workDir string
|
||||
opt *Options
|
||||
fs1 fs.Fs
|
||||
fs2 fs.Fs
|
||||
abort bool
|
||||
critical bool
|
||||
retryable bool
|
||||
basePath string
|
||||
workDir string
|
||||
listing1 string
|
||||
listing2 string
|
||||
newListing1 string
|
||||
newListing2 string
|
||||
aliases bilib.AliasMap
|
||||
opt *Options
|
||||
octx context.Context
|
||||
fctx context.Context
|
||||
InGracefulShutdown bool
|
||||
CleanupCompleted bool
|
||||
SyncCI *fs.ConfigInfo
|
||||
CancelSync context.CancelFunc
|
||||
DebugName string
|
||||
lockFile string
|
||||
renames renames
|
||||
resyncIs1to2 bool
|
||||
}
|
||||
|
||||
type queues struct {
|
||||
copy1to2 bilib.Names
|
||||
copy2to1 bilib.Names
|
||||
renameSkipped bilib.Names // not renamed because it was equal
|
||||
skippedDirs1 *fileList
|
||||
skippedDirs2 *fileList
|
||||
deletedonboth bilib.Names
|
||||
}
|
||||
|
||||
// Bisync handles lock file, performs bisync run and checks exit status
|
||||
func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
defer resetGlobals()
|
||||
opt := *optArg // ensure that input is never changed
|
||||
b := &bisyncRun{
|
||||
fs1: fs1,
|
||||
fs2: fs2,
|
||||
opt: &opt,
|
||||
fs1: fs1,
|
||||
fs2: fs2,
|
||||
opt: &opt,
|
||||
DebugName: opt.DebugName,
|
||||
}
|
||||
|
||||
if opt.CheckFilename == "" {
|
||||
@ -50,14 +79,23 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
if opt.Workdir == "" {
|
||||
opt.Workdir = DefaultWorkdir
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
opt.OrigBackupDir = ci.BackupDir
|
||||
|
||||
if !opt.DryRun && !opt.Force {
|
||||
if fs1.Precision() == fs.ModTimeNotSupported {
|
||||
return errors.New("modification time support is missing on path1")
|
||||
}
|
||||
if fs2.Precision() == fs.ModTimeNotSupported {
|
||||
return errors.New("modification time support is missing on path2")
|
||||
}
|
||||
if ci.TerminalColorMode == fs.TerminalColorModeAlways || (ci.TerminalColorMode == fs.TerminalColorModeAuto && !log.Redirected()) {
|
||||
Colors = true
|
||||
}
|
||||
|
||||
err = b.setCompareDefaults(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.setResyncDefaults()
|
||||
|
||||
err = b.setResolveDefaults(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if b.workDir, err = filepath.Abs(opt.Workdir); err != nil {
|
||||
@ -68,41 +106,62 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
}
|
||||
|
||||
// Produce a unique name for the sync operation
|
||||
b.basePath = filepath.Join(b.workDir, bilib.SessionName(b.fs1, b.fs2))
|
||||
listing1 := b.basePath + ".path1.lst"
|
||||
listing2 := b.basePath + ".path2.lst"
|
||||
b.basePath = bilib.BasePath(ctx, b.workDir, b.fs1, b.fs2)
|
||||
b.listing1 = b.basePath + ".path1.lst"
|
||||
b.listing2 = b.basePath + ".path2.lst"
|
||||
b.newListing1 = b.listing1 + "-new"
|
||||
b.newListing2 = b.listing2 + "-new"
|
||||
b.aliases = bilib.AliasMap{}
|
||||
|
||||
err = b.checkSyntax()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle lock file
|
||||
lockFile := ""
|
||||
if !opt.DryRun {
|
||||
lockFile = b.basePath + ".lck"
|
||||
if bilib.FileExists(lockFile) {
|
||||
return fmt.Errorf("prior lock file found: %s", lockFile)
|
||||
}
|
||||
|
||||
pidStr := []byte(strconv.Itoa(os.Getpid()))
|
||||
if err = os.WriteFile(lockFile, pidStr, bilib.PermSecure); err != nil {
|
||||
return fmt.Errorf("cannot create lock file: %s: %w", lockFile, err)
|
||||
}
|
||||
fs.Debugf(nil, "Lock file created: %s", lockFile)
|
||||
err = b.setLockFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Handle SIGINT
|
||||
var finaliseOnce gosync.Once
|
||||
markFailed := func(file string) {
|
||||
failFile := file + "-err"
|
||||
if bilib.FileExists(file) {
|
||||
_ = os.Remove(failFile)
|
||||
_ = os.Rename(file, failFile)
|
||||
}
|
||||
}
|
||||
|
||||
finalise := func() {
|
||||
finaliseOnce.Do(func() {
|
||||
if atexit.Signalled() {
|
||||
fs.Logf(nil, "Bisync interrupted. Must run --resync to recover.")
|
||||
markFailed(listing1)
|
||||
markFailed(listing2)
|
||||
_ = os.Remove(lockFile)
|
||||
if b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "No need to gracefully shutdown during --resync (just run it again.)"))
|
||||
} else {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Attempting to gracefully shutdown. (Send exit signal again for immediate un-graceful shutdown.)"))
|
||||
b.InGracefulShutdown = true
|
||||
if b.SyncCI != nil {
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "Telling Sync to wrap up early."))
|
||||
b.SyncCI.MaxTransfer = 1
|
||||
b.SyncCI.MaxDuration = 1 * time.Second
|
||||
b.SyncCI.CutoffMode = fs.CutoffModeSoft
|
||||
gracePeriod := 30 * time.Second // TODO: flag to customize this?
|
||||
if !waitFor("Canceling Sync if not done in", gracePeriod, func() bool { return b.CleanupCompleted }) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Canceling sync and cleaning up"))
|
||||
b.CancelSync()
|
||||
waitFor("Aborting Bisync if not done in", 60*time.Second, func() bool { return b.CleanupCompleted })
|
||||
}
|
||||
} else {
|
||||
// we haven't started to sync yet, so we're good.
|
||||
// no need to worry about the listing files, as we haven't overwritten them yet.
|
||||
b.CleanupCompleted = true
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
|
||||
}
|
||||
}
|
||||
if !b.CleanupCompleted {
|
||||
if !b.opt.Resync {
|
||||
fs.Logf(nil, Color(terminal.HiRedFg, "Graceful shutdown failed."))
|
||||
fs.Logf(nil, Color(terminal.RedFg, "Bisync interrupted. Must run --resync to recover."))
|
||||
}
|
||||
markFailed(b.listing1)
|
||||
markFailed(b.listing2)
|
||||
}
|
||||
b.removeLockFile()
|
||||
}
|
||||
})
|
||||
}
|
||||
@ -110,53 +169,55 @@ func Bisync(ctx context.Context, fs1, fs2 fs.Fs, optArg *Options) (err error) {
|
||||
defer atexit.Unregister(fnHandle)
|
||||
|
||||
// run bisync
|
||||
err = b.runLocked(ctx, listing1, listing2)
|
||||
err = b.runLocked(ctx)
|
||||
|
||||
if lockFile != "" {
|
||||
errUnlock := os.Remove(lockFile)
|
||||
if errUnlock == nil {
|
||||
fs.Debugf(nil, "Lock file removed: %s", lockFile)
|
||||
} else if err == nil {
|
||||
err = errUnlock
|
||||
} else {
|
||||
fs.Errorf(nil, "cannot remove lockfile %s: %v", lockFile, errUnlock)
|
||||
b.removeLockFile()
|
||||
|
||||
b.CleanupCompleted = true
|
||||
if b.InGracefulShutdown {
|
||||
if err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful {
|
||||
err = nil
|
||||
b.critical = false
|
||||
}
|
||||
if err == nil {
|
||||
fs.Logf(nil, Color(terminal.GreenFg, "Graceful shutdown completed successfully."))
|
||||
}
|
||||
}
|
||||
|
||||
if b.critical {
|
||||
if b.retryable && b.opt.Resilient {
|
||||
fs.Errorf(nil, "Bisync critical error: %v", err)
|
||||
fs.Errorf(nil, "Bisync aborted. Error is retryable without --resync due to --resilient mode.")
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
|
||||
fs.Errorf(nil, Color(terminal.YellowFg, "Bisync aborted. Error is retryable without --resync due to --resilient mode."))
|
||||
} else {
|
||||
if bilib.FileExists(listing1) {
|
||||
_ = os.Rename(listing1, listing1+"-err")
|
||||
if bilib.FileExists(b.listing1) {
|
||||
_ = os.Rename(b.listing1, b.listing1+"-err")
|
||||
}
|
||||
if bilib.FileExists(listing2) {
|
||||
_ = os.Rename(listing2, listing2+"-err")
|
||||
if bilib.FileExists(b.listing2) {
|
||||
_ = os.Rename(b.listing2, b.listing2+"-err")
|
||||
}
|
||||
fs.Errorf(nil, "Bisync critical error: %v", err)
|
||||
fs.Errorf(nil, "Bisync aborted. Must run --resync to recover.")
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync critical error: %v"), err)
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "Bisync aborted. Must run --resync to recover."))
|
||||
}
|
||||
return ErrBisyncAborted
|
||||
}
|
||||
if b.abort {
|
||||
fs.Logf(nil, "Bisync aborted. Please try again.")
|
||||
if b.abort && !b.InGracefulShutdown {
|
||||
fs.Logf(nil, Color(terminal.RedFg, "Bisync aborted. Please try again."))
|
||||
}
|
||||
if err == nil {
|
||||
fs.Infof(nil, "Bisync successful")
|
||||
fs.Infof(nil, Color(terminal.GreenFg, "Bisync successful"))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// runLocked performs a full bisync run
|
||||
func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (err error) {
|
||||
func (b *bisyncRun) runLocked(octx context.Context) (err error) {
|
||||
opt := b.opt
|
||||
path1 := bilib.FsPath(b.fs1)
|
||||
path2 := bilib.FsPath(b.fs2)
|
||||
|
||||
if opt.CheckSync == CheckSyncOnly {
|
||||
fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
|
||||
if err = b.checkSync(listing1, listing2); err != nil {
|
||||
if err = b.checkSync(b.listing1, b.listing2); err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
}
|
||||
@ -167,14 +228,16 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
|
||||
if opt.DryRun {
|
||||
// In --dry-run mode, preserve original listings and save updates to the .lst-dry files
|
||||
origListing1 := listing1
|
||||
origListing2 := listing2
|
||||
listing1 += "-dry"
|
||||
listing2 += "-dry"
|
||||
if err := bilib.CopyFileIfExists(origListing1, listing1); err != nil {
|
||||
origListing1 := b.listing1
|
||||
origListing2 := b.listing2
|
||||
b.listing1 += "-dry"
|
||||
b.listing2 += "-dry"
|
||||
b.newListing1 = b.listing1 + "-new"
|
||||
b.newListing2 = b.listing2 + "-new"
|
||||
if err := bilib.CopyFileIfExists(origListing1, b.listing1); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := bilib.CopyFileIfExists(origListing2, listing2); err != nil {
|
||||
if err := bilib.CopyFileIfExists(origListing2, b.listing2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -186,24 +249,65 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
b.retryable = true
|
||||
return
|
||||
}
|
||||
b.octx = octx
|
||||
b.fctx = fctx
|
||||
|
||||
// overlapping paths check
|
||||
err = b.overlappingPathsCheck(fctx, b.fs1, b.fs2)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
|
||||
// Generate Path1 and Path2 listings and copy any unique Path2 files to Path1
|
||||
if opt.Resync {
|
||||
return b.resync(octx, fctx, listing1, listing2)
|
||||
return b.resync(octx, fctx)
|
||||
}
|
||||
|
||||
// Check for existence of prior Path1 and Path2 listings
|
||||
if !bilib.FileExists(listing1) || !bilib.FileExists(listing2) {
|
||||
// On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs
|
||||
if !bilib.FileExists(b.listing1) || !bilib.FileExists(b.listing2) {
|
||||
if b.opt.Recover && bilib.FileExists(b.listing1+"-old") && bilib.FileExists(b.listing2+"-old") {
|
||||
errTip := fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
|
||||
errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s"), Color(terminal.HiBlueFg, b.listing2))
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "Listings not found. Reverting to prior backup as --recover is set. \n")+errTip)
|
||||
if opt.CheckSync != CheckSyncFalse {
|
||||
// Run CheckSync to ensure old listing is valid (garbage in, garbage out!)
|
||||
fs.Infof(nil, "Validating backup listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
|
||||
if err = b.checkSync(b.listing1+"-old", b.listing2+"-old"); err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
fs.Infof(nil, Color(terminal.GreenFg, "Backup listing is valid."))
|
||||
}
|
||||
b.revertToOldListings()
|
||||
} else {
|
||||
// On prior critical error abort, the prior listings are renamed to .lst-err to lock out further runs
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
errTip := Color(terminal.MagentaFg, "Tip: here are the filenames we were looking for. Do they exist? \n")
|
||||
errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path1: %s\n"), Color(terminal.HiBlueFg, b.listing1))
|
||||
errTip += fmt.Sprintf(Color(terminal.CyanFg, "Path2: %s\n"), Color(terminal.HiBlueFg, b.listing2))
|
||||
errTip += Color(terminal.MagentaFg, "Try running this command to inspect the work dir: \n")
|
||||
errTip += fmt.Sprintf(Color(terminal.HiCyanFg, "rclone lsl \"%s\""), b.workDir)
|
||||
|
||||
return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run \n" + errTip)
|
||||
}
|
||||
}
|
||||
|
||||
fs.Infof(nil, "Building Path1 and Path2 listings")
|
||||
ls1, ls2, err = b.makeMarchListing(fctx)
|
||||
if err != nil || accounting.Stats(fctx).Errored() {
|
||||
fs.Errorf(nil, Color(terminal.RedFg, "There were errors while building listings. Aborting as it is too dangerous to continue."))
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return errors.New("cannot find prior Path1 or Path2 listings, likely due to critical error on prior run")
|
||||
return err
|
||||
}
|
||||
|
||||
// Check for Path1 deltas relative to the prior sync
|
||||
fs.Infof(nil, "Path1 checking for diffs")
|
||||
newListing1 := listing1 + "-new"
|
||||
ds1, err := b.findDeltas(fctx, b.fs1, listing1, newListing1, "Path1")
|
||||
ds1, err := b.findDeltas(fctx, b.fs1, b.listing1, ls1, "Path1")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -211,8 +315,7 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
|
||||
// Check for Path2 deltas relative to the prior sync
|
||||
fs.Infof(nil, "Path2 checking for diffs")
|
||||
newListing2 := listing2 + "-new"
|
||||
ds2, err := b.findDeltas(fctx, b.fs2, listing2, newListing2, "Path2")
|
||||
ds2, err := b.findDeltas(fctx, b.fs2, b.listing2, ls2, "Path2")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -256,38 +359,63 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
|
||||
// Determine and apply changes to Path1 and Path2
|
||||
noChanges := ds1.empty() && ds2.empty()
|
||||
changes1 := false
|
||||
changes2 := false
|
||||
changes1 := false // 2to1
|
||||
changes2 := false // 1to2
|
||||
results2to1 := []Results{}
|
||||
results1to2 := []Results{}
|
||||
|
||||
queues := queues{}
|
||||
|
||||
if noChanges {
|
||||
fs.Infof(nil, "No changes found")
|
||||
} else {
|
||||
fs.Infof(nil, "Applying changes")
|
||||
changes1, changes2, err = b.applyDeltas(octx, ds1, ds2)
|
||||
changes1, changes2, results2to1, results1to2, queues, err = b.applyDeltas(octx, ds1, ds2)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
// b.retryable = true // not sure about this one
|
||||
return err
|
||||
if b.InGracefulShutdown && (err == context.Canceled || err == accounting.ErrorMaxTransferLimitReachedGraceful || strings.Contains(err.Error(), "context canceled")) {
|
||||
fs.Infof(nil, "Ignoring sync error due to Graceful Shutdown: %v", err)
|
||||
} else {
|
||||
b.critical = true
|
||||
// b.retryable = true // not sure about this one
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Clean up and check listings integrity
|
||||
fs.Infof(nil, "Updating listings")
|
||||
var err1, err2 error
|
||||
if b.DebugName != "" {
|
||||
l1, _ := b.loadListing(b.listing1)
|
||||
l2, _ := b.loadListing(b.listing2)
|
||||
newl1, _ := b.loadListing(b.newListing1)
|
||||
newl2, _ := b.loadListing(b.newListing2)
|
||||
b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName)))
|
||||
b.debug(b.DebugName, fmt.Sprintf("pre-saveOldListings, newls1 has name?: %v, newls2 has name?: %v", newl1.has(b.DebugName), newl2.has(b.DebugName)))
|
||||
}
|
||||
b.saveOldListings()
|
||||
// save new listings
|
||||
// NOTE: "changes" in this case does not mean this run vs. last run, it means start of this run vs. end of this run.
|
||||
// i.e. whether we can use the March lst-new as this side's lst without modifying it.
|
||||
if noChanges {
|
||||
err1 = bilib.CopyFileIfExists(newListing1, listing1)
|
||||
err2 = bilib.CopyFileIfExists(newListing2, listing2)
|
||||
b.replaceCurrentListings()
|
||||
} else {
|
||||
if changes1 {
|
||||
_, err1 = b.makeListing(fctx, b.fs1, listing1)
|
||||
if changes1 || b.InGracefulShutdown { // 2to1
|
||||
err1 = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false)
|
||||
} else {
|
||||
err1 = bilib.CopyFileIfExists(newListing1, listing1)
|
||||
err1 = bilib.CopyFileIfExists(b.newListing1, b.listing1)
|
||||
}
|
||||
if changes2 {
|
||||
_, err2 = b.makeListing(fctx, b.fs2, listing2)
|
||||
if changes2 || b.InGracefulShutdown { // 1to2
|
||||
err2 = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true)
|
||||
} else {
|
||||
err2 = bilib.CopyFileIfExists(newListing2, listing2)
|
||||
err2 = bilib.CopyFileIfExists(b.newListing2, b.listing2)
|
||||
}
|
||||
}
|
||||
if b.DebugName != "" {
|
||||
l1, _ := b.loadListing(b.listing1)
|
||||
l2, _ := b.loadListing(b.listing2)
|
||||
b.debug(b.DebugName, fmt.Sprintf("post-modifyListing, ls1 has name?: %v, ls2 has name?: %v", l1.has(b.DebugName), l2.has(b.DebugName)))
|
||||
}
|
||||
err = err1
|
||||
if err == nil {
|
||||
err = err2
|
||||
@ -299,13 +427,13 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
}
|
||||
|
||||
if !opt.NoCleanup {
|
||||
_ = os.Remove(newListing1)
|
||||
_ = os.Remove(newListing2)
|
||||
_ = os.Remove(b.newListing1)
|
||||
_ = os.Remove(b.newListing2)
|
||||
}
|
||||
|
||||
if opt.CheckSync == CheckSyncTrue && !opt.DryRun {
|
||||
fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
|
||||
if err := b.checkSync(listing1, listing2); err != nil {
|
||||
if err := b.checkSync(b.listing1, b.listing2); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
@ -314,7 +442,9 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
// Optional rmdirs for empty directories
|
||||
if opt.RemoveEmptyDirs {
|
||||
fs.Infof(nil, "Removing empty directories")
|
||||
fctx = b.setBackupDir(fctx, 1)
|
||||
err1 := operations.Rmdirs(fctx, b.fs1, "", true)
|
||||
fctx = b.setBackupDir(fctx, 2)
|
||||
err2 := operations.Rmdirs(fctx, b.fs2, "", true)
|
||||
err := err1
|
||||
if err == nil {
|
||||
@ -330,135 +460,6 @@ func (b *bisyncRun) runLocked(octx context.Context, listing1, listing2 string) (
|
||||
return nil
|
||||
}
|
||||
|
||||
// resync implements the --resync mode.
|
||||
// It will generate path1 and path2 listings
|
||||
// and copy any unique path2 files to path1.
|
||||
func (b *bisyncRun) resync(octx, fctx context.Context, listing1, listing2 string) error {
|
||||
fs.Infof(nil, "Copying unique Path2 files to Path1")
|
||||
|
||||
newListing1 := listing1 + "-new"
|
||||
filesNow1, err := b.makeListing(fctx, b.fs1, newListing1)
|
||||
if err == nil {
|
||||
err = b.checkListing(filesNow1, newListing1, "current Path1")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newListing2 := listing2 + "-new"
|
||||
filesNow2, err := b.makeListing(fctx, b.fs2, newListing2)
|
||||
if err == nil {
|
||||
err = b.checkListing(filesNow2, newListing2, "current Path2")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Check access health on the Path1 and Path2 filesystems
|
||||
// enforce even though this is --resync
|
||||
if b.opt.CheckAccess {
|
||||
fs.Infof(nil, "Checking access health")
|
||||
|
||||
ds1 := &deltaSet{
|
||||
checkFiles: bilib.Names{},
|
||||
}
|
||||
|
||||
ds2 := &deltaSet{
|
||||
checkFiles: bilib.Names{},
|
||||
}
|
||||
|
||||
for _, file := range filesNow1.list {
|
||||
if filepath.Base(file) == b.opt.CheckFilename {
|
||||
ds1.checkFiles.Add(file)
|
||||
}
|
||||
}
|
||||
|
||||
for _, file := range filesNow2.list {
|
||||
if filepath.Base(file) == b.opt.CheckFilename {
|
||||
ds2.checkFiles.Add(file)
|
||||
}
|
||||
}
|
||||
|
||||
err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
copy2to1 := []string{}
|
||||
for _, file := range filesNow2.list {
|
||||
if !filesNow1.has(file) {
|
||||
b.indent("Path2", file, "Resync will copy to Path1")
|
||||
copy2to1 = append(copy2to1, file)
|
||||
}
|
||||
}
|
||||
|
||||
if len(copy2to1) > 0 {
|
||||
b.indent("Path2", "Path1", "Resync is doing queued copies to")
|
||||
// octx does not have extra filters!
|
||||
err = b.fastCopy(octx, b.fs2, b.fs1, bilib.ToNames(copy2to1), "resync-copy2to1")
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fs.Infof(nil, "Resynching Path1 to Path2")
|
||||
ctxRun := b.opt.setDryRun(fctx)
|
||||
// fctx has our extra filters added!
|
||||
ctxSync, filterSync := filter.AddConfig(ctxRun)
|
||||
if filterSync.Opt.MinSize == -1 {
|
||||
// prevent overwriting Google Doc files (their size is -1)
|
||||
filterSync.Opt.MinSize = 0
|
||||
}
|
||||
if err = sync.CopyDir(ctxSync, b.fs2, b.fs1, b.opt.CreateEmptySrcDirs); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
|
||||
if b.opt.CreateEmptySrcDirs {
|
||||
// copy Path2 back to Path1, for empty dirs
|
||||
// the fastCopy above cannot include directories, because it relies on --files-from for filtering,
|
||||
// so instead we'll copy them here, relying on fctx for our filtering.
|
||||
|
||||
// This preserves the original resync order for backward compatibility. It is essentially:
|
||||
// rclone copy Path2 Path1 --ignore-existing
|
||||
// rclone copy Path1 Path2 --create-empty-src-dirs
|
||||
// rclone copy Path2 Path1 --create-empty-src-dirs
|
||||
|
||||
// although if we were starting from scratch, it might be cleaner and faster to just do:
|
||||
// rclone copy Path2 Path1 --create-empty-src-dirs
|
||||
// rclone copy Path1 Path2 --create-empty-src-dirs
|
||||
|
||||
fs.Infof(nil, "Resynching Path2 to Path1 (for empty dirs)")
|
||||
|
||||
//note copy (not sync) and dst comes before src
|
||||
if err = sync.CopyDir(ctxSync, b.fs1, b.fs2, b.opt.CreateEmptySrcDirs); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fs.Infof(nil, "Resync updating listings")
|
||||
if _, err = b.makeListing(fctx, b.fs1, listing1); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err = b.makeListing(fctx, b.fs2, listing2); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
|
||||
if !b.opt.NoCleanup {
|
||||
_ = os.Remove(newListing1)
|
||||
_ = os.Remove(newListing2)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkSync validates listings
|
||||
func (b *bisyncRun) checkSync(listing1, listing2 string) error {
|
||||
files1, err := b.loadListing(listing1)
|
||||
@ -472,17 +473,22 @@ func (b *bisyncRun) checkSync(listing1, listing2 string) error {
|
||||
|
||||
ok := true
|
||||
for _, file := range files1.list {
|
||||
if !files2.has(file) {
|
||||
if !files2.has(file) && !files2.has(b.aliases.Alias(file)) {
|
||||
b.indent("ERROR", file, "Path1 file not found in Path2")
|
||||
ok = false
|
||||
} else {
|
||||
if !b.fileInfoEqual(file, files2.getTryAlias(file, b.aliases.Alias(file)), files1, files2) {
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, file := range files2.list {
|
||||
if !files1.has(file) {
|
||||
if !files1.has(file) && !files1.has(b.aliases.Alias(file)) {
|
||||
b.indent("ERROR", file, "Path2 file not found in Path1")
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return errors.New("path1 and path2 are out of sync, run --resync to recover")
|
||||
}
|
||||
@ -498,6 +504,9 @@ func (b *bisyncRun) checkAccess(checkFiles1, checkFiles2 bilib.Names) error {
|
||||
numChecks1 := len(checkFiles1)
|
||||
numChecks2 := len(checkFiles2)
|
||||
if numChecks1 == 0 || numChecks1 != numChecks2 {
|
||||
if numChecks1 == 0 && numChecks2 == 0 {
|
||||
fs.Logf("--check-access", Color(terminal.RedFg, "Failed to find any files named %s\n More info: %s"), Color(terminal.CyanFg, opt.CheckFilename), Color(terminal.BlueFg, "https://rclone.org/bisync/#check-access"))
|
||||
}
|
||||
fs.Errorf(nil, "%s Path1 count %d, Path2 count %d - %s", prefix, numChecks1, numChecks2, opt.CheckFilename)
|
||||
ok = false
|
||||
}
|
||||
@ -522,3 +531,142 @@ func (b *bisyncRun) checkAccess(checkFiles1, checkFiles2 bilib.Names) error {
|
||||
fs.Infof(nil, "Found %d matching %q files on both paths", numChecks1, opt.CheckFilename)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *bisyncRun) testFn() {
|
||||
if b.opt.TestFn != nil {
|
||||
b.opt.TestFn()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, retryable bool) {
|
||||
if err != nil {
|
||||
if retryable {
|
||||
b.retryable = true
|
||||
}
|
||||
if critical {
|
||||
b.critical = true
|
||||
b.abort = true
|
||||
fs.Errorf(o, "%s: %v", msg, err)
|
||||
} else {
|
||||
fs.Infof(o, "%s: %v", msg, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
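// Editor's note (not part of the original commit): handleErr centralizes
// error classification. For example,
//
//	b.handleErr(srcList, "error saving srcList from modifyListing", err, true, true)
//
// logs the error, marks the run critical (which also sets abort), and flags
// it retryable so --resilient mode can retry without forcing a --resync;
// passing critical=false merely logs at Info level.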
|
||||
// setBackupDir overrides --backup-dir with path-specific version, if set, in each direction
|
||||
func (b *bisyncRun) setBackupDir(ctx context.Context, destPath int) context.Context {
|
||||
ci := fs.GetConfig(ctx)
|
||||
ci.BackupDir = b.opt.OrigBackupDir
|
||||
if destPath == 1 && b.opt.BackupDir1 != "" {
|
||||
ci.BackupDir = b.opt.BackupDir1
|
||||
}
|
||||
if destPath == 2 && b.opt.BackupDir2 != "" {
|
||||
ci.BackupDir = b.opt.BackupDir2
|
||||
}
|
||||
fs.Debugf(ci.BackupDir, "updated backup-dir for Path%d", destPath)
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (b *bisyncRun) overlappingPathsCheck(fctx context.Context, fs1, fs2 fs.Fs) error {
|
||||
if operations.OverlappingFilterCheck(fctx, fs2, fs1) {
|
||||
err = fmt.Errorf(Color(terminal.RedFg, "Overlapping paths detected. Cannot bisync between paths that overlap, unless excluded by filters."))
|
||||
return err
|
||||
}
|
||||
// need to test our BackupDirs too, as sync will be fooled by our --files-from filters
|
||||
testBackupDir := func(ctx context.Context, destPath int) error {
|
||||
src := fs1
|
||||
dst := fs2
|
||||
if destPath == 1 {
|
||||
src = fs2
|
||||
dst = fs1
|
||||
}
|
||||
ctxBackupDir := b.setBackupDir(ctx, destPath)
|
||||
ci := fs.GetConfig(ctxBackupDir)
|
||||
if ci.BackupDir != "" {
|
||||
// operations.BackupDir should return an error if not properly excluded
|
||||
_, err = operations.BackupDir(fctx, dst, src, "")
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err = testBackupDir(fctx, 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = testBackupDir(fctx, 2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *bisyncRun) checkSyntax() error {
|
||||
// check for odd number of quotes in path, usually indicating an escaping issue
|
||||
path1 := bilib.FsPath(b.fs1)
|
||||
path2 := bilib.FsPath(b.fs2)
|
||||
if strings.Count(path1, `"`)%2 != 0 || strings.Count(path2, `"`)%2 != 0 {
|
||||
return fmt.Errorf(Color(terminal.RedFg, `detected an odd number of quotes in your path(s). This is usually a mistake indicating incorrect escaping.
|
||||
Please check your command and try again. Note that on Windows, quoted paths must not have a trailing slash, or it will be interpreted as escaping the quote. path1: %v path2: %v`), path1, path2)
|
||||
}
|
||||
// check for other syntax issues
|
||||
_, err = os.Stat(b.basePath)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "syntax is incorrect") {
|
||||
return fmt.Errorf(Color(terminal.RedFg, `syntax error detected in your path(s). Please check your command and try again.
|
||||
Note that on Windows, quoted paths must not have a trailing slash, or it will be interpreted as escaping the quote. path1: %v path2: %v error: %v`), path1, path2, err)
|
||||
}
|
||||
}
|
||||
if runtime.GOOS == "windows" && (strings.Contains(path1, " --") || strings.Contains(path2, " --")) {
|
||||
return fmt.Errorf(Color(terminal.RedFg, `detected possible flags in your path(s). This is usually a mistake indicating incorrect escaping or quoting (possibly closing quote is missing?).
|
||||
Please check your command and try again. Note that on Windows, quoted paths must not have a trailing slash, or it will be interpreted as escaping the quote. path1: %v path2: %v`), path1, path2)
|
||||
}
|
||||
return nil
|
||||
}
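For illustration only (not part of the commit): a standalone sketch of the odd-quote check above, using a hypothetical Windows-style path. An unbalanced count of double quotes usually means a closing quote was escaped or dropped.

package main

import (
    "fmt"
    "strings"
)

func main() {
    // A trailing backslash before the closing quote (`...path\"`) swallows the quote,
    // leaving an odd number of quote characters in the parsed argument.
    path := `C:\Users\me\My Docs\" --dry-run`
    if strings.Count(path, `"`)%2 != 0 {
        fmt.Println("odd number of quotes - probably an escaping mistake:", path)
    }
}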
|
||||
|
||||
func (b *bisyncRun) debug(nametocheck, msgiftrue string) {
|
||||
if b.DebugName != "" && b.DebugName == nametocheck {
|
||||
fs.Infof(Color(terminal.MagentaBg, "DEBUGNAME "+b.DebugName), Color(terminal.MagentaBg, msgiftrue))
|
||||
}
|
||||
}
|
||||
|
||||
func (b *bisyncRun) debugFn(nametocheck string, fn func()) {
|
||||
if b.DebugName != "" && b.DebugName == nametocheck {
|
||||
fn()
|
||||
}
|
||||
}
|
||||
|
||||
// waitFor runs fn() until it returns true or the timeout expires
|
||||
func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
|
||||
const individualWait = 1 * time.Second
|
||||
for i := 0; i < int(totalWait/individualWait); i++ {
|
||||
ok = fn()
|
||||
if ok {
|
||||
return ok
|
||||
}
|
||||
fs.Infof(nil, Color(terminal.YellowFg, "%s: %vs"), msg, int(totalWait/individualWait)-i)
|
||||
time.Sleep(individualWait)
|
||||
}
|
||||
return false
|
||||
}
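For illustration only (not part of the commit): the same once-per-second polling pattern as waitFor, shown in isolation with a stand-in probe instead of bisync's real condition.

package main

import (
    "fmt"
    "time"
)

func main() {
    const totalWait = 5 * time.Second
    const individualWait = 1 * time.Second
    deadline := time.Now().Add(2 * time.Second)
    probe := func() bool { return time.Now().After(deadline) } // stand-in for fn()
    ok := false
    for i := 0; i < int(totalWait/individualWait); i++ {
        if ok = probe(); ok {
            break
        }
        fmt.Printf("waiting: %ds left\n", int(totalWait/individualWait)-i)
        time.Sleep(individualWait)
    }
    fmt.Println("condition met:", ok)
}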
|
||||
|
||||
// mainly to make sure tests don't interfere with each other when running more than one
|
||||
func resetGlobals() {
|
||||
downloadHash = false
|
||||
logger = operations.NewLoggerOpt()
|
||||
ignoreListingChecksum = false
|
||||
ignoreListingModtime = false
|
||||
hashTypes = nil
|
||||
queueCI = nil
|
||||
hashType = 0
|
||||
fsrc, fdst = nil, nil
|
||||
fcrypt = nil
|
||||
Opt = Options{}
|
||||
once = gosync.Once{}
|
||||
downloadHashWarn = gosync.Once{}
|
||||
firstDownloadHash = gosync.Once{}
|
||||
ls1 = newFileList()
|
||||
ls2 = newFileList()
|
||||
err = nil
|
||||
firstErr = nil
|
||||
marchCtx = nil
|
||||
}
|
||||
|
@ -2,67 +2,310 @@ package bisync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
mutex "sync" // renamed as "sync" already in use
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/accounting"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/fs/sync"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
)
|
||||
|
||||
func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string) error {
|
||||
// Results represents a pair of synced files, as reported by the LoggerFn
|
||||
// Bisync uses this to determine what happened during the sync, and modify the listings accordingly
|
||||
type Results struct {
|
||||
Src string
|
||||
Dst string
|
||||
Name string
|
||||
AltName string
|
||||
Size int64
|
||||
Modtime time.Time
|
||||
Hash string
|
||||
Flags string
|
||||
Sigil operations.Sigil
|
||||
Err error
|
||||
Winner operations.Winner
|
||||
IsWinner bool
|
||||
IsSrc bool
|
||||
IsDst bool
|
||||
Origin string
|
||||
}
|
||||
|
||||
// ResultsSlice is a slice of Results (obviously)
|
||||
type ResultsSlice []Results
|
||||
|
||||
func (rs *ResultsSlice) has(name string) bool {
|
||||
for _, r := range *rs {
|
||||
if r.Name == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var (
|
||||
logger = operations.NewLoggerOpt()
|
||||
lock mutex.Mutex
|
||||
once mutex.Once
|
||||
ignoreListingChecksum bool
|
||||
ignoreListingModtime bool
|
||||
hashTypes map[string]hash.Type
|
||||
queueCI *fs.ConfigInfo
|
||||
)
|
||||
|
||||
// allows us to get the right hashtype during the LoggerFn without knowing whether it's Path1/Path2
|
||||
func getHashType(fname string) hash.Type {
|
||||
ht, ok := hashTypes[fname]
|
||||
if ok {
|
||||
return ht
|
||||
}
|
||||
return hash.None
|
||||
}
|
||||
|
||||
// FsPathIfAny handles type assertions and returns a formatted bilib.FsPath if valid, otherwise ""
|
||||
func FsPathIfAny(x fs.DirEntry) string {
|
||||
obj, ok := x.(fs.Object)
|
||||
if x != nil && ok {
|
||||
return bilib.FsPath(obj.Fs())
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func resultName(result Results, side, src, dst fs.DirEntry) string {
|
||||
if side != nil {
|
||||
return side.Remote()
|
||||
} else if result.IsSrc && dst != nil {
|
||||
return dst.Remote()
|
||||
} else if src != nil {
|
||||
return src.Remote()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// returns the opposite side's name, only if different
|
||||
func altName(name string, src, dst fs.DirEntry) string {
|
||||
if src != nil && dst != nil {
|
||||
if src.Remote() != dst.Remote() {
|
||||
switch name {
|
||||
case src.Remote():
|
||||
return dst.Remote()
|
||||
case dst.Remote():
|
||||
return src.Remote()
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// WriteResults is Bisync's LoggerFn
|
||||
func WriteResults(ctx context.Context, sigil operations.Sigil, src, dst fs.DirEntry, err error) {
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
|
||||
opt := operations.GetLoggerOpt(ctx)
|
||||
result := Results{
|
||||
Sigil: sigil,
|
||||
Src: FsPathIfAny(src),
|
||||
Dst: FsPathIfAny(dst),
|
||||
Err: err,
|
||||
Origin: "sync",
|
||||
}
|
||||
|
||||
result.Winner = operations.WinningSide(ctx, sigil, src, dst, err)
|
||||
|
||||
fss := []fs.DirEntry{src, dst}
|
||||
for i, side := range fss {
|
||||
|
||||
result.Name = resultName(result, side, src, dst)
|
||||
result.AltName = altName(result.Name, src, dst)
|
||||
result.IsSrc = i == 0
|
||||
result.IsDst = i == 1
|
||||
result.Flags = "-"
|
||||
if side != nil {
|
||||
result.Size = side.Size()
|
||||
if !ignoreListingModtime {
|
||||
result.Modtime = side.ModTime(ctx).In(TZ)
|
||||
}
|
||||
if !ignoreListingChecksum {
|
||||
sideObj, ok := side.(fs.ObjectInfo)
|
||||
if ok {
|
||||
result.Hash, _ = sideObj.Hash(ctx, getHashType(sideObj.Fs().Name()))
|
||||
result.Hash, _ = tryDownloadHash(ctx, sideObj, result.Hash)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
result.IsWinner = result.Winner.Obj == side
|
||||
|
||||
// used during resync only
|
||||
if err == fs.ErrorIsDir {
|
||||
if src != nil {
|
||||
result.Src = src.Remote()
|
||||
result.Name = src.Remote()
|
||||
} else {
|
||||
result.Dst = dst.Remote()
|
||||
result.Name = dst.Remote()
|
||||
}
|
||||
result.Flags = "d"
|
||||
result.Size = -1
|
||||
}
|
||||
|
||||
prettyprint(result, "writing result", fs.LogLevelDebug)
|
||||
if result.Size < 0 && result.Flags != "d" && ((queueCI.CheckSum && !downloadHash) || queueCI.SizeOnly) {
|
||||
once.Do(func() {
|
||||
fs.Logf(result.Name, Color(terminal.YellowFg, "Files of unknown size (such as Google Docs) do not sync reliably with --checksum or --size-only. Consider using modtime instead (the default) or --drive-skip-gdocs"))
|
||||
})
|
||||
}
|
||||
|
||||
err := json.NewEncoder(opt.JSON).Encode(result)
|
||||
if err != nil {
|
||||
fs.Errorf(result, "Error encoding JSON: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ReadResults decodes the JSON data from WriteResults
|
||||
func ReadResults(results io.Reader) []Results {
|
||||
dec := json.NewDecoder(results)
|
||||
var slice []Results
|
||||
for {
|
||||
var r Results
|
||||
if err := dec.Decode(&r); err == io.EOF {
|
||||
break
|
||||
}
|
||||
prettyprint(r, "result", fs.LogLevelDebug)
|
||||
slice = append(slice, r)
|
||||
}
|
||||
return slice
|
||||
}
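For illustration only (not part of the commit): WriteResults and ReadResults exchange plain newline-delimited JSON. A standalone sketch with a cut-down struct (the real Results carries many more fields) shows the round trip.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
)

// miniResult is a simplified stand-in for bisync's Results struct.
type miniResult struct {
    Name string
    Size int64
}

func main() {
    var buf bytes.Buffer
    enc := json.NewEncoder(&buf) // WriteResults encodes into opt.JSON like this
    _ = enc.Encode(miniResult{Name: "file1.txt", Size: 42})
    _ = enc.Encode(miniResult{Name: "subdir/file20.txt", Size: 7})

    dec := json.NewDecoder(&buf) // ReadResults decodes until io.EOF like this
    var all []miniResult
    for {
        var r miniResult
        if err := dec.Decode(&r); err == io.EOF {
            break
        }
        all = append(all, r)
    }
    fmt.Println(all) // [{file1.txt 42} {subdir/file20.txt 7}]
}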
|
||||
|
||||
// for setup code shared by both fastCopy and resyncDir
|
||||
func (b *bisyncRun) preCopy(ctx context.Context) context.Context {
|
||||
queueCI = fs.GetConfig(ctx)
|
||||
ignoreListingChecksum = b.opt.IgnoreListingChecksum
|
||||
ignoreListingModtime = !b.opt.Compare.Modtime
|
||||
hashTypes = map[string]hash.Type{
|
||||
b.fs1.Name(): b.opt.Compare.HashType1,
|
||||
b.fs2.Name(): b.opt.Compare.HashType2,
|
||||
}
|
||||
logger.LoggerFn = WriteResults
|
||||
overridingEqual := false
|
||||
if (b.opt.Compare.Modtime && b.opt.Compare.Checksum) || b.opt.Compare.DownloadHash {
|
||||
overridingEqual = true
|
||||
fs.Debugf(nil, "overriding equal")
|
||||
// otherwise impossible in Sync, so override Equal
|
||||
ctx = b.EqualFn(ctx)
|
||||
}
|
||||
if b.opt.ResyncMode == PreferOlder || b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller {
|
||||
overridingEqual = true
|
||||
fs.Debugf(nil, "overriding equal")
|
||||
ctx = b.EqualFn(ctx)
|
||||
}
|
||||
ctxCopyLogger := operations.WithSyncLogger(ctx, logger)
|
||||
if b.opt.Compare.Checksum && (b.opt.Compare.NoSlowHash || b.opt.Compare.SlowHashSyncOnly) && b.opt.Compare.SlowHashDetected {
|
||||
// set here in case !b.opt.Compare.Modtime
|
||||
queueCI = fs.GetConfig(ctxCopyLogger)
|
||||
if b.opt.Compare.NoSlowHash {
|
||||
queueCI.CheckSum = false
|
||||
}
|
||||
if b.opt.Compare.SlowHashSyncOnly && !overridingEqual {
|
||||
queueCI.CheckSum = true
|
||||
}
|
||||
}
|
||||
return ctxCopyLogger
|
||||
}
|
||||
|
||||
func (b *bisyncRun) fastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string) ([]Results, error) {
|
||||
if b.InGracefulShutdown {
|
||||
return nil, nil
|
||||
}
|
||||
ctx = b.preCopy(ctx)
|
||||
if err := b.saveQueue(files, queueName); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctxCopy, filterCopy := filter.AddConfig(b.opt.setDryRun(ctx))
|
||||
for _, file := range files.ToList() {
|
||||
if err := filterCopy.AddFile(file); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
alias := b.aliases.Alias(file)
|
||||
if alias != file {
|
||||
if err := filterCopy.AddFile(alias); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return sync.CopyDir(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
|
||||
b.SyncCI = fs.GetConfig(ctxCopy) // allows us to request graceful shutdown
|
||||
accounting.MaxCompletedTransfers = -1 // we need a complete list in the event of graceful shutdown
|
||||
ctxCopy, b.CancelSync = context.WithCancel(ctxCopy)
|
||||
b.testFn()
|
||||
err := sync.Sync(ctxCopy, fdst, fsrc, b.opt.CreateEmptySrcDirs)
|
||||
prettyprint(logger, "logger", fs.LogLevelDebug)
|
||||
|
||||
getResults := ReadResults(logger.JSON)
|
||||
fs.Debugf(nil, "Got %v results for %v", len(getResults), queueName)
|
||||
|
||||
lineFormat := "%s %8d %s %s %s %q\n"
|
||||
for _, result := range getResults {
|
||||
fs.Debugf(nil, lineFormat, result.Flags, result.Size, result.Hash, "", result.Modtime, result.Name)
|
||||
}
|
||||
|
||||
return getResults, err
|
||||
}
|
||||
|
||||
func (b *bisyncRun) fastDelete(ctx context.Context, f fs.Fs, files bilib.Names, queueName string) error {
|
||||
if err := b.saveQueue(files, queueName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
transfers := fs.GetConfig(ctx).Transfers
|
||||
|
||||
ctxRun, filterDelete := filter.AddConfig(b.opt.setDryRun(ctx))
|
||||
|
||||
for _, file := range files.ToList() {
|
||||
if err := filterDelete.AddFile(file); err != nil {
|
||||
return err
|
||||
func (b *bisyncRun) retryFastCopy(ctx context.Context, fsrc, fdst fs.Fs, files bilib.Names, queueName string, results []Results, err error) ([]Results, error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
if err != nil && b.opt.Resilient && !b.InGracefulShutdown && ci.Retries > 1 {
|
||||
for tries := 1; tries <= ci.Retries; tries++ {
|
||||
fs.Logf(queueName, Color(terminal.YellowFg, "Received error: %v - retrying as --resilient is set. Retry %d/%d"), err, tries, ci.Retries)
|
||||
accounting.GlobalStats().ResetErrors()
|
||||
if retryAfter := accounting.GlobalStats().RetryAfter(); !retryAfter.IsZero() {
|
||||
d := time.Until(retryAfter)
|
||||
if d > 0 {
|
||||
fs.Logf(nil, "Received retry after error - sleeping until %s (%v)", retryAfter.Format(time.RFC3339Nano), d)
|
||||
time.Sleep(d)
|
||||
}
|
||||
}
|
||||
if ci.RetriesInterval > 0 {
|
||||
naptime(ci.RetriesInterval)
|
||||
}
|
||||
results, err = b.fastCopy(ctx, fsrc, fdst, files, queueName)
|
||||
if err == nil || b.InGracefulShutdown {
|
||||
return results, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return results, err
|
||||
}
|
||||
|
||||
objChan := make(fs.ObjectsChan, transfers)
|
||||
errChan := make(chan error, 1)
|
||||
go func() {
|
||||
errChan <- operations.DeleteFiles(ctxRun, objChan)
|
||||
}()
|
||||
err := operations.ListFn(ctxRun, f, func(obj fs.Object) {
|
||||
remote := obj.Remote()
|
||||
if files.Has(remote) {
|
||||
objChan <- obj
|
||||
}
|
||||
})
|
||||
close(objChan)
|
||||
opErr := <-errChan
|
||||
if err == nil {
|
||||
err = opErr
|
||||
}
|
||||
return err
|
||||
func (b *bisyncRun) resyncDir(ctx context.Context, fsrc, fdst fs.Fs) ([]Results, error) {
|
||||
ctx = b.preCopy(ctx)
|
||||
|
||||
err := sync.CopyDir(ctx, fdst, fsrc, b.opt.CreateEmptySrcDirs)
|
||||
prettyprint(logger, "logger", fs.LogLevelDebug)
|
||||
|
||||
getResults := ReadResults(logger.JSON)
|
||||
fs.Debugf(nil, "Got %v results for %v", len(getResults), "resync")
|
||||
|
||||
return getResults, err
|
||||
}
|
||||
|
||||
// operation should be "make" or "remove"
|
||||
func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bilib.Names, dirsList *fileList, operation string) {
|
||||
func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bilib.Names, dirsList *fileList, results *[]Results, operation string) {
|
||||
if b.InGracefulShutdown {
|
||||
return
|
||||
}
|
||||
fs.Debugf(nil, "syncing empty dirs")
|
||||
if b.opt.CreateEmptySrcDirs && (!b.opt.Resync || operation == "make") {
|
||||
|
||||
candidatesList := candidates.ToList()
|
||||
@ -73,18 +316,52 @@ func (b *bisyncRun) syncEmptyDirs(ctx context.Context, dst fs.Fs, candidates bil
|
||||
|
||||
for _, s := range candidatesList {
|
||||
var direrr error
|
||||
if dirsList.has(s) { //make sure it's a dir, not a file
|
||||
if dirsList.has(s) { // make sure it's a dir, not a file
|
||||
r := Results{}
|
||||
r.Name = s
|
||||
r.Size = -1
|
||||
r.Modtime = dirsList.getTime(s).In(time.UTC)
|
||||
r.Flags = "d"
|
||||
r.Err = nil
|
||||
r.Origin = "syncEmptyDirs"
|
||||
r.Winner = operations.Winner{ // note: Obj not set
|
||||
Side: "src",
|
||||
Err: nil,
|
||||
}
|
||||
|
||||
rSrc := r
|
||||
rDst := r
|
||||
rSrc.IsSrc = true
|
||||
rSrc.IsDst = false
|
||||
rDst.IsSrc = false
|
||||
rDst.IsDst = true
|
||||
rSrc.IsWinner = true
|
||||
rDst.IsWinner = false
|
||||
|
||||
if operation == "remove" {
|
||||
//note: we need to use Rmdirs instead of Rmdir because directories will fail to delete if they have other empty dirs inside of them.
|
||||
direrr = operations.Rmdirs(ctx, dst, s, false)
|
||||
// directories made empty by the sync will have already been deleted during the sync
|
||||
// this just catches the already-empty ones (excluded from sync by --files-from filter)
|
||||
direrr = operations.TryRmdir(ctx, dst, s)
|
||||
rSrc.Sigil = operations.MissingOnSrc
|
||||
rDst.Sigil = operations.MissingOnSrc
|
||||
rSrc.Dst = s
|
||||
rDst.Dst = s
|
||||
rSrc.Winner.Side = "none"
|
||||
rDst.Winner.Side = "none"
|
||||
} else if operation == "make" {
|
||||
direrr = operations.Mkdir(ctx, dst, s)
|
||||
rSrc.Sigil = operations.MissingOnDst
|
||||
rDst.Sigil = operations.MissingOnDst
|
||||
rSrc.Src = s
|
||||
rDst.Src = s
|
||||
} else {
|
||||
direrr = fmt.Errorf("invalid operation. Expected 'make' or 'remove', received '%q'", operation)
|
||||
}
|
||||
|
||||
if direrr != nil {
|
||||
fs.Debugf(nil, "Error syncing directory: %v", direrr)
|
||||
} else {
|
||||
*results = append(*results, rSrc, rDst)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -98,3 +375,16 @@ func (b *bisyncRun) saveQueue(files bilib.Names, jobName string) error {
|
||||
queueFile := fmt.Sprintf("%s.%s.que", b.basePath, jobName)
|
||||
return files.Save(queueFile)
|
||||
}
|
||||
|
||||
func naptime(totalWait time.Duration) {
|
||||
expireTime := time.Now().Add(totalWait)
|
||||
fs.Logf(nil, "will retry in %v at %v", totalWait, expireTime.Format("2006-01-02 15:04:05 MST"))
|
||||
for i := 0; time.Until(expireTime) > 0; i++ {
|
||||
if i > 0 && i%10 == 0 {
|
||||
fs.Infof(nil, Color(terminal.Dim, "retrying in %v..."), time.Until(expireTime).Round(1*time.Second))
|
||||
} else {
|
||||
fs.Debugf(nil, Color(terminal.Dim, "retrying in %v..."), time.Until(expireTime).Round(1*time.Second))
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
|
||||
|
@ -74,6 +74,12 @@ func rcBisync(ctx context.Context, in rc.Params) (out rc.Params, err error) {
|
||||
if opt.Workdir, err = in.GetString("workdir"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
}
|
||||
if opt.BackupDir1, err = in.GetString("backupdir1"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
}
|
||||
if opt.BackupDir2, err = in.GetString("backupdir2"); rc.NotErrParamNotFound(err) {
|
||||
return
|
||||
}
|
||||
|
||||
checkSync, err := in.GetString("checkSync")
|
||||
if rc.NotErrParamNotFound(err) {
|
||||
|
450
cmd/bisync/resolve.go
Normal file
@ -0,0 +1,450 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"mime"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/operations"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
)
|
||||
|
||||
// Prefer describes strategies for resolving sync conflicts
|
||||
type Prefer = fs.Enum[preferChoices]
|
||||
|
||||
// Supported --conflict-resolve strategies
|
||||
const (
|
||||
PreferNone Prefer = iota
|
||||
PreferPath1
|
||||
PreferPath2
|
||||
PreferNewer
|
||||
PreferOlder
|
||||
PreferLarger
|
||||
PreferSmaller
|
||||
)
|
||||
|
||||
type preferChoices struct{}
|
||||
|
||||
func (preferChoices) Choices() []string {
|
||||
return []string{
|
||||
PreferNone: "none",
|
||||
PreferNewer: "newer",
|
||||
PreferOlder: "older",
|
||||
PreferLarger: "larger",
|
||||
PreferSmaller: "smaller",
|
||||
PreferPath1: "path1",
|
||||
PreferPath2: "path2",
|
||||
}
|
||||
}
|
||||
|
||||
func (preferChoices) Type() string {
|
||||
return "string"
|
||||
}
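For illustration only (not part of the commit): Choices() above relies on an indexed composite literal, so each Prefer constant doubles as the slice index of its flag string. A standalone sketch of that mapping, with the fs.Enum plumbing omitted.

package main

import "fmt"

type prefer int

const (
    preferNone prefer = iota
    preferPath1
    preferPath2
    preferNewer
)

// Indexed composite literal: the constant's value selects its position,
// so the entries can be listed in any order.
var preferNames = []string{
    preferNone:  "none",
    preferNewer: "newer",
    preferPath1: "path1",
    preferPath2: "path2",
}

func main() {
    fmt.Println(preferNames[preferNewer]) // "newer"
    for i, name := range preferNames {    // reverse lookup when parsing a flag value
        if name == "path2" {
            fmt.Println(prefer(i)) // 2
        }
    }
}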
|
||||
|
||||
// ConflictResolveList is a list of --conflict-resolve flag choices used in the help
|
||||
var ConflictResolveList = Opt.ConflictResolve.Help()
|
||||
|
||||
// ConflictLoserAction describes possible actions to take on the loser of a sync conflict
|
||||
type ConflictLoserAction = fs.Enum[conflictLoserChoices]
|
||||
|
||||
// Supported --conflict-loser actions
|
||||
const (
|
||||
ConflictLoserSkip ConflictLoserAction = iota // Reserved as zero but currently unused
|
||||
ConflictLoserNumber // file.conflict1, file.conflict2, file.conflict3, etc.
|
||||
ConflictLoserPathname // file.path1, file.path2
|
||||
ConflictLoserDelete // delete the loser, keep winner only
|
||||
)
|
||||
|
||||
type conflictLoserChoices struct{}
|
||||
|
||||
func (conflictLoserChoices) Choices() []string {
|
||||
return []string{
|
||||
ConflictLoserNumber: "num",
|
||||
ConflictLoserPathname: "pathname",
|
||||
ConflictLoserDelete: "delete",
|
||||
}
|
||||
}
|
||||
|
||||
func (conflictLoserChoices) Type() string {
|
||||
return "ConflictLoserAction"
|
||||
}
|
||||
|
||||
// ConflictLoserList is a list of --conflict-loser flag choices used in the help
|
||||
var ConflictLoserList = Opt.ConflictLoser.Help()
|
||||
|
||||
func (b *bisyncRun) setResolveDefaults(ctx context.Context) error {
|
||||
if b.opt.ConflictLoser == ConflictLoserSkip {
|
||||
b.opt.ConflictLoser = ConflictLoserNumber
|
||||
}
|
||||
if b.opt.ConflictSuffixFlag == "" {
|
||||
b.opt.ConflictSuffixFlag = "conflict"
|
||||
}
|
||||
suffixes := strings.Split(b.opt.ConflictSuffixFlag, ",")
|
||||
if len(suffixes) == 1 {
|
||||
b.opt.ConflictSuffix1 = suffixes[0]
|
||||
b.opt.ConflictSuffix2 = suffixes[0]
|
||||
} else if len(suffixes) == 2 {
|
||||
b.opt.ConflictSuffix1 = suffixes[0]
|
||||
b.opt.ConflictSuffix2 = suffixes[1]
|
||||
} else {
|
||||
return fmt.Errorf("--conflict-suffix cannot have more than 2 comma-separated values. Received %v: %v", len(suffixes), suffixes)
|
||||
}
|
||||
// replace glob variables, if any
|
||||
t := time.Now() // capture static time here so it is the same for all files throughout this run
|
||||
b.opt.ConflictSuffix1 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix1, t)
|
||||
b.opt.ConflictSuffix2 = bilib.AppyTimeGlobs(b.opt.ConflictSuffix2, t)
|
||||
|
||||
// append dot (intentionally allow more than one)
|
||||
b.opt.ConflictSuffix1 = "." + b.opt.ConflictSuffix1
|
||||
b.opt.ConflictSuffix2 = "." + b.opt.ConflictSuffix2
|
||||
|
||||
// checks and warnings
|
||||
if (b.opt.ConflictResolve == PreferNewer || b.opt.ConflictResolve == PreferOlder) && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --conflict-resolve %s as at least one remote does not support modtimes."), b.opt.ConflictResolve.String())
|
||||
b.opt.ConflictResolve = PreferNone
|
||||
} else if (b.opt.ConflictResolve == PreferNewer || b.opt.ConflictResolve == PreferOlder) && !b.opt.Compare.Modtime {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --conflict-resolve %s as --compare does not include modtime."), b.opt.ConflictResolve.String())
|
||||
b.opt.ConflictResolve = PreferNone
|
||||
}
|
||||
if (b.opt.ConflictResolve == PreferLarger || b.opt.ConflictResolve == PreferSmaller) && !b.opt.Compare.Size {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --conflict-resolve %s as --compare does not include size."), b.opt.ConflictResolve.String())
|
||||
b.opt.ConflictResolve = PreferNone
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type renames map[string]renamesInfo // [originalName]newName (remember the originalName may have an alias)
|
||||
// the newName may be the same as the old name (if winner), but should not be blank, unless we're deleting.
|
||||
// the oldNames may not match each other, if we're normalizing case or unicode
|
||||
// all names should be "remotes" (relative names, without base path)
|
||||
type renamesInfo struct {
|
||||
path1 namePair
|
||||
path2 namePair
|
||||
}
|
||||
type namePair struct {
|
||||
oldName string
|
||||
newName string
|
||||
}
|
||||
|
||||
func (b *bisyncRun) resolve(ctxMove context.Context, path1, path2, file, alias string, renameSkipped, copy1to2, copy2to1 *bilib.Names, ds1, ds2 *deltaSet) error {
|
||||
winningPath := 0
|
||||
if b.opt.ConflictResolve != PreferNone {
|
||||
winningPath = b.conflictWinner(ds1, ds2, file, alias)
|
||||
if winningPath > 0 {
|
||||
fs.Infof(file, Color(terminal.GreenFg, "The winner is: Path%d"), winningPath)
|
||||
} else {
|
||||
fs.Infof(file, Color(terminal.RedFg, "A winner could not be determined."))
|
||||
}
|
||||
}
|
||||
|
||||
suff1 := b.opt.ConflictSuffix1 // copy to new var to make sure our changes here don't persist
|
||||
suff2 := b.opt.ConflictSuffix2
|
||||
if b.opt.ConflictLoser == ConflictLoserPathname && b.opt.ConflictSuffix1 == b.opt.ConflictSuffix2 {
|
||||
// numerate, but not if user supplied two different suffixes
|
||||
suff1 += "1"
|
||||
suff2 += "2"
|
||||
}
|
||||
|
||||
r := renamesInfo{
|
||||
path1: namePair{
|
||||
oldName: file,
|
||||
newName: SuffixName(ctxMove, file, suff1),
|
||||
},
|
||||
path2: namePair{
|
||||
oldName: alias,
|
||||
newName: SuffixName(ctxMove, alias, suff2),
|
||||
},
|
||||
}
|
||||
|
||||
// handle auto-numbering
|
||||
// note that we still queue copies for both files, whether or not we renamed
|
||||
// we also set these for ConflictLoserDelete in case there is no winner.
|
||||
if b.opt.ConflictLoser == ConflictLoserNumber || b.opt.ConflictLoser == ConflictLoserDelete {
|
||||
num := b.numerate(ctxMove, 1, file, alias)
|
||||
switch winningPath {
|
||||
case 1: // keep path1, rename path2
|
||||
r.path1.newName = r.path1.oldName
|
||||
r.path2.newName = SuffixName(ctxMove, r.path2.oldName, b.opt.ConflictSuffix2+fmt.Sprint(num))
|
||||
case 2: // keep path2, rename path1
|
||||
r.path1.newName = SuffixName(ctxMove, r.path1.oldName, b.opt.ConflictSuffix1+fmt.Sprint(num))
|
||||
r.path2.newName = r.path2.oldName
|
||||
default: // no winner, so rename both to different numbers (unless suffixes are already different)
|
||||
if b.opt.ConflictSuffix1 == b.opt.ConflictSuffix2 {
|
||||
r.path1.newName = SuffixName(ctxMove, r.path1.oldName, b.opt.ConflictSuffix1+fmt.Sprint(num))
|
||||
// let's just make sure num + 1 is available...
|
||||
num2 := b.numerate(ctxMove, num+1, file, alias)
|
||||
r.path2.newName = SuffixName(ctxMove, r.path2.oldName, b.opt.ConflictSuffix2+fmt.Sprint(num2))
|
||||
} else {
|
||||
// suffixes are different, so numerate independently
|
||||
num = b.numerateSingle(ctxMove, 1, file, alias, 1)
|
||||
r.path1.newName = SuffixName(ctxMove, r.path1.oldName, b.opt.ConflictSuffix1+fmt.Sprint(num))
|
||||
num = b.numerateSingle(ctxMove, 1, file, alias, 2)
|
||||
r.path2.newName = SuffixName(ctxMove, r.path2.oldName, b.opt.ConflictSuffix2+fmt.Sprint(num))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// when winningPath == 0 (no winner), we ignore settings and rename both, do not delete
|
||||
// note also that deletes and renames are mutually exclusive -- we never delete one path and rename the other.
|
||||
if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 1 {
|
||||
// delete 2, copy 1 to 2
|
||||
err = b.delete(ctxMove, r.path2, path2, path1, b.fs2, 2, 1, renameSkipped)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.path2.newName = ""
|
||||
// copy the one that wasn't deleted
|
||||
b.indent("Path1", r.path1.oldName, "Queue copy to Path2")
|
||||
copy1to2.Add(r.path1.oldName)
|
||||
} else if b.opt.ConflictLoser == ConflictLoserDelete && winningPath == 2 {
|
||||
// delete 1, copy 2 to 1
|
||||
err = b.delete(ctxMove, r.path1, path1, path2, b.fs1, 1, 2, renameSkipped)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.path1.newName = ""
|
||||
// copy the one that wasn't deleted
|
||||
b.indent("Path2", r.path2.oldName, "Queue copy to Path1")
|
||||
copy2to1.Add(r.path2.oldName)
|
||||
} else {
|
||||
err = b.rename(ctxMove, r.path1, path1, path2, b.fs1, 1, 2, winningPath, copy1to2, renameSkipped)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = b.rename(ctxMove, r.path2, path2, path1, b.fs2, 2, 1, winningPath, copy2to1, renameSkipped)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
b.renames[r.path1.oldName] = r // note map index is path1's oldName, which may be different from path2 if aliases
|
||||
return nil
|
||||
}
|
||||
|
||||
// SuffixName adds the current --conflict-suffix to the remote, obeying
|
||||
// --suffix-keep-extension if set
|
||||
// It is a close cousin of operations.SuffixName, but we don't want to
|
||||
// use ci.Suffix for this because it might be used for --backup-dir.
|
||||
func SuffixName(ctx context.Context, remote, suffix string) string {
|
||||
if suffix == "" {
|
||||
return remote
|
||||
}
|
||||
ci := fs.GetConfig(ctx)
|
||||
if ci.SuffixKeepExtension {
|
||||
var (
|
||||
base = remote
|
||||
exts = ""
|
||||
first = true
|
||||
ext = path.Ext(remote)
|
||||
)
|
||||
for ext != "" {
|
||||
// Look up second and subsequent extensions in mime types.
|
||||
// If they aren't found then don't keep it as an extension.
|
||||
if !first && mime.TypeByExtension(ext) == "" {
|
||||
break
|
||||
}
|
||||
base = base[:len(base)-len(ext)]
|
||||
exts = ext + exts
|
||||
first = false
|
||||
ext = path.Ext(base)
|
||||
}
|
||||
return base + suffix + exts
|
||||
}
|
||||
return remote + suffix
|
||||
}
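For illustration only (not part of the commit): a simplified, single-extension sketch of the suffix-before-extension behaviour SuffixName applies when --suffix-keep-extension is set (the real function also walks multi-part extensions such as .tar.gz via the mime table).

package main

import (
    "fmt"
    "path"
)

// suffixKeepExt inserts the suffix before the last extension only.
func suffixKeepExt(remote, suffix string) string {
    ext := path.Ext(remote) // ".txt"
    return remote[:len(remote)-len(ext)] + suffix + ext
}

func main() {
    fmt.Println(suffixKeepExt("subdir/file1.txt", ".conflict1")) // subdir/file1.conflict1.txt
    fmt.Println(suffixKeepExt("subdir/file1.txt", ".path2"))     // subdir/file1.path2.txt
}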
|
||||
|
||||
// NotEmpty checks whether set is not empty
|
||||
func (r renames) NotEmpty() bool {
|
||||
return len(r) > 0
|
||||
}
|
||||
|
||||
func (ri *renamesInfo) getNames(is1to2 bool) (srcOldName, srcNewName, dstOldName, dstNewName string) {
|
||||
if is1to2 {
|
||||
return ri.path1.oldName, ri.path1.newName, ri.path2.oldName, ri.path2.newName
|
||||
}
|
||||
return ri.path2.oldName, ri.path2.newName, ri.path1.oldName, ri.path1.newName
|
||||
}
|
||||
|
||||
// work out the lowest number that neither side has, return it for suffix
|
||||
func (b *bisyncRun) numerate(ctx context.Context, startnum int, file, alias string) int {
|
||||
for i := startnum; i < math.MaxInt; i++ {
|
||||
iStr := fmt.Sprint(i)
|
||||
if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
|
||||
!ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) &&
|
||||
!ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
|
||||
!ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) {
|
||||
// make sure it still holds true with suffixes switched (it should)
|
||||
if !ls1.has(SuffixName(ctx, file, b.opt.ConflictSuffix2+iStr)) &&
|
||||
!ls1.has(SuffixName(ctx, alias, b.opt.ConflictSuffix2+iStr)) &&
|
||||
!ls2.has(SuffixName(ctx, file, b.opt.ConflictSuffix1+iStr)) &&
|
||||
!ls2.has(SuffixName(ctx, alias, b.opt.ConflictSuffix1+iStr)) {
|
||||
fs.Debugf(file, "The first available suffix is: %s", iStr)
|
||||
return i
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems
|
||||
}
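For illustration only (not part of the commit): the core of numerate is "find the lowest n such that neither listing already contains file.<suffix><n>". A standalone sketch against a hypothetical set of existing names.

package main

import "fmt"

func main() {
    existing := map[string]bool{ // hypothetical union of the ls1 and ls2 names
        "file1.txt":           true,
        "file1.conflict1.txt": true,
        "file1.conflict2.txt": true,
    }
    base, ext, suffix := "file1", ".txt", ".conflict"
    num := 1
    for existing[fmt.Sprintf("%s%s%d%s", base, suffix, num, ext)] {
        num++
    }
    fmt.Println("first available suffix number:", num) // 3
}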
|
||||
|
||||
// like numerate, but consider only one side's suffix (for when suffixes are different)
|
||||
func (b *bisyncRun) numerateSingle(ctx context.Context, startnum int, file, alias string, path int) int {
|
||||
lsA, lsB := ls1, ls2
|
||||
suffix := b.opt.ConflictSuffix1
|
||||
if path == 2 {
|
||||
lsA, lsB = ls2, ls1
|
||||
suffix = b.opt.ConflictSuffix2
|
||||
}
|
||||
for i := startnum; i < math.MaxInt; i++ {
|
||||
iStr := fmt.Sprint(i)
|
||||
if !lsA.has(SuffixName(ctx, file, suffix+iStr)) &&
|
||||
!lsA.has(SuffixName(ctx, alias, suffix+iStr)) &&
|
||||
!lsB.has(SuffixName(ctx, file, suffix+iStr)) &&
|
||||
!lsB.has(SuffixName(ctx, alias, suffix+iStr)) {
|
||||
fs.Debugf(file, "The first available suffix is: %s", iStr)
|
||||
return i
|
||||
}
|
||||
}
|
||||
return 0 // not really possible, as no one has 9223372036854775807 conflicts, and if they do, they have bigger problems
|
||||
}
|
||||
|
||||
func (b *bisyncRun) rename(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum, winningPath int, q, renameSkipped *bilib.Names) error {
|
||||
if winningPath == thisPathNum {
|
||||
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Not renaming Path%d copy, as it was determined the winner", thisPathNum))
|
||||
} else {
|
||||
skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "rename")
|
||||
if !skip {
|
||||
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.newName, fmt.Sprintf("Renaming Path%d copy", thisPathNum))
|
||||
ctx = b.setBackupDir(ctx, thisPathNum) // in case already a file with new name
|
||||
if err = operations.MoveFile(ctx, thisFs, thisFs, thisNamePair.newName, thisNamePair.oldName); err != nil {
|
||||
err = fmt.Errorf("%s rename failed for %s: %w", thisPath, thisPath+thisNamePair.oldName, err)
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
renameSkipped.Add(thisNamePair.oldName) // (due to dry-run, not equality)
|
||||
}
|
||||
}
|
||||
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thatPath+thisNamePair.newName, fmt.Sprintf("Queue copy to Path%d", thatPathNum))
|
||||
q.Add(thisNamePair.newName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *bisyncRun) delete(ctx context.Context, thisNamePair namePair, thisPath, thatPath string, thisFs fs.Fs, thisPathNum, thatPathNum int, renameSkipped *bilib.Names) error {
|
||||
skip := operations.SkipDestructive(ctx, thisNamePair.oldName, "delete")
|
||||
if !skip {
|
||||
b.indent(fmt.Sprintf("!Path%d", thisPathNum), thisPath+thisNamePair.oldName, fmt.Sprintf("Deleting Path%d copy", thisPathNum))
|
||||
ctx = b.setBackupDir(ctx, thisPathNum)
|
||||
ci := fs.GetConfig(ctx)
|
||||
var backupDir fs.Fs
|
||||
if ci.BackupDir != "" {
|
||||
backupDir, err = operations.BackupDir(ctx, thisFs, thisFs, thisNamePair.oldName)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
}
|
||||
obj, err := thisFs.NewObject(ctx, thisNamePair.oldName)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
if err = operations.DeleteFileWithBackupDir(ctx, obj, backupDir); err != nil {
|
||||
err = fmt.Errorf("%s delete failed for %s: %w", thisPath, thisPath+thisNamePair.oldName, err)
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
renameSkipped.Add(thisNamePair.oldName) // (due to dry-run, not equality)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *bisyncRun) conflictWinner(ds1, ds2 *deltaSet, remote1, remote2 string) int {
|
||||
switch b.opt.ConflictResolve {
|
||||
case PreferPath1:
|
||||
return 1
|
||||
case PreferPath2:
|
||||
return 2
|
||||
case PreferNewer, PreferOlder:
|
||||
t1, t2 := ds1.time[remote1], ds2.time[remote2]
|
||||
return b.resolveNewerOlder(t1, t2, remote1, remote2, b.opt.ConflictResolve)
|
||||
case PreferLarger, PreferSmaller:
|
||||
s1, s2 := ds1.size[remote1], ds2.size[remote2]
|
||||
return b.resolveLargerSmaller(s1, s2, remote1, remote2, b.opt.ConflictResolve)
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// returns the winning path number, or 0 if winner can't be determined
|
||||
func (b *bisyncRun) resolveNewerOlder(t1, t2 time.Time, remote1, remote2 string, prefer Prefer) int {
|
||||
if fs.GetModifyWindow(b.octx, b.fs1, b.fs2) == fs.ModTimeNotSupported {
|
||||
fs.Infof(remote1, "Winner cannot be determined as at least one path lacks modtime support.")
|
||||
return 0
|
||||
}
|
||||
if t1.IsZero() || t2.IsZero() {
|
||||
fs.Infof(remote1, "Winner cannot be determined as at least one modtime is missing. Path1: %v, Path2: %v", t1, t2)
|
||||
return 0
|
||||
}
|
||||
if t1.After(t2) {
|
||||
if prefer == PreferNewer {
|
||||
fs.Infof(remote1, "Path1 is newer. Path1: %v, Path2: %v, Difference: %s", t1, t2, t1.Sub(t2))
|
||||
return 1
|
||||
} else if prefer == PreferOlder {
|
||||
fs.Infof(remote1, "Path2 is older. Path1: %v, Path2: %v, Difference: %s", t1, t2, t1.Sub(t2))
|
||||
return 2
|
||||
}
|
||||
} else if t1.Before(t2) {
|
||||
if prefer == PreferNewer {
|
||||
fs.Infof(remote1, "Path2 is newer. Path1: %v, Path2: %v, Difference: %s", t1, t2, t2.Sub(t1))
|
||||
return 2
|
||||
} else if prefer == PreferOlder {
|
||||
fs.Infof(remote1, "Path1 is older. Path1: %v, Path2: %v, Difference: %s", t1, t2, t2.Sub(t1))
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if t1.Equal(t2) {
|
||||
fs.Infof(remote1, "Winner cannot be determined as times are equal. Path1: %v, Path2: %v, Difference: %s", t1, t2, t2.Sub(t1))
|
||||
return 0
|
||||
}
|
||||
fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", t1, t2) // shouldn't happen unless prefer is of wrong type
|
||||
return 0
|
||||
}
|
||||
|
||||
// returns the winning path number, or 0 if winner can't be determined
|
||||
func (b *bisyncRun) resolveLargerSmaller(s1, s2 int64, remote1, remote2 string, prefer Prefer) int {
|
||||
if s1 < 0 || s2 < 0 {
|
||||
fs.Infof(remote1, "Winner cannot be determined as at least one size is unknown. Path1: %v, Path2: %v", s1, s2)
|
||||
return 0
|
||||
}
|
||||
if s1 > s2 {
|
||||
if prefer == PreferLarger {
|
||||
fs.Infof(remote1, "Path1 is larger. Path1: %v, Path2: %v, Difference: %v", s1, s2, s1-s2)
|
||||
return 1
|
||||
} else if prefer == PreferSmaller {
|
||||
fs.Infof(remote1, "Path2 is smaller. Path1: %v, Path2: %v, Difference: %v", s1, s2, s1-s2)
|
||||
return 2
|
||||
}
|
||||
} else if s1 < s2 {
|
||||
if prefer == PreferLarger {
|
||||
fs.Infof(remote1, "Path2 is larger. Path1: %v, Path2: %v, Difference: %v", s1, s2, s2-s1)
|
||||
return 2
|
||||
} else if prefer == PreferSmaller {
|
||||
fs.Infof(remote1, "Path1 is smaller. Path1: %v, Path2: %v, Difference: %v", s1, s2, s2-s1)
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if s1 == s2 {
|
||||
fs.Infof(remote1, "Winner cannot be determined as sizes are equal. Path1: %v, Path2: %v, Difference: %v", s1, s2, s1-s2)
|
||||
return 0
|
||||
}
|
||||
fs.Errorf(remote1, "Winner cannot be determined. Path1: %v, Path2: %v", s1, s2) // shouldn't happen unless prefer is of wrong type
|
||||
return 0
|
||||
}
|
226
cmd/bisync/resync.go
Normal file
@ -0,0 +1,226 @@
|
||||
package bisync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/rclone/rclone/cmd/bisync/bilib"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/filter"
|
||||
"github.com/rclone/rclone/lib/terminal"
|
||||
)
|
||||
|
||||
// for backward compatibility, --resync is now equivalent to --resync-mode path1
|
||||
// and either flag is sufficient without the other.
|
||||
func (b *bisyncRun) setResyncDefaults() {
|
||||
if b.opt.Resync && b.opt.ResyncMode == PreferNone {
|
||||
fs.Debugf(nil, Color(terminal.Dim, "defaulting to --resync-mode path1 as --resync is set"))
|
||||
b.opt.ResyncMode = PreferPath1
|
||||
}
|
||||
if b.opt.ResyncMode != PreferNone {
|
||||
b.opt.Resync = true
|
||||
Opt.Resync = true // shouldn't be using this one, but set to be safe
|
||||
}
|
||||
|
||||
// checks and warnings
|
||||
if (b.opt.ResyncMode == PreferNewer || b.opt.ResyncMode == PreferOlder) && (b.fs1.Precision() == fs.ModTimeNotSupported || b.fs2.Precision() == fs.ModTimeNotSupported) {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --resync-mode %s as at least one remote does not support modtimes."), b.opt.ResyncMode.String())
|
||||
b.opt.ResyncMode = PreferPath1
|
||||
} else if (b.opt.ResyncMode == PreferNewer || b.opt.ResyncMode == PreferOlder) && !b.opt.Compare.Modtime {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --resync-mode %s as --compare does not include modtime."), b.opt.ResyncMode.String())
|
||||
b.opt.ResyncMode = PreferPath1
|
||||
}
|
||||
if (b.opt.ResyncMode == PreferLarger || b.opt.ResyncMode == PreferSmaller) && !b.opt.Compare.Size {
|
||||
fs.Logf(nil, Color(terminal.YellowFg, "WARNING: ignoring --resync-mode %s as --compare does not include size."), b.opt.ResyncMode.String())
|
||||
b.opt.ResyncMode = PreferPath1
|
||||
}
|
||||
}
|
||||
|
||||
// resync implements the --resync mode.
|
||||
// It will generate path1 and path2 listings,
|
||||
// copy any unique files to the opposite path,
|
||||
// and resolve any differing files according to the --resync-mode.
|
||||
func (b *bisyncRun) resync(octx, fctx context.Context) error {
|
||||
fs.Infof(nil, "Copying Path2 files to Path1")
|
||||
|
||||
// Save blank filelists (will be filled from sync results)
|
||||
var ls1 = newFileList()
|
||||
var ls2 = newFileList()
|
||||
err = ls1.save(fctx, b.newListing1)
|
||||
if err != nil {
|
||||
b.handleErr(ls1, "error saving ls1 from resync", err, true, true)
|
||||
b.abort = true
|
||||
}
|
||||
err = ls2.save(fctx, b.newListing2)
|
||||
if err != nil {
|
||||
b.handleErr(ls2, "error saving ls2 from resync", err, true, true)
|
||||
b.abort = true
|
||||
}
|
||||
|
||||
// Check access health on the Path1 and Path2 filesystems
|
||||
// enforce even though this is --resync
|
||||
if b.opt.CheckAccess {
|
||||
fs.Infof(nil, "Checking access health")
|
||||
|
||||
filesNow1, filesNow2, err := b.findCheckFiles(fctx)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
|
||||
ds1 := &deltaSet{
|
||||
checkFiles: bilib.Names{},
|
||||
}
|
||||
|
||||
ds2 := &deltaSet{
|
||||
checkFiles: bilib.Names{},
|
||||
}
|
||||
|
||||
for _, file := range filesNow1.list {
|
||||
if filepath.Base(file) == b.opt.CheckFilename {
|
||||
ds1.checkFiles.Add(file)
|
||||
}
|
||||
}
|
||||
|
||||
for _, file := range filesNow2.list {
|
||||
if filepath.Base(file) == b.opt.CheckFilename {
|
||||
ds2.checkFiles.Add(file)
|
||||
}
|
||||
}
|
||||
|
||||
err = b.checkAccess(ds1.checkFiles, ds2.checkFiles)
|
||||
if err != nil {
|
||||
b.critical = true
|
||||
b.retryable = true
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var results2to1 []Results
|
||||
var results1to2 []Results
|
||||
queues := queues{}
|
||||
|
||||
b.indent("Path2", "Path1", "Resync is copying files to")
|
||||
ctxRun := b.opt.setDryRun(fctx)
|
||||
// fctx has our extra filters added!
|
||||
ctxSync, filterSync := filter.AddConfig(ctxRun)
|
||||
if filterSync.Opt.MinSize == -1 {
|
||||
fs.Debugf(nil, "filterSync.Opt.MinSize: %v", filterSync.Opt.MinSize)
|
||||
}
|
||||
b.resyncIs1to2 = false
|
||||
ctxSync = b.setResyncConfig(ctxSync)
|
||||
ctxSync = b.setBackupDir(ctxSync, 1)
|
||||
// 2 to 1
|
||||
if results2to1, err = b.resyncDir(ctxSync, b.fs2, b.fs1); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
|
||||
b.indent("Path1", "Path2", "Resync is copying files to")
|
||||
b.resyncIs1to2 = true
|
||||
ctxSync = b.setResyncConfig(ctxSync)
|
||||
ctxSync = b.setBackupDir(ctxSync, 2)
|
||||
// 1 to 2
|
||||
if results1to2, err = b.resyncDir(ctxSync, b.fs1, b.fs2); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
|
||||
fs.Infof(nil, "Resync updating listings")
|
||||
b.saveOldListings() // may not exist, as this is --resync
|
||||
b.replaceCurrentListings()
|
||||
|
||||
resultsToQueue := func(results []Results) bilib.Names {
|
||||
names := bilib.Names{}
|
||||
for _, result := range results {
|
||||
if result.Name != "" &&
|
||||
(result.Flags != "d" || b.opt.CreateEmptySrcDirs) &&
|
||||
result.IsSrc && result.Src != "" &&
|
||||
(result.Winner.Err == nil || result.Flags == "d") {
|
||||
names.Add(result.Name)
|
||||
}
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// resync 2to1
|
||||
queues.copy2to1 = resultsToQueue(results2to1)
|
||||
if err = b.modifyListing(fctx, b.fs2, b.fs1, results2to1, queues, false); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
|
||||
// resync 1to2
|
||||
queues.copy1to2 = resultsToQueue(results1to2)
|
||||
if err = b.modifyListing(fctx, b.fs1, b.fs2, results1to2, queues, true); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
|
||||
if b.opt.CheckSync == CheckSyncTrue && !b.opt.DryRun {
|
||||
path1 := bilib.FsPath(b.fs1)
|
||||
path2 := bilib.FsPath(b.fs2)
|
||||
fs.Infof(nil, "Validating listings for Path1 %s vs Path2 %s", quotePath(path1), quotePath(path2))
|
||||
if err := b.checkSync(b.listing1, b.listing2); err != nil {
|
||||
b.critical = true
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !b.opt.NoCleanup {
|
||||
_ = os.Remove(b.newListing1)
|
||||
_ = os.Remove(b.newListing2)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
--resync-mode implementation:
|
||||
PreferPath1: set ci.IgnoreExisting true, then false
|
||||
PreferPath2: set ci.IgnoreExisting false, then true
|
||||
PreferNewer: set ci.UpdateOlder in both directions
|
||||
PreferOlder: override EqualFn to implement custom logic
|
||||
PreferLarger: override EqualFn to implement custom logic
|
||||
PreferSmaller: override EqualFn to implement custom logic
|
||||
*/
|
||||
func (b *bisyncRun) setResyncConfig(ctx context.Context) context.Context {
|
||||
ci := fs.GetConfig(ctx)
|
||||
switch b.opt.ResyncMode {
|
||||
case PreferPath1:
|
||||
if !b.resyncIs1to2 { // 2to1 (remember 2to1 is first)
|
||||
ci.IgnoreExisting = true
|
||||
} else { // 1to2
|
||||
ci.IgnoreExisting = false
|
||||
}
|
||||
case PreferPath2:
|
||||
if !b.resyncIs1to2 { // 2to1 (remember 2to1 is first)
|
||||
ci.IgnoreExisting = false
|
||||
} else { // 1to2
|
||||
ci.IgnoreExisting = true
|
||||
}
|
||||
case PreferNewer:
|
||||
ci.UpdateOlder = true
|
||||
}
|
||||
// for older, larger, and smaller, we return it unchanged and handle it later
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (b *bisyncRun) resyncWhichIsWhich(src, dst fs.ObjectInfo) (path1, path2 fs.ObjectInfo) {
|
||||
if b.resyncIs1to2 {
|
||||
return src, dst
|
||||
}
|
||||
return dst, src
|
||||
}
|
||||
|
||||
// equal in this context really means "don't transfer", so we should
|
||||
// return true if the files are actually equal or if dest is winner,
|
||||
// false if src is winner
|
||||
// When we can't determine a winner, we end up running the normal Equal() to tie-break (due to our differ functions).
|
||||
func (b *bisyncRun) resyncWinningPathToEqual(winningPath int) bool {
|
||||
if b.resyncIs1to2 {
|
||||
return winningPath != 1
|
||||
}
|
||||
return winningPath != 2
|
||||
}
|
@ -1,9 +1,9 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
||||
- 109 - - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
||||
|
@ -1,9 +1,9 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
||||
- 109 - - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
||||
|
9
cmd/bisync/testdata/test_all_changed/golden/_testdir_path1.._testdir_path2.path1.lst-old
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
@ -1,9 +1,9 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
||||
- 109 - - 2004-01-02T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2004-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
||||
|
@ -1,9 +1,9 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 md5:294d25b294ff26a5243dba914ac3fbf7 - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 md5:d41d8cd98f00b204e9800998ecf8427e - 2005-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
||||
|
9
cmd/bisync/testdata/test_all_changed/golden/_testdir_path1.._testdir_path2.path2.lst-old
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy1.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy2.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy3.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy4.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.copy5.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 0 - - 2005-01-02T00:00:00.000000000+0000 "subdir/file20.txt"
|
176
cmd/bisync/testdata/test_all_changed/golden/test.log
vendored
@ -1,90 +1,138 @@
|
||||
(01) : test all-changed
|
||||
[36m(01) :[0m [34mtest all-changed[0m
|
||||
|
||||
|
||||
(02) : test initial bisync
|
||||
(03) : bisync resync
|
||||
[36m(02) :[0m [34mtest initial bisync[0m
|
||||
[36m(03) :[0m [34mbisync resync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Copying unique Path2 files to Path1
|
||||
INFO : Resynching Path1 to Path2
|
||||
INFO : Copying Path2 files to Path1
|
||||
INFO : - [34mPath2[0m [35mResync is copying files to[0m - [36mPath1[0m
|
||||
INFO : - [36mPath1[0m [35mResync is copying files to[0m - [36mPath2[0m
|
||||
INFO : Resync updating listings
|
||||
INFO : Bisync successful
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
(04) : test change timestamp on all files except RCLONE_TEST
|
||||
(05) : touch-glob 2005-01-02 {path1/} file*
|
||||
(06) : touch-glob 2005-01-02 {path1/}subdir file*
|
||||
[36m(04) :[0m [34mtest change timestamp on all files except RCLONE_TEST[0m
|
||||
[36m(05) :[0m [34mtouch-glob 2005-01-02 {path1/} file*[0m
|
||||
[36m(06) :[0m [34mtouch-glob 2005-01-02 {path1/}subdir file*[0m
|
||||
|
||||
(07) : test sync should pass
|
||||
(08) : bisync
|
||||
[36m(07) :[0m [34mtest sync should pass[0m
|
||||
[36m(08) :[0m [34mbisync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Building Path1 and Path2 listings
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : - Path1 File is newer - file1.copy1.txt
|
||||
INFO : - Path1 File is newer - file1.copy2.txt
|
||||
INFO : - Path1 File is newer - file1.copy3.txt
|
||||
INFO : - Path1 File is newer - file1.copy4.txt
|
||||
INFO : - Path1 File is newer - file1.copy5.txt
|
||||
INFO : - Path1 File is newer - file1.txt
|
||||
INFO : - Path1 File is newer - subdir/file20.txt
|
||||
INFO : Path1: 7 changes: 0 new, 7 newer, 0 older, 0 deleted
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy2.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy3.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy4.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.copy5.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mfile1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36msubdir/file20.txt[0m
|
||||
INFO : Path1: 7 changes: [32m 0 new[0m, [33m 7 modified[0m, [31m 0 deleted[0m
|
||||
INFO : ([33mModified[0m: [36m 7 newer[0m, [34m 0 older[0m)
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : Applying changes
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy1.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy2.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy3.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy4.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy5.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}subdir/file20.txt
|
||||
INFO : - Path1 Do queued copies to - Path2
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy2.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy3.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy4.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy5.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
|
||||
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : Bisync successful
|
||||
INFO : [32mBisync successful[0m
|
||||
|
||||
(09) : test change timestamp on all files including RCLONE_TEST
|
||||
(10) : touch-glob 2004-01-02 {path1/} *
|
||||
(11) : touch-glob 2004-01-02 {path1/}subdir *
|
||||
[36m(09) :[0m [34mtest change timestamp on all files including RCLONE_TEST[0m
|
||||
[36m(10) :[0m [34mtouch-glob 2004-01-02 {path1/} *[0m
|
||||
[36m(11) :[0m [34mtouch-glob 2004-01-02 {path1/}subdir *[0m
|
||||
|
||||
(12) : test sync should fail
|
||||
(13) : bisync
|
||||
[36m(12) :[0m [34mtest sync should fail[0m
|
||||
[36m(13) :[0m [34mbisync[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Building Path1 and Path2 listings
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : - Path1 File is OLDER - file1.copy1.txt
|
||||
INFO : - Path1 File is OLDER - file1.copy2.txt
|
||||
INFO : - Path1 File is OLDER - file1.copy3.txt
|
||||
INFO : - Path1 File is OLDER - file1.copy4.txt
|
||||
INFO : - Path1 File is OLDER - file1.copy5.txt
|
||||
INFO : - Path1 File is OLDER - file1.txt
|
||||
INFO : - Path1 File is OLDER - subdir/file20.txt
|
||||
INFO : - Path1 File is newer - RCLONE_TEST
|
||||
INFO : Path1: 8 changes: 0 new, 1 newer, 7 older, 0 deleted
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mRCLONE_TEST[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy2.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy3.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy4.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy5.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36msubdir/file20.txt[0m
|
||||
INFO : Path1: 8 changes: [32m 0 new[0m, [33m 8 modified[0m, [31m 0 deleted[0m
|
||||
INFO : ([33mModified[0m: [36m 1 newer[0m, [34m 7 older[0m)
|
||||
INFO : Path2 checking for diffs
|
||||
ERROR : Safety abort: all files were changed on Path1 "{path1/}". Run with --force if desired.
|
||||
NOTICE: Bisync aborted. Please try again.
|
||||
NOTICE: [31mBisync aborted. Please try again.[0m
|
||||
Bisync error: all files were changed
|
||||
|
||||
(14) : test sync with force should pass
|
||||
(15) : bisync force
|
||||
[36m(14) :[0m [34mtest sync with force should pass[0m
|
||||
[36m(15) :[0m [34mbisync force[0m
|
||||
INFO : [2mSetting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.[0m
|
||||
INFO : Bisyncing with Comparison Settings:
|
||||
{
|
||||
"Modtime": true,
|
||||
"Size": true,
|
||||
"Checksum": false,
|
||||
"NoSlowHash": false,
|
||||
"SlowHashSyncOnly": false,
|
||||
"DownloadHash": false
|
||||
}
|
||||
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
|
||||
INFO : Building Path1 and Path2 listings
|
||||
INFO : Path1 checking for diffs
|
||||
INFO : - Path1 File is OLDER - file1.copy1.txt
|
||||
INFO : - Path1 File is OLDER - file1.copy2.txt
|
||||
INFO : - Path1 File is OLDER - file1.copy3.txt
|
||||
INFO : - Path1 File is OLDER - file1.copy4.txt
|
||||
INFO : - Path1 File is OLDER - file1.copy5.txt
|
||||
INFO : - Path1 File is OLDER - file1.txt
|
||||
INFO : - Path1 File is OLDER - subdir/file20.txt
|
||||
INFO : - Path1 File is newer - RCLONE_TEST
|
||||
INFO : Path1: 8 changes: 0 new, 1 newer, 7 older, 0 deleted
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (newer)[0m[0m[0m - [36mRCLONE_TEST[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy2.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy3.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy4.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.copy5.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36mfile1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[33mFile changed: [35mtime (older)[0m[0m[0m - [36msubdir/file20.txt[0m
|
||||
INFO : Path1: 8 changes: [32m 0 new[0m, [33m 8 modified[0m, [31m 0 deleted[0m
|
||||
INFO : ([33mModified[0m: [36m 1 newer[0m, [34m 7 older[0m)
|
||||
INFO : Path2 checking for diffs
|
||||
INFO : Applying changes
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}RCLONE_TEST
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy1.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy2.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy3.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy4.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.copy5.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}file1.txt
|
||||
INFO : - Path1 Queue copy to Path2 - {path2/}subdir/file20.txt
|
||||
INFO : - Path1 Do queued copies to - Path2
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}RCLONE_TEST[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy2.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy3.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy4.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.copy5.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}file1.txt[0m
|
||||
INFO : - [36mPath1[0m [35m[32mQueue copy to[0m Path2[0m - [36m{path2/}subdir/file20.txt[0m
|
||||
INFO : - [36mPath1[0m [35mDo queued copies to[0m - [36mPath2[0m
|
||||
INFO : Updating listings
|
||||
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
|
||||
INFO : Bisync successful
|
||||
INFO : [32mBisync successful[0m
|
||||
|
5
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.copy1to2.que
vendored
Normal file
5
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.copy1to2.que
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
"file11.txt"
|
||||
"file2.txt"
|
||||
"file4.txt"
|
||||
"file5.txt.conflict1"
|
||||
"file7.txt"
|
5
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.copy2to1.que
vendored
Normal file
5
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.copy2to1.que
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
"file1.txt"
|
||||
"file10.txt"
|
||||
"file3.txt"
|
||||
"file5.txt.conflict2"
|
||||
"file6.txt"
|
1
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.delete1.que
vendored
Normal file
1
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.delete1.que
vendored
Normal file
@ -0,0 +1 @@
|
||||
"file3.txt"
|
@ -1,2 +1 @@
|
||||
"file2.txt"
|
||||
"file4.txt"
|
10
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.path1.lst-err
vendored
Normal file
10
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.path1.lst-err
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
|
||||
- 13 - - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
|
||||
- 39 - - 2001-03-04T00:00:00.000000000+0000 "file5.txt.conflict1"
|
||||
- 39 - - 2001-01-02T00:00:00.000000000+0000 "file5.txt.conflict2"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file7.txt"
|
10
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.path1.lst-new
vendored
Normal file
10
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.path1.lst-new
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
|
||||
- 13 - - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
|
||||
- 39 - - 2001-03-04T00:00:00.000000000+0000 "file5.txt.conflict1"
|
||||
- 39 - - 2001-01-02T00:00:00.000000000+0000 "file5.txt.conflict2"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file7.txt"
|
10
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.path1.lst-old
vendored
Normal file
10
cmd/bisync/testdata/test_backupdir/golden/_testdir_path1.._testdir_path2.path1.lst-old
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# bisync listing v1 from test
|
||||
- 109 - - 2000-01-01T00:00:00.000000000+0000 "RCLONE_TEST"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file1.txt"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file10.txt"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file11.txt"
|
||||
- 13 - - 2001-01-02T00:00:00.000000000+0000 "file2.txt"
|
||||
- 39 - - 2001-03-04T00:00:00.000000000+0000 "file5.txt.conflict1"
|
||||
- 39 - - 2001-01-02T00:00:00.000000000+0000 "file5.txt.conflict2"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file6.txt"
|
||||
- 19 - - 2001-01-02T00:00:00.000000000+0000 "file7.txt"
|
Some files were not shown because too many files have changed in this diff