2022-08-28 13:21:57 +02:00
|
|
|
// Package union implements a virtual provider to join existing remotes.
|
2018-08-18 02:39:49 +02:00
|
|
|
package union
|
|
|
|
|
|
|
|
import (
|
2019-11-30 15:41:39 +01:00
|
|
|
"bufio"
|
2019-06-17 10:34:30 +02:00
|
|
|
"context"
|
2021-11-04 11:12:57 +01:00
|
|
|
"errors"
|
2018-08-18 02:39:49 +02:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"path"
|
|
|
|
"path/filepath"
|
|
|
|
"strings"
|
2019-11-30 15:41:39 +01:00
|
|
|
"sync"
|
2018-08-18 02:39:49 +02:00
|
|
|
"time"
|
|
|
|
|
2022-04-03 14:39:42 +02:00
|
|
|
"github.com/rclone/rclone/backend/union/common"
|
2019-11-30 15:41:39 +01:00
|
|
|
"github.com/rclone/rclone/backend/union/policy"
|
|
|
|
"github.com/rclone/rclone/backend/union/upstream"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/config/configmap"
|
|
|
|
"github.com/rclone/rclone/fs/config/configstruct"
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
2020-05-09 22:28:30 +02:00
|
|
|
"github.com/rclone/rclone/fs/operations"
|
2020-05-13 14:10:35 +02:00
|
|
|
"github.com/rclone/rclone/fs/walk"
|
2018-08-18 02:39:49 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// Register with Fs
func init() {
	// Registration info for the union backend: the option names here
	// ("upstreams", "action_policy", ...) form the backend's config schema.
	fsi := &fs.RegInfo{
		Name:        "union",
		Description: "Union merges the contents of several upstream fs",
		NewFs:       NewFs,
		MetadataInfo: &fs.MetadataInfo{
			Help: `Any metadata supported by the underlying remote is read and written.`,
		},
		Options: []fs.Option{{
			Name:     "upstreams",
			Help:     "List of space separated upstreams.\n\nCan be 'upstreama:test/dir upstreamb:', '\"upstreama:test/space:ro dir\" upstreamb:', etc.",
			Required: true,
		}, {
			// Policy defaults mirror mergerfs conventions (epall/epmfs/ff).
			Name:    "action_policy",
			Help:    "Policy to choose upstream on ACTION category.",
			Default: "epall",
		}, {
			Name:    "create_policy",
			Help:    "Policy to choose upstream on CREATE category.",
			Default: "epmfs",
		}, {
			Name:    "search_policy",
			Help:    "Policy to choose upstream on SEARCH category.",
			Default: "ff",
		}, {
			Name:    "cache_time",
			Help:    "Cache time of usage and free space (in seconds).\n\nThis option is only useful when a path preserving policy is used.",
			Default: 120,
		}, {
			Name: "min_free_space",
			Help: `Minimum viable free space for lfs/eplfs policies.

If a remote has less than this much free space then it won't be
considered for use in lfs or eplfs policies.`,
			Advanced: true,
			Default:  fs.Gibi,
		}},
	}
	fs.Register(fsi)
}
|
|
|
|
|
2019-11-30 15:41:39 +01:00
|
|
|
// Fs represents a union of upstreams.
//
// The three policies decide which upstream(s) are used for each
// category of operation (see the policy package).
type Fs struct {
	name         string         // name of this remote
	features     *fs.Features   // optional features
	opt          common.Options // options for this Fs
	root         string         // the path we are working on
	upstreams    []*upstream.Fs // slice of upstreams
	hashSet      hash.Set       // intersection of hash types
	actionPolicy policy.Policy  // policy for ACTION
	createPolicy policy.Policy  // policy for CREATE
	searchPolicy policy.Policy  // policy for SEARCH
}
|
|
|
|
|
2020-05-25 08:05:53 +02:00
|
|
|
// Wrap candidate objects in to a union Object
//
// The SEARCH policy picks the representative entry; all candidates are
// retained on the wrapper so ACTION operations can address every copy.
func (f *Fs) wrapEntries(entries ...upstream.Entry) (entry, error) {
	e, err := f.searchEntries(entries...)
	if err != nil {
		return nil, err
	}
	switch e := e.(type) {
	case *upstream.Object:
		return &Object{
			Object: e,
			fs:     f,
			co:     entries, // all candidate objects, not just the chosen one
		}, nil
	case *upstream.Directory:
		return &Directory{
			Directory: e,
			fs:        f,
			cd:        entries, // all candidate directories
		}, nil
	default:
		// Should be unreachable: searchEntries only returns the above types.
		return nil, fmt.Errorf("unknown object type %T", e)
	}
}
|
|
|
|
|
2018-08-18 02:39:49 +02:00
|
|
|
// Name of the remote (as passed into NewFs)
|
|
|
|
func (f *Fs) Name() string {
|
|
|
|
return f.name
|
|
|
|
}
|
|
|
|
|
|
|
|
// Root of the remote (as passed into NewFs)
|
|
|
|
func (f *Fs) Root() string {
|
|
|
|
return f.root
|
|
|
|
}
|
|
|
|
|
|
|
|
// String converts this Fs to a string
|
|
|
|
func (f *Fs) String() string {
|
|
|
|
return fmt.Sprintf("union root '%s'", f.root)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Features returns the optional features of this Fs
|
|
|
|
func (f *Fs) Features() *fs.Features {
|
|
|
|
return f.features
|
|
|
|
}
|
|
|
|
|
|
|
|
// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// Select the upstreams that hold dir via the ACTION policy.
	upstreams, err := f.action(ctx, dir)
	if err != nil {
		return err
	}
	errs := Errors(make([]error, len(upstreams)))
	// Remove the directory on every selected upstream concurrently,
	// collecting per-upstream errors tagged with the upstream name.
	multithread(len(upstreams), func(i int) {
		err := upstreams[i].Rmdir(ctx, dir)
		if err != nil {
			errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
		}
	})
	return errs.Err()
}
|
|
|
|
|
|
|
|
// Hashes returns the intersection of the hash types supported by
// all the upstreams (computed at NewFs time and stored in hashSet).
func (f *Fs) Hashes() hash.Set {
	return f.hashSet
}
|
|
|
|
|
2022-05-13 17:08:52 +02:00
|
|
|
// mkdir makes the directory passed in and returns the upstreams used
func (f *Fs) mkdir(ctx context.Context, dir string) ([]*upstream.Fs, error) {
	// Ask the CREATE policy which upstreams should hold dir.
	upstreams, err := f.create(ctx, dir)
	if err == fs.ErrorObjectNotFound {
		// The parent doesn't exist yet: recurse upwards and create it first.
		parent := parentDir(dir)
		if dir != parent {
			upstreams, err = f.mkdir(ctx, parent)
		} else if dir == "" {
			// If root dirs not created then create them
			upstreams, err = f.upstreams, nil
		}
	}
	if err != nil {
		return nil, err
	}
	errs := Errors(make([]error, len(upstreams)))
	// Create dir on all chosen upstreams concurrently.
	multithread(len(upstreams), func(i int) {
		err := upstreams[i].Mkdir(ctx, dir)
		if err != nil {
			errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
		}
	})
	err = errs.Err()
	if err != nil {
		return nil, err
	}
	// If created roots then choose one
	if dir == "" {
		upstreams, err = f.create(ctx, dir)
	}
	return upstreams, err
}
|
|
|
|
|
|
|
|
// Mkdir makes the root directory of the Fs object
|
|
|
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
|
|
|
_, err := f.mkdir(ctx, dir)
|
|
|
|
return err
|
2018-10-02 23:04:50 +02:00
|
|
|
}
|
|
|
|
|
2024-02-27 12:04:38 +01:00
|
|
|
// MkdirMetadata makes the root directory of the Fs object
func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) {
	// Ask the CREATE policy which upstreams should hold dir.
	upstreams, err := f.create(ctx, dir)
	if err != nil {
		return nil, err
	}
	errs := Errors(make([]error, len(upstreams)))
	entries := make([]upstream.Entry, len(upstreams))
	multithread(len(upstreams), func(i int) {
		u := upstreams[i]
		if do := u.Features().MkdirMetadata; do != nil {
			newDir, err := do(ctx, dir, metadata)
			if err != nil {
				errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
			} else {
				entries[i], err = u.WrapEntry(newDir)
				if err != nil {
					errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
				}
			}

		} else {
			// Just do Mkdir on upstreams which don't support MkdirMetadata
			// NOTE(review): entries[i] stays nil for these upstreams and is
			// passed to wrapEntries below — presumably tolerated there; verify.
			err := u.Mkdir(ctx, dir)
			if err != nil {
				errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
			}
		}
	})
	err = errs.Err()
	if err != nil {
		return nil, err
	}

	// Wrap the created directories into a single union Directory.
	entry, err := f.wrapEntries(entries...)
	if err != nil {
		return nil, err
	}
	newDir, ok := entry.(fs.Directory)
	if !ok {
		return nil, fmt.Errorf("internal error: expecting %T to be an fs.Directory", entry)
	}
	return newDir, nil
}
|
|
|
|
|
2020-06-04 23:25:14 +02:00
|
|
|
// Purge all files in the directory
//
// Implement this if you have a way of deleting all the files
// quicker than just running Remove() on the result of List()
//
// Return an error if it doesn't exist
func (f *Fs) Purge(ctx context.Context, dir string) error {
	// All upstreams must support Purge or we can't do it at all.
	for _, r := range f.upstreams {
		if r.Features().Purge == nil {
			return fs.ErrorCantPurge
		}
	}
	// Select the upstreams containing the root via the ACTION policy.
	upstreams, err := f.action(ctx, "")
	if err != nil {
		return err
	}
	errs := Errors(make([]error, len(upstreams)))
	multithread(len(upstreams), func(i int) {
		err := upstreams[i].Features().Purge(ctx, dir)
		// A missing directory on one upstream is not an error for the union.
		if errors.Is(err, fs.ErrorDirNotFound) {
			err = nil
		}
		if err != nil {
			errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
		}
	})
	return errs.Err()
}
|
|
|
|
|
2020-10-13 23:43:40 +02:00
|
|
|
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	// Copy only the representative upstream object.
	o := srcObj.UnWrapUpstream()
	su := o.UpstreamFs()
	if su.Features().Copy == nil {
		return nil, fs.ErrorCantCopy
	}
	// Find our upstream wrapper for the same underlying remote so the
	// result stays inside this union.
	var du *upstream.Fs
	for _, u := range f.upstreams {
		if operations.Same(u.RootFs, su.RootFs) {
			du = u
		}
	}
	if du == nil {
		return nil, fs.ErrorCantCopy
	}
	if !du.IsCreatable() {
		return nil, fs.ErrorPermissionDenied
	}
	co, err := du.Features().Copy(ctx, o, remote)
	if err != nil || co == nil {
		return nil, err
	}
	wo, err := f.wrapEntries(du.WrapObject(co))
	return wo.(*Object), err
}
|
|
|
|
|
2020-10-13 23:43:40 +02:00
|
|
|
// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	o, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	// Select the candidate copies of the object via the ACTION policy.
	entries, err := f.actionEntries(o.candidates()...)
	if err != nil {
		return nil, err
	}
	// Every involved upstream must support server-side Move (or Copy,
	// which we emulate Move with below).
	for _, e := range entries {
		if !operations.CanServerSideMove(e.UpstreamFs()) {
			return nil, fs.ErrorCantMove
		}
	}
	objs := make([]*upstream.Object, len(entries))
	errs := Errors(make([]error, len(entries)))
	multithread(len(entries), func(i int) {
		su := entries[i].UpstreamFs()
		o, ok := entries[i].(*upstream.Object)
		if !ok {
			errs[i] = fmt.Errorf("%s: %w", su.Name(), fs.ErrorNotAFile)
			return
		}
		// Find our upstream wrapper for the same underlying remote.
		var du *upstream.Fs
		for _, u := range f.upstreams {
			if operations.Same(u.RootFs, su.RootFs) {
				du = u
			}
		}
		if du == nil {
			errs[i] = fmt.Errorf("%s: %s: %w", su.Name(), remote, fs.ErrorCantMove)
			return
		}
		srcObj := o.UnWrap()
		duFeatures := du.Features()
		do := duFeatures.Move
		if duFeatures.Move == nil {
			// Fall back to Copy + Remove (checked above via CanServerSideMove).
			do = duFeatures.Copy
		}
		// Do the Move or Copy
		dstObj, err := do(ctx, srcObj, remote)
		if err != nil {
			errs[i] = fmt.Errorf("%s: %w", su.Name(), err)
			return
		}
		if dstObj == nil {
			errs[i] = fmt.Errorf("%s: destination object not found", su.Name())
			return
		}
		objs[i] = du.WrapObject(dstObj)
		// Delete the source object if Copy
		if duFeatures.Move == nil {
			err = srcObj.Remove(ctx)
			if err != nil {
				errs[i] = fmt.Errorf("%s: %w", su.Name(), err)
				return
			}
		}
	})
	// Collect the successfully moved copies and wrap them as one union Object.
	var en []upstream.Entry
	for _, o := range objs {
		if o != nil {
			en = append(en, o)
		}
	}
	e, err := f.wrapEntries(en...)
	if err != nil {
		return nil, err
	}
	return e.(*Object), errs.Err()
}
|
|
|
|
|
|
|
|
// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	sfs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(src, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	// Select the source upstreams containing srcRemote via the ACTION policy.
	upstreams, err := sfs.action(ctx, srcRemote)
	if err != nil {
		return err
	}
	for _, u := range upstreams {
		if u.Features().DirMove == nil {
			return fs.ErrorCantDirMove
		}
	}
	errs := Errors(make([]error, len(upstreams)))
	multithread(len(upstreams), func(i int) {
		su := upstreams[i]
		// Find our upstream wrapper for the same underlying remote.
		var du *upstream.Fs
		for _, u := range f.upstreams {
			if operations.Same(u.RootFs, su.RootFs) {
				du = u
			}
		}
		if du == nil {
			errs[i] = fmt.Errorf("%s: %s: %w", su.Name(), su.Root(), fs.ErrorCantDirMove)
			return
		}
		err := du.Features().DirMove(ctx, su.Fs, srcRemote, dstRemote)
		if err != nil {
			errs[i] = fmt.Errorf("%s: %w", du.Name()+":"+du.Root(), err)
		}
	})
	errs = errs.FilterNil()
	if len(errs) == 0 {
		return nil
	}
	// Report the combined errors unless every failure was DirExists,
	// in which case surface the sentinel so callers can detect it.
	for _, e := range errs {
		if !errors.Is(e, fs.ErrorDirExists) {
			return errs
		}
	}
	return fs.ErrorDirExists
}
|
|
|
|
|
2024-01-13 14:19:37 +01:00
|
|
|
// DirSetModTime sets the directory modtime for dir
func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error {
	// Select the upstreams containing dir via the ACTION policy.
	upstreams, err := f.action(ctx, dir)
	if err != nil {
		return err
	}
	errs := Errors(make([]error, len(upstreams)))
	multithread(len(upstreams), func(i int) {
		u := upstreams[i]
		// ignore DirSetModTime on upstreams which don't support it
		if do := u.Features().DirSetModTime; do != nil {
			err := do(ctx, dir, modTime)
			if err != nil {
				errs[i] = fmt.Errorf("%s: %w", upstreams[i].Name(), err)
			}
		}
	})
	return errs.Err()
}
|
|
|
|
|
2018-10-02 23:04:50 +02:00
|
|
|
// ChangeNotify calls the passed function with a path
// that has had changes. If the implementation
// uses polling, it should adhere to the given interval.
// At least one value will be written to the channel,
// specifying the initial value and updated values might
// follow. A 0 Duration should pause the polling.
// The ChangeNotify implementation must empty the channel
// regularly. When the channel gets closed, the implementation
// should stop polling and release resources.
func (f *Fs) ChangeNotify(ctx context.Context, fn func(string, fs.EntryType), ch <-chan time.Duration) {
	// One private polling channel per upstream that supports ChangeNotify.
	var uChans []chan time.Duration

	for _, u := range f.upstreams {
		if ChangeNotify := u.Features().ChangeNotify; ChangeNotify != nil {
			ch := make(chan time.Duration)
			uChans = append(uChans, ch)
			ChangeNotify(ctx, fn, ch)
		}
	}

	// Fan every interval update out to all upstream channels; when the
	// caller closes ch, close the upstream channels so they stop polling.
	go func() {
		for i := range ch {
			for _, c := range uChans {
				c <- i
			}
		}
		for _, c := range uChans {
			close(c)
		}
	}()
}
|
|
|
|
|
|
|
|
// DirCacheFlush resets the directory cache - used in testing
|
|
|
|
// as an optional interface
|
|
|
|
func (f *Fs) DirCacheFlush() {
|
2019-11-30 15:41:39 +01:00
|
|
|
multithread(len(f.upstreams), func(i int) {
|
|
|
|
if do := f.upstreams[i].Features().DirCacheFlush; do != nil {
|
|
|
|
do()
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-08-30 19:30:45 +02:00
|
|
|
// Tee in into n outputs
|
|
|
|
//
|
|
|
|
// When finished read the error from the channel
|
|
|
|
func multiReader(n int, in io.Reader) ([]io.Reader, <-chan error) {
|
|
|
|
readers := make([]io.Reader, n)
|
|
|
|
pipeWriters := make([]*io.PipeWriter, n)
|
|
|
|
writers := make([]io.Writer, n)
|
|
|
|
errChan := make(chan error, 1)
|
|
|
|
for i := range writers {
|
|
|
|
r, w := io.Pipe()
|
|
|
|
bw := bufio.NewWriter(w)
|
|
|
|
readers[i], pipeWriters[i], writers[i] = r, w, bw
|
|
|
|
}
|
|
|
|
go func() {
|
|
|
|
mw := io.MultiWriter(writers...)
|
|
|
|
es := make([]error, 2*n+1)
|
|
|
|
_, copyErr := io.Copy(mw, in)
|
|
|
|
es[2*n] = copyErr
|
|
|
|
// Flush the buffers
|
|
|
|
for i, bw := range writers {
|
|
|
|
es[i] = bw.(*bufio.Writer).Flush()
|
|
|
|
}
|
|
|
|
// Close the underlying pipes
|
|
|
|
for i, pw := range pipeWriters {
|
|
|
|
es[2*i] = pw.CloseWithError(copyErr)
|
|
|
|
}
|
|
|
|
errChan <- Errors(es).Err()
|
|
|
|
}()
|
|
|
|
return readers, errChan
|
|
|
|
}
|
|
|
|
|
2019-11-30 15:41:39 +01:00
|
|
|
// put uploads in to the upstreams chosen by the CREATE policy, creating
// the parent directory first if necessary. With stream set it uses
// PutStream (unknown size), otherwise Put.
func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, stream bool, options ...fs.OpenOption) (fs.Object, error) {
	srcPath := src.Remote()
	upstreams, err := f.create(ctx, srcPath)
	if err == fs.ErrorObjectNotFound {
		// Parent directory missing - create it, which also yields the upstreams.
		upstreams, err = f.mkdir(ctx, parentDir(srcPath))
	}
	if err != nil {
		return nil, err
	}
	if len(upstreams) == 1 {
		// Fast path: single upstream, no need to tee the input.
		u := upstreams[0]
		var o fs.Object
		var err error
		if stream {
			o, err = u.Features().PutStream(ctx, in, src, options...)
		} else {
			o, err = u.Put(ctx, in, src, options...)
		}
		if err != nil {
			return nil, err
		}
		e, err := f.wrapEntries(u.WrapObject(o))
		return e.(*Object), err
	}
	// Multi-threading
	// Tee the single input stream into one reader per upstream.
	readers, errChan := multiReader(len(upstreams), in)
	errs := Errors(make([]error, len(upstreams)+1)) // last slot for the tee error
	objs := make([]upstream.Entry, len(upstreams))
	multithread(len(upstreams), func(i int) {
		u := upstreams[i]
		var o fs.Object
		var err error
		if stream {
			o, err = u.Features().PutStream(ctx, readers[i], src, options...)
		} else {
			o, err = u.Put(ctx, readers[i], src, options...)
		}
		if err != nil {
			errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
			if len(upstreams) > 1 {
				// Drain the input buffer to allow other uploads to continue
				_, _ = io.Copy(io.Discard, readers[i])
			}
			return
		}
		objs[i] = u.WrapObject(o)
	})
	// Collect the error (if any) from the tee goroutine.
	errs[len(upstreams)] = <-errChan
	err = errs.Err()
	if err != nil {
		return nil, err
	}
	e, err := f.wrapEntries(objs...)
	return e.(*Object), err
}
|
|
|
|
|
2019-11-30 15:41:39 +01:00
|
|
|
// Put in to the remote path with the modTime given of the given size
|
2018-10-02 23:04:50 +02:00
|
|
|
//
|
|
|
|
// May create the object even if it returns an error - if so
|
|
|
|
// will return the object and the error, otherwise will return
|
|
|
|
// nil and the error
|
2019-11-30 15:41:39 +01:00
|
|
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
|
|
o, err := f.NewObject(ctx, src.Remote())
|
|
|
|
switch err {
|
|
|
|
case nil:
|
|
|
|
return o, o.Update(ctx, in, src, options...)
|
|
|
|
case fs.ErrorObjectNotFound:
|
|
|
|
return f.put(ctx, in, src, false, options...)
|
|
|
|
default:
|
2018-10-14 16:19:02 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
2018-08-18 02:39:49 +02:00
|
|
|
}
|
|
|
|
|
2019-11-30 15:41:39 +01:00
|
|
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
2018-08-18 02:39:49 +02:00
|
|
|
//
|
|
|
|
// May create the object even if it returns an error - if so
|
|
|
|
// will return the object and the error, otherwise will return
|
|
|
|
// nil and the error
|
2019-11-30 15:41:39 +01:00
|
|
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
|
|
o, err := f.NewObject(ctx, src.Remote())
|
|
|
|
switch err {
|
|
|
|
case nil:
|
|
|
|
return o, o.Update(ctx, in, src, options...)
|
|
|
|
case fs.ErrorObjectNotFound:
|
|
|
|
return f.put(ctx, in, src, true, options...)
|
|
|
|
default:
|
2018-10-14 16:19:02 +02:00
|
|
|
return nil, err
|
|
|
|
}
|
2019-11-30 15:41:39 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// About gets quota information from the Fs
|
|
|
|
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
|
|
|
usage := &fs.Usage{
|
|
|
|
Total: new(int64),
|
|
|
|
Used: new(int64),
|
|
|
|
Trashed: new(int64),
|
|
|
|
Other: new(int64),
|
|
|
|
Free: new(int64),
|
|
|
|
Objects: new(int64),
|
|
|
|
}
|
|
|
|
for _, u := range f.upstreams {
|
|
|
|
usg, err := u.About(ctx)
|
2021-11-04 11:12:57 +01:00
|
|
|
if errors.Is(err, fs.ErrorDirNotFound) {
|
2020-08-19 19:04:16 +02:00
|
|
|
continue
|
|
|
|
}
|
2019-11-30 15:41:39 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if usg.Total != nil && usage.Total != nil {
|
|
|
|
*usage.Total += *usg.Total
|
|
|
|
} else {
|
|
|
|
usage.Total = nil
|
|
|
|
}
|
|
|
|
if usg.Used != nil && usage.Used != nil {
|
|
|
|
*usage.Used += *usg.Used
|
|
|
|
} else {
|
|
|
|
usage.Used = nil
|
|
|
|
}
|
|
|
|
if usg.Trashed != nil && usage.Trashed != nil {
|
|
|
|
*usage.Trashed += *usg.Trashed
|
|
|
|
} else {
|
|
|
|
usage.Trashed = nil
|
|
|
|
}
|
|
|
|
if usg.Other != nil && usage.Other != nil {
|
|
|
|
*usage.Other += *usg.Other
|
|
|
|
} else {
|
|
|
|
usage.Other = nil
|
|
|
|
}
|
|
|
|
if usg.Free != nil && usage.Free != nil {
|
|
|
|
*usage.Free += *usg.Free
|
|
|
|
} else {
|
|
|
|
usage.Free = nil
|
|
|
|
}
|
|
|
|
if usg.Objects != nil && usage.Objects != nil {
|
|
|
|
*usage.Objects += *usg.Objects
|
|
|
|
} else {
|
|
|
|
usage.Objects = nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return usage, nil
|
2018-08-18 02:39:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	entriesList := make([][]upstream.Entry, len(f.upstreams))
	errs := Errors(make([]error, len(f.upstreams)))
	// List dir on every upstream concurrently.
	multithread(len(f.upstreams), func(i int) {
		u := f.upstreams[i]
		entries, err := u.List(ctx, dir)
		if err != nil {
			errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
			return
		}
		// Wrap each raw entry with its upstream so it can be merged.
		uEntries := make([]upstream.Entry, len(entries))
		for j, e := range entries {
			uEntries[j], _ = u.WrapEntry(e)
		}
		entriesList[i] = uEntries
	})
	// If every upstream errored, fail; otherwise merge what we got.
	if len(errs) == len(errs.FilterNil()) {
		// DirNotFound on an upstream is not itself fatal ...
		errs = errs.Map(func(e error) error {
			if errors.Is(e, fs.ErrorDirNotFound) {
				return nil
			}
			return e
		})
		// ... but if that is all that happened, the dir really isn't there.
		if len(errs) == 0 {
			return nil, fs.ErrorDirNotFound
		}
		return nil, errs.Err()
	}
	return f.mergeDirEntries(entriesList)
}
|
|
|
|
|
2020-05-10 16:33:06 +02:00
|
|
|
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	var entriesList [][]upstream.Entry
	// One error slot per upstream so concurrent workers never contend.
	errs := Errors(make([]error, len(f.upstreams)))
	var mutex sync.Mutex // guards entriesList, which all workers append to
	multithread(len(f.upstreams), func(i int) {
		u := f.upstreams[i]
		// NOTE: deliberately shadows the named return value - each
		// worker's error goes into errs[i], not the outer err.
		var err error
		// Wrap each tranche's entries for this upstream and stash them
		// for merging after all upstreams have been listed.
		callback := func(entries fs.DirEntries) error {
			uEntries := make([]upstream.Entry, len(entries))
			for j, e := range entries {
				uEntries[j], _ = u.WrapEntry(e)
			}
			mutex.Lock()
			entriesList = append(entriesList, uEntries)
			mutex.Unlock()
			return nil
		}
		// Prefer the upstream's native ListR; otherwise fall back to a
		// recursive walk that feeds the same callback.
		do := u.Features().ListR
		if do != nil {
			err = do(ctx, dir, callback)
		} else {
			err = walk.ListR(ctx, u, dir, true, -1, walk.ListAll, callback)
		}
		if err != nil {
			errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
			return
		}
	})
	// If every upstream failed (no nil entries were filtered out),
	// decide between "directory not found" and a real error.
	if len(errs) == len(errs.FilterNil()) {
		errs = errs.Map(func(e error) error {
			if errors.Is(e, fs.ErrorDirNotFound) {
				return nil
			}
			return e
		})
		if len(errs) == 0 {
			// All upstreams reported DirNotFound.
			return fs.ErrorDirNotFound
		}
		return errs.Err()
	}
	// At least one upstream succeeded: merge the collected entries and
	// deliver them to the caller's callback in one tranche.
	entries, err := f.mergeDirEntries(entriesList)
	if err != nil {
		return err
	}
	return callback(entries)
}
|
|
|
|
|
2019-11-30 15:41:39 +01:00
|
|
|
// NewObject creates a new remote union file object
|
|
|
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
|
|
objs := make([]*upstream.Object, len(f.upstreams))
|
|
|
|
errs := Errors(make([]error, len(f.upstreams)))
|
|
|
|
multithread(len(f.upstreams), func(i int) {
|
|
|
|
u := f.upstreams[i]
|
|
|
|
o, err := u.NewObject(ctx, remote)
|
|
|
|
if err != nil && err != fs.ErrorObjectNotFound {
|
2021-11-04 11:12:57 +01:00
|
|
|
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
|
2019-11-30 15:41:39 +01:00
|
|
|
return
|
2018-08-18 02:39:49 +02:00
|
|
|
}
|
2019-11-30 15:41:39 +01:00
|
|
|
objs[i] = u.WrapObject(o)
|
|
|
|
})
|
|
|
|
var entries []upstream.Entry
|
|
|
|
for _, o := range objs {
|
|
|
|
if o != nil {
|
|
|
|
entries = append(entries, o)
|
2018-09-03 19:00:23 +02:00
|
|
|
}
|
2018-08-18 02:39:49 +02:00
|
|
|
}
|
2019-11-30 15:41:39 +01:00
|
|
|
if len(entries) == 0 {
|
|
|
|
return nil, fs.ErrorObjectNotFound
|
|
|
|
}
|
|
|
|
e, err := f.wrapEntries(entries...)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return e.(*Object), errs.Err()
|
2018-08-18 02:39:49 +02:00
|
|
|
}
|
|
|
|
|
2019-11-30 15:41:39 +01:00
|
|
|
// Precision is the greatest Precision of all upstreams
|
2018-08-18 02:39:49 +02:00
|
|
|
func (f *Fs) Precision() time.Duration {
|
2018-09-03 19:00:23 +02:00
|
|
|
var greatestPrecision time.Duration
|
2019-11-30 15:41:39 +01:00
|
|
|
for _, u := range f.upstreams {
|
|
|
|
if u.Precision() > greatestPrecision {
|
|
|
|
greatestPrecision = u.Precision()
|
2018-08-18 02:39:49 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return greatestPrecision
|
|
|
|
}
|
|
|
|
|
2019-11-30 15:41:39 +01:00
|
|
|
// action returns the upstreams selected by the configured action policy
// for a modifying operation on path.
func (f *Fs) action(ctx context.Context, path string) ([]*upstream.Fs, error) {
	return f.actionPolicy.Action(ctx, f.upstreams, path)
}
|
|
|
|
|
|
|
|
// actionEntries filters the given entries through the configured action
// policy.
func (f *Fs) actionEntries(entries ...upstream.Entry) ([]upstream.Entry, error) {
	return f.actionPolicy.ActionEntries(entries...)
}
|
|
|
|
|
|
|
|
// create returns the upstreams selected by the configured create policy
// for writing a new entry at path.
func (f *Fs) create(ctx context.Context, path string) ([]*upstream.Fs, error) {
	return f.createPolicy.Create(ctx, f.upstreams, path)
}
|
|
|
|
|
|
|
|
// searchEntries picks a single entry from the candidates using the
// configured search policy.
func (f *Fs) searchEntries(entries ...upstream.Entry) (upstream.Entry, error) {
	return f.searchPolicy.SearchEntries(entries...)
}
|
|
|
|
|
Spelling fixes
Fix spelling of: above, already, anonymous, associated,
authentication, bandwidth, because, between, blocks, calculate,
candidates, cautious, changelog, cleaner, clipboard, command,
completely, concurrently, considered, constructs, corrupt, current,
daemon, dependencies, deprecated, directory, dispatcher, download,
eligible, ellipsis, encrypter, endpoint, entrieslist, essentially,
existing writers, existing, expires, filesystem, flushing, frequently,
hierarchy, however, implementation, implements, inaccurate,
individually, insensitive, longer, maximum, metadata, modified,
multipart, namedirfirst, nextcloud, obscured, opened, optional,
owncloud, pacific, passphrase, password, permanently, persimmon,
positive, potato, protocol, quota, receiving, recommends, referring,
requires, revisited, satisfied, satisfies, satisfy, semver,
serialized, session, storage, strategies, stringlist, successful,
supported, surprise, temporarily, temporary, transactions, unneeded,
update, uploads, wrapped
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-09 02:17:24 +02:00
|
|
|
func (f *Fs) mergeDirEntries(entriesList [][]upstream.Entry) (fs.DirEntries, error) {
|
2019-11-30 15:41:39 +01:00
|
|
|
entryMap := make(map[string]([]upstream.Entry))
|
Spelling fixes
Fix spelling of: above, already, anonymous, associated,
authentication, bandwidth, because, between, blocks, calculate,
candidates, cautious, changelog, cleaner, clipboard, command,
completely, concurrently, considered, constructs, corrupt, current,
daemon, dependencies, deprecated, directory, dispatcher, download,
eligible, ellipsis, encrypter, endpoint, entrieslist, essentially,
existing writers, existing, expires, filesystem, flushing, frequently,
hierarchy, however, implementation, implements, inaccurate,
individually, insensitive, longer, maximum, metadata, modified,
multipart, namedirfirst, nextcloud, obscured, opened, optional,
owncloud, pacific, passphrase, password, permanently, persimmon,
positive, potato, protocol, quota, receiving, recommends, referring,
requires, revisited, satisfied, satisfies, satisfy, semver,
serialized, session, storage, strategies, stringlist, successful,
supported, surprise, temporarily, temporary, transactions, unneeded,
update, uploads, wrapped
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-09 02:17:24 +02:00
|
|
|
for _, en := range entriesList {
|
2019-11-30 15:41:39 +01:00
|
|
|
if en == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, entry := range en {
|
|
|
|
remote := entry.Remote()
|
|
|
|
if f.Features().CaseInsensitive {
|
|
|
|
remote = strings.ToLower(remote)
|
|
|
|
}
|
|
|
|
entryMap[remote] = append(entryMap[remote], entry)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
var entries fs.DirEntries
|
|
|
|
for path := range entryMap {
|
|
|
|
e, err := f.wrapEntries(entryMap[path]...)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
entries = append(entries, e)
|
|
|
|
}
|
|
|
|
return entries, nil
|
|
|
|
}
|
|
|
|
|
2020-11-27 18:02:00 +01:00
|
|
|
// Shutdown the backend, closing any background tasks and any
|
|
|
|
// cached connections.
|
|
|
|
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
|
|
errs := Errors(make([]error, len(f.upstreams)))
|
|
|
|
multithread(len(f.upstreams), func(i int) {
|
|
|
|
u := f.upstreams[i]
|
|
|
|
if do := u.Features().Shutdown; do != nil {
|
|
|
|
err := do(ctx)
|
2021-11-09 14:00:51 +01:00
|
|
|
if err != nil {
|
|
|
|
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
|
|
|
|
}
|
2020-11-27 18:02:00 +01:00
|
|
|
}
|
|
|
|
})
|
|
|
|
return errs.Err()
|
|
|
|
}
|
|
|
|
|
2023-05-12 12:44:01 +02:00
|
|
|
// CleanUp the trash in the Fs
|
|
|
|
//
|
|
|
|
// Implement this if you have a way of emptying the trash or
|
|
|
|
// otherwise cleaning up old versions of files.
|
|
|
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
|
|
|
errs := Errors(make([]error, len(f.upstreams)))
|
|
|
|
multithread(len(f.upstreams), func(i int) {
|
|
|
|
u := f.upstreams[i]
|
|
|
|
if do := u.Features().CleanUp; do != nil {
|
|
|
|
err := do(ctx)
|
|
|
|
if err != nil {
|
|
|
|
errs[i] = fmt.Errorf("%s: %w", u.Name(), err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
return errs.Err()
|
|
|
|
}
|
|
|
|
|
2018-08-18 02:39:49 +02:00
|
|
|
// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(common.Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	// Backward compatible with the old "remotes" config: every remote
	// except the last is marked read-only, matching the old semantics.
	if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
		for i := 0; i < len(opt.Remotes)-1; i++ {
			opt.Remotes[i] = opt.Remotes[i] + ":ro"
		}
		opt.Upstreams = opt.Remotes
	}
	if len(opt.Upstreams) == 0 {
		return nil, errors.New("union can't point to an empty upstream - check the value of the upstreams setting")
	}
	if len(opt.Upstreams) == 1 {
		return nil, errors.New("union can't point to a single upstream - check the value of the upstreams setting")
	}
	// Refuse a union that references itself, which would recurse.
	for _, u := range opt.Upstreams {
		if strings.HasPrefix(u, name+":") {
			return nil, errors.New("can't point union remote at itself - check the value of the upstreams setting")
		}
	}

	root = strings.Trim(root, "/")
	// Construct all upstreams concurrently; errors are collected per slot.
	upstreams := make([]*upstream.Fs, len(opt.Upstreams))
	errs := Errors(make([]error, len(opt.Upstreams)))
	multithread(len(opt.Upstreams), func(i int) {
		u := opt.Upstreams[i]
		upstreams[i], errs[i] = upstream.New(ctx, u, root, opt)
	})
	var usedUpstreams []*upstream.Fs
	var fserr error
	for i, err := range errs {
		if err != nil && err != fs.ErrorIsFile {
			return nil, err
		}
		// If any upstream returned ErrorIsFile, only those upstreams
		// are used (the root points at a file, not a directory).
		if err == fs.ErrorIsFile {
			usedUpstreams = append(usedUpstreams, upstreams[i])
			fserr = fs.ErrorIsFile
		}
	}
	if fserr == nil {
		usedUpstreams = upstreams
	}

	f := &Fs{
		name:      name,
		root:      root,
		opt:       *opt,
		upstreams: usedUpstreams,
	}
	// Correct root if definitely pointing to a file
	if fserr == fs.ErrorIsFile {
		f.root = path.Dir(f.root)
		if f.root == "." || f.root == "/" {
			f.root = ""
		}
	}
	err = upstream.Prepare(f.upstreams)
	if err != nil {
		return nil, err
	}
	// Resolve the three policies from their configured names.
	f.actionPolicy, err = policy.Get(opt.ActionPolicy)
	if err != nil {
		return nil, err
	}
	f.createPolicy, err = policy.Get(opt.CreatePolicy)
	if err != nil {
		return nil, err
	}
	f.searchPolicy, err = policy.Get(opt.SearchPolicy)
	if err != nil {
		return nil, err
	}
	fs.Debugf(f, "actionPolicy = %T, createPolicy = %T, searchPolicy = %T", f.actionPolicy, f.createPolicy, f.searchPolicy)
	// Start from the most permissive feature set, then mask it down to
	// the intersection of what the upstreams actually support.
	var features = (&fs.Features{
		CaseInsensitive:          true,
		DuplicateFiles:           false,
		ReadMimeType:             true,
		WriteMimeType:            true,
		CanHaveEmptyDirectories:  true,
		BucketBased:              true,
		SetTier:                  true,
		GetTier:                  true,
		ReadMetadata:             true,
		WriteMetadata:            true,
		UserMetadata:             true,
		ReadDirMetadata:          true,
		WriteDirMetadata:         true,
		WriteDirSetModTime:       true,
		UserDirMetadata:          true,
		DirModTimeUpdatesOnWrite: true,
		PartialUploads:           true,
	}).Fill(ctx, f)
	canMove, slowHash := true, false
	// NOTE: the loop variable deliberately shadows the outer f (*Fs);
	// inside this loop f is the upstream being inspected.
	for _, f := range upstreams {
		features = features.Mask(ctx, f) // Mask all upstream fs
		if !operations.CanServerSideMove(f) {
			canMove = false
		}
		slowHash = slowHash || f.Features().SlowHash
	}
	// We can move if all remotes support Move or Copy
	if canMove {
		features.Move = f.Move
	}

	// If any of the upstreams are SlowHash, propagate it
	features.SlowHash = slowHash

	// Enable ListR when every upstream either supports ListR or is local,
	// but not when all upstreams are local (the fallback walk is enough).
	if features.ListR == nil {
		for _, u := range upstreams {
			if u.Features().ListR != nil {
				features.ListR = f.ListR
			} else if !u.Features().IsLocal {
				features.ListR = nil
				break
			}
		}
	}

	// show that we wrap other backends
	features.Overlay = true

	f.features = features

	// Get common intersection of hashes
	hashSet := f.upstreams[0].Hashes()
	for _, u := range f.upstreams[1:] {
		hashSet = hashSet.Overlap(u.Hashes())
	}
	f.hashSet = hashSet

	// fserr is fs.ErrorIsFile when the root points at a file, which
	// callers use to adjust their behaviour.
	return f, fserr
}
|
|
|
|
|
|
|
|
// parentDir returns the parent directory of absPath using slash
// separators, with "" standing in for the top level.
func parentDir(absPath string) string {
	slashed := filepath.ToSlash(absPath)
	dir := path.Dir(strings.TrimRight(slashed, "/"))
	if dir == "." {
		return ""
	}
	return dir
}
|
|
|
|
|
|
|
|
func multithread(num int, fn func(int)) {
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for i := 0; i < num; i++ {
|
|
|
|
wg.Add(1)
|
|
|
|
i := i
|
|
|
|
go func() {
|
|
|
|
defer wg.Done()
|
|
|
|
fn(i)
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
wg.Wait()
|
2018-08-18 02:39:49 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check the interfaces are satisfied at compile time - any missing
// method on *Fs becomes a build error rather than a runtime surprise.
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirSetModTimer  = (*Fs)(nil)
	_ fs.MkdirMetadataer = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.Shutdowner      = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
)
|