// Package fichier provides an interface to the 1Fichier storage system.
package fichier

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)

const (
	rootID         = "0"
	apiBaseURL     = "https://api.1fichier.com/v1"
	minSleep       = 400 * time.Millisecond // api is extremely rate limited now
	maxSleep       = 5 * time.Second
	decayConstant  = 2 // bigger for slower decay, exponential
	attackConstant = 0 // start with max sleep
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "fichier",
		Description: "1Fichier",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Help: "Your API Key, get it from https://1fichier.com/console/params.pl.",
			Name: "api_key",
		}, {
			Help:     "If you want to download a shared folder, add this parameter.",
			Name:     "shared_folder",
			Advanced: true,
		}, {
			Help:       "If you want to download a shared file that is password protected, add this parameter.",
			Name:       "file_password",
			Advanced:   true,
			IsPassword: true,
		}, {
			Help:       "If you want to list the files in a shared folder that is password protected, add this parameter.",
			Name:       "folder_password",
			Advanced:   true,
			IsPassword: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Characters that need escaping
			//
			// '\\': '＼', // FULLWIDTH REVERSE SOLIDUS
			// '<':  '＜', // FULLWIDTH LESS-THAN SIGN
			// '>':  '＞', // FULLWIDTH GREATER-THAN SIGN
			// '"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
			// '\'': '＇', // FULLWIDTH APOSTROPHE
			// '$':  '＄', // FULLWIDTH DOLLAR SIGN
			// '`':  '｀', // FULLWIDTH GRAVE ACCENT
			//
			// Leading space and trailing space
			Default: (encoder.Display |
				encoder.EncodeBackSlash |
				encoder.EncodeSingleQuote |
				encoder.EncodeBackQuote |
				encoder.EncodeDoubleQuote |
				encoder.EncodeLtGt |
				encoder.EncodeDollar |
				encoder.EncodeLeftSpace |
				encoder.EncodeRightSpace |
				encoder.EncodeInvalidUtf8),
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	APIKey         string               `config:"api_key"`
	SharedFolder   string               `config:"shared_folder"`
	FilePassword   string               `config:"file_password"`
	FolderPassword string               `config:"folder_password"`
	Enc            encoder.MultiEncoder `config:"encoding"`
}

// Fs is the interface a cloud storage system must provide
type Fs struct {
	root       string
	name       string
	features   *fs.Features
	opt        Options
	dirCache   *dircache.DirCache
	baseClient *http.Client
	pacer      *fs.Pacer
	rest       *rest.Client
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	folderID, err := strconv.Atoi(pathID)
	if err != nil {
		return "", false, err
	}
	folders, err := f.listFolders(ctx, folderID)
	if err != nil {
		return "", false, err
	}

	for _, folder := range folders.SubFolders {
		if folder.Name == leaf {
			pathIDOut := strconv.Itoa(folder.ID)
			return pathIDOut, true, nil
		}
	}

	return "", false, nil
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	folderID, err := strconv.Atoi(pathID)
	if err != nil {
		return "", err
	}
	resp, err := f.makeFolder(ctx, leaf, folderID)
	if err != nil {
		return "", err
	}
	return strconv.Itoa(resp.FolderID), err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
	return fmt.Sprintf("1Fichier root '%s'", f.root)
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.Whirlpool)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(ctx context.Context, name string, root string, config configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	err := configstruct.Set(config, opt)
	if err != nil {
		return nil, err
	}

	// If using a Shared Folder override root
	if opt.SharedFolder != "" {
		root = ""
	}

	// Workaround for wonky parser
	root = strings.Trim(root, "/")

	f := &Fs{
		name:       name,
		root:       root,
		opt:        *opt,
		pacer:      fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant), pacer.AttackConstant(attackConstant))),
		baseClient: &http.Client{},
	}

	f.features = (&fs.Features{
		DuplicateFiles:          true,
		CanHaveEmptyDirectories: true,
		ReadMimeType:            true,
	}).Fill(ctx, f)
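
	// Build the HTTP client from the context so rclone's global options
	// (timeouts, user agent, etc.) apply to every API call.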
	client := fshttp.NewClient(ctx)

	f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

	f.rest.SetHeader("Authorization", "Bearer "+f.opt.APIKey)

	f.dirCache = dircache.New(root, rootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		f.features.Fill(ctx, &tempF)
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if f.opt.SharedFolder != "" {
		return f.listSharedFiles(ctx, f.opt.SharedFolder)
	}

	dirContent, err := f.listDir(ctx, dir)
	if err != nil {
		return nil, err
	}

	return dirContent, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return nil, err
	}
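
	// List the parent folder and scan it for the leaf name - the first
	// match wins if there are duplicate names.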
	files, err := f.listFiles(ctx, folderID)
	if err != nil {
		return nil, err
	}

	for _, file := range files.Items {
		if file.Filename == leaf {
			path, ok := f.dirCache.GetInv(directoryID)

			if !ok {
				return nil, errors.New("cannot find dir in dircache")
			}

			return f.newObjectFromFile(ctx, path, file), nil
		}
	}

	return nil, fs.ErrorObjectNotFound
}

// Put uploads the object to the remote path with the given modTime and size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
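	// Look for an existing object first so that Update is used rather than
	// creating a duplicate - 1Fichier allows several files with the same name.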
	existingObj, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(ctx, in, src, options...)
	default:
		return nil, err
	}
}

// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
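	// Sanity check the size first: refuse anything over the 300 GB per-file
	// limit and refuse empty files, which can't be uploaded.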
	if size > int64(300e9) {
		return nil, errors.New("file too big, can't upload")
	} else if size == 0 {
		return nil, fs.ErrorCantUploadEmptyFiles
	}
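
	// Uploading is a three step process: reserve an upload node, stream the
	// data to it, then finalise the upload to collect the download link.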
	nodeResponse, err := f.getUploadNode(ctx)
	if err != nil {
		return nil, err
	}

	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}

	_, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL, options...)
	if err != nil {
		return nil, err
	}
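
	// Finalise the upload on the node - the response carries the download
	// link(s) and metadata for the file that was just created.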
	fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
	if err != nil {
		return nil, err
	}

	if len(fileUploadResponse.Links) == 0 {
		return nil, errors.New("upload response not found")
	} else if len(fileUploadResponse.Links) > 1 {
		fs.Debugf(remote, "Multiple upload responses found, using the first")
	}
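
	// The API reports the size of the uploaded file as a string, so parse it
	// before building the returned object.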
	link := fileUploadResponse.Links[0]
	fileSize, err := strconv.ParseInt(link.Size, 10, 64)

	if err != nil {
		return nil, err
	}

	return &Object{
		fs:     f,
		remote: remote,
		file: File{
			CDN:         0,
			Checksum:    link.Whirlpool,
			ContentType: "",
			Date:        time.Now().Format("2006-01-02 15:04:05"),
			Filename:    link.Filename,
			Pass:        0,
			Size:        fileSize,
			URL:         link.Download,
		},
	}, nil
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	_, err := f.dirCache.FindDir(ctx, dir, true)
	return err
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}

	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return err
	}

	_, err = f.removeFolder(ctx, dir, folderID)
	if err != nil {
		return err
	}

	f.dirCache.FlushDir(dir)

	return nil
}

// Move src to this remote using server-side move operations.
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	// Find current directory ID
	_, currentDirectoryID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		return nil, err
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
	if err != nil {
		return nil, err
	}

	// If it is in the correct directory, just rename it
	var url string
	if currentDirectoryID == directoryID {
		resp, err := f.renameFile(ctx, srcObj.file.URL, leaf)
		if err != nil {
			return nil, fmt.Errorf("couldn't rename file: %w", err)
		}
		if resp.Status != "OK" {
			return nil, fmt.Errorf("couldn't rename file: %s", resp.Message)
		}
		url = resp.URLs[0].URL
	} else {
		folderID, err := strconv.Atoi(directoryID)
		if err != nil {
			return nil, err
		}
		resp, err := f.moveFile(ctx, srcObj.file.URL, folderID, leaf)
		if err != nil {
			return nil, fmt.Errorf("couldn't move file: %w", err)
		}
		if resp.Status != "OK" {
			return nil, fmt.Errorf("couldn't move file: %s", resp.Message)
		}
		url = resp.URLs[0]
	}
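
	// Read the file info back from its new URL to fill in the metadata on
	// the destination object.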
	file, err := f.readFileInfo(ctx, url)
	if err != nil {
		return nil, errors.New("couldn't read file data")
	}
	dstObj.setMetaData(*file)
	return dstObj, nil
}

// Copy src to this remote using server-side copy operations.
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Create temporary object
	dstObj, leaf, directoryID, err := f.createObject(ctx, remote)
	if err != nil {
		return nil, err
	}

	folderID, err := strconv.Atoi(directoryID)
	if err != nil {
		return nil, err
	}
	resp, err := f.copyFile(ctx, srcObj.file.URL, folderID, leaf)
	if err != nil {
		return nil, fmt.Errorf("couldn't copy file: %w", err)
	}
	if resp.Status != "OK" {
		return nil, fmt.Errorf("couldn't copy file: %s", resp.Message)
	}
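
	// The copy response reports the destination in ToURL - read its metadata
	// to fill in the new object.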
	file, err := f.readFileInfo(ctx, resp.URLs[0].ToURL)
	if err != nil {
		return nil, errors.New("couldn't read file data")
	}
	dstObj.setMetaData(*file)
	return dstObj, nil
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	opts := rest.Opts{
		Method:      "POST",
		Path:        "/user/info.cgi",
		ContentType: "application/json",
	}
	var accountInfo AccountInfo
	var resp *http.Response
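	// Call the user info endpoint through the pacer so the backend's rate
	// limit is respected and transient failures are retried.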
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.rest.CallJSON(ctx, &opts, nil, &accountInfo)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to read user info: %w", err)
	}

	// FIXME max upload size would be useful to use in Update
	usage = &fs.Usage{
		Used:  fs.NewUsageValue(accountInfo.ColdStorage),                                    // bytes in use
		Total: fs.NewUsageValue(accountInfo.AvailableColdStorage),                           // bytes total
		Free:  fs.NewUsageValue(accountInfo.AvailableColdStorage - accountInfo.ColdStorage), // bytes free
	}
	return usage, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	o, err := f.NewObject(ctx, remote)
	if err != nil {
		return "", err
	}
	return o.(*Object).file.URL, nil
}

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ dircache.DirCacher = (*Fs)(nil)
)