2017-09-19 17:09:43 +02:00
|
|
|
|
// Package pcloud provides an interface to the Pcloud
|
|
|
|
|
// object storage system.
|
|
|
|
|
package pcloud
|
|
|
|
|
|
|
|
|
|
// FIXME cleanup returns login required?
|
|
|
|
|
|
|
|
|
|
// FIXME mime type? Fix overview if implement.
|
|
|
|
|
|
|
|
|
|
import (
|
2019-06-17 10:34:30 +02:00
|
|
|
|
"context"
|
2021-11-04 11:12:57 +01:00
|
|
|
|
"errors"
|
2017-09-19 17:09:43 +02:00
|
|
|
|
"fmt"
|
|
|
|
|
"io"
|
|
|
|
|
"net/http"
|
|
|
|
|
"net/url"
|
|
|
|
|
"path"
|
2024-06-09 04:07:01 +02:00
|
|
|
|
"strconv"
|
2017-09-19 17:09:43 +02:00
|
|
|
|
"strings"
|
|
|
|
|
"time"
|
|
|
|
|
|
2019-07-28 19:47:38 +02:00
|
|
|
|
"github.com/rclone/rclone/backend/pcloud/api"
|
|
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
|
"github.com/rclone/rclone/fs/config"
|
|
|
|
|
"github.com/rclone/rclone/fs/config/configmap"
|
|
|
|
|
"github.com/rclone/rclone/fs/config/configstruct"
|
|
|
|
|
"github.com/rclone/rclone/fs/config/obscure"
|
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
2022-06-14 14:34:37 +02:00
|
|
|
|
"github.com/rclone/rclone/fs/fshttp"
|
2019-07-28 19:47:38 +02:00
|
|
|
|
"github.com/rclone/rclone/fs/hash"
|
2021-11-26 20:55:50 +01:00
|
|
|
|
"github.com/rclone/rclone/fs/walk"
|
2019-07-28 19:47:38 +02:00
|
|
|
|
"github.com/rclone/rclone/lib/dircache"
|
2020-01-14 18:33:35 +01:00
|
|
|
|
"github.com/rclone/rclone/lib/encoder"
|
2019-07-28 19:47:38 +02:00
|
|
|
|
"github.com/rclone/rclone/lib/oauthutil"
|
|
|
|
|
"github.com/rclone/rclone/lib/pacer"
|
|
|
|
|
"github.com/rclone/rclone/lib/rest"
|
2017-09-19 17:09:43 +02:00
|
|
|
|
"golang.org/x/oauth2"
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
|
const (
	// OAuth app credentials for rclone's registration with pCloud.
	// The client secret is stored obscured and revealed at init time.
	rcloneClientID              = "DnONSzyJXpm"
	rcloneEncryptedClientSecret = "ej1OIF39VOQQ0PXaSdK9ztkLw3tdLNscW2157TKNQdQKkICR4uU7aFg4eFM"

	// Pacer tuning for API calls.
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential

	// defaultHostname is the US region API endpoint, used until/unless
	// the oauth flow or config supplies a region-specific hostname.
	defaultHostname = "api.pcloud.com"
)
|
|
|
|
|
|
|
|
|
|
// Globals
var (
	// Description of how to auth for this app.
	//
	// Note that Endpoint.TokenURL is deliberately left empty here: it is
	// filled in by updateTokenURL once the region hostname is known.
	oauthConfig = &oauth2.Config{
		Scopes: nil,
		Endpoint: oauth2.Endpoint{
			AuthURL: "https://my.pcloud.com/oauth2/authorize",
			// TokenURL: "https://api.pcloud.com/oauth2_token", set by updateTokenURL
		},
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}
)
|
|
|
|
|
|
2020-07-02 19:39:14 +02:00
|
|
|
|
// Update the TokenURL with the actual hostname
|
|
|
|
|
func updateTokenURL(oauthConfig *oauth2.Config, hostname string) {
|
|
|
|
|
oauthConfig.Endpoint.TokenURL = "https://" + hostname + "/oauth2_token"
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// Register with Fs
func init() {
	// Start with the default (US) token URL; the Config callback below
	// updates it when a different region hostname is discovered.
	updateTokenURL(oauthConfig, defaultHostname)
	fs.Register(&fs.RegInfo{
		Name:        "pcloud",
		Description: "Pcloud",
		NewFs:       NewFs,
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			// Read any existing config so we can pick up a previously
			// saved hostname before starting the oauth flow.
			optc := new(Options)
			err := configstruct.Set(m, optc)
			if err != nil {
				// Non-fatal: fall back to whatever defaults apply.
				fs.Errorf(nil, "Failed to read config: %v", err)
			}
			updateTokenURL(oauthConfig, optc.Hostname)

			// checkAuth extracts the region hostname that pCloud returns
			// in the oauth redirect form and persists it in the config.
			checkAuth := func(oauthConfig *oauth2.Config, auth *oauthutil.AuthResult) error {
				if auth == nil || auth.Form == nil {
					return errors.New("form not found in response")
				}
				hostname := auth.Form.Get("hostname")
				if hostname == "" {
					hostname = defaultHostname
				}
				// Save the hostname in the config
				m.Set("hostname", hostname)
				// Update the token URL
				updateTokenURL(oauthConfig, hostname)
				fs.Debugf(nil, "pcloud: got hostname %q", hostname)
				return nil
			}
			return oauthutil.ConfigOut("", &oauthutil.Options{
				OAuth2Config: oauthConfig,
				CheckAuth:    checkAuth,
				StateBlankOK: true, // pCloud seems to drop the state parameter now - see #4210
			})
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			//
			// TODO: Investigate Unicode simplification (\ gets converted to \ server-side)
			Default: (encoder.Display |
				encoder.EncodeBackSlash |
				encoder.EncodeInvalidUtf8),
		}, {
			Name:      "root_folder_id",
			Help:      "Fill in for rclone to use a non root folder as its starting point.",
			Default:   "d0",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name: "hostname",
			Help: `Hostname to connect to.

This is normally set when rclone initially does the oauth connection,
however you will need to set it by hand if you are using remote config
with rclone authorize.
`,
			Default:  defaultHostname,
			Advanced: true,
			Examples: []fs.OptionExample{{
				Value: defaultHostname,
				Help:  "Original/US region",
			}, {
				Value: "eapi.pcloud.com",
				Help:  "EU region",
			}},
		}, {
			Name: "username",
			Help: `Your pcloud username.

This is only required when you want to use the cleanup command. Due to a bug
in the pcloud API the required API does not support OAuth authentication so
we have to rely on user password authentication for it.`,
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:       "password",
			Help:       "Your pcloud password.",
			IsPassword: true,
			Advanced:   true,
		},
		}...),
	})
}
|
|
|
|
|
|
2018-05-14 19:06:57 +02:00
|
|
|
|
// Options defines the configuration for this backend
type Options struct {
	Enc          encoder.MultiEncoder `config:"encoding"`       // filename encoding rules
	RootFolderID string               `config:"root_folder_id"` // starting folder, e.g. "d0" for the root
	Hostname     string               `config:"hostname"`       // region API hostname, e.g. api.pcloud.com
	Username     string               `config:"username"`       // only needed for the cleanup command
	Password     string               `config:"password"`       // obscured; only needed for the cleanup command
}
|
|
|
|
|
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// Fs represents a remote pcloud
type Fs struct {
	name         string                 // name of this remote
	root         string                 // the path we are working on
	opt          Options                // parsed options
	features     *fs.Features           // optional features
	ts           *oauthutil.TokenSource // the token source, used to create new clients
	srv          *rest.Client           // the connection to the server
	cleanupSrv   *rest.Client           // the connection used for the cleanup method (user/password auth, nil unless configured)
	dirCache     *dircache.DirCache     // Map of directory path to directory id
	pacer        *fs.Pacer              // pacer for API calls
	tokenRenewer *oauthutil.Renew       // renew the token on expiry
}
|
|
|
|
|
|
|
|
|
|
// Object describes a pcloud object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path
	hasMetaData bool      // whether info below has been set
	size        int64     // size of the object
	modTime     time.Time // modification time of the object
	id          string    // ID of the object
	md5         string    // MD5 if known
	sha1        string    // SHA1 if known
	sha256      string    // SHA256 if known
	link        *api.GetFileLinkResult // cached download link, if fetched
}
|
|
|
|
|
|
|
|
|
|
// ------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
|
|
|
|
|
|
|
|
|
|
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
|
|
|
|
|
|
|
|
|
|
// String converts this Fs to a string
|
|
|
|
|
func (f *Fs) String() string {
|
|
|
|
|
return fmt.Sprintf("pcloud root '%s'", f.root)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
|
|
|
|
|
2020-05-20 12:39:20 +02:00
|
|
|
|
// parsePath parses a pcloud 'url', stripping any leading and trailing
// slashes so the remaining path is relative.
func parsePath(path string) (root string) {
	root = strings.TrimRight(strings.TrimLeft(path, "/"), "/")
	return root
}
|
|
|
|
|
|
|
|
|
|
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
|
|
|
|
|
|
|
|
|
|
// shouldRetry returns a boolean as to whether this resp and err
|
|
|
|
|
// deserve to be retried. It returns the err as a convenience
|
2021-03-11 15:44:01 +01:00
|
|
|
|
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
|
|
|
|
|
if fserrors.ContextError(ctx, &err) {
|
|
|
|
|
return false, err
|
|
|
|
|
}
|
2017-09-19 17:09:43 +02:00
|
|
|
|
doRetry := false
|
|
|
|
|
|
|
|
|
|
// Check if it is an api.Error
|
|
|
|
|
if apiErr, ok := err.(*api.Error); ok {
|
|
|
|
|
// See https://docs.pcloud.com/errors/ for error treatment
|
2020-10-14 00:07:12 +02:00
|
|
|
|
// Errors are classified as 1xxx, 2xxx, etc.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
switch apiErr.Result / 1000 {
|
|
|
|
|
case 4: // 4xxx: rate limiting
|
|
|
|
|
doRetry = true
|
|
|
|
|
case 5: // 5xxx: internal errors
|
|
|
|
|
doRetry = true
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-06-08 22:25:17 +02:00
|
|
|
|
if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Contains(resp.Header["Www-Authenticate"][0], "expired_token") {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
doRetry = true
|
|
|
|
|
fs.Debugf(nil, "Should retry: %v", err)
|
|
|
|
|
}
|
2018-01-12 17:30:54 +01:00
|
|
|
|
return doRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// readMetaDataForPath reads the metadata from the path
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
|
2020-05-11 18:24:37 +02:00
|
|
|
|
leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
if err == fs.ErrorDirNotFound {
|
|
|
|
|
return nil, fs.ErrorObjectNotFound
|
|
|
|
|
}
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
2021-11-26 20:55:50 +01:00
|
|
|
|
found, err := f.listAll(ctx, directoryID, false, true, false, func(item *api.Item) bool {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if item.Name == leaf {
|
|
|
|
|
info = item
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
return false
|
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
if !found {
|
|
|
|
|
return nil, fs.ErrorObjectNotFound
|
|
|
|
|
}
|
|
|
|
|
return info, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// errorHandler parses a non 2xx error response into an error
|
|
|
|
|
func errorHandler(resp *http.Response) error {
|
|
|
|
|
// Decode error response
|
|
|
|
|
errResponse := new(api.Error)
|
|
|
|
|
err := rest.DecodeJSON(resp, &errResponse)
|
|
|
|
|
if err != nil {
|
|
|
|
|
fs.Debugf(nil, "Couldn't decode error response: %v", err)
|
|
|
|
|
}
|
|
|
|
|
if errResponse.ErrorString == "" {
|
|
|
|
|
errResponse.ErrorString = resp.Status
|
|
|
|
|
}
|
|
|
|
|
if errResponse.Result == 0 {
|
|
|
|
|
errResponse.Result = resp.StatusCode
|
|
|
|
|
}
|
|
|
|
|
return errResponse
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// NewFs constructs an Fs from the path, container:path
//
// If root turns out to be a file rather than a directory, it returns
// an Fs pointing at the parent directory together with fs.ErrorIsFile.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	root = parsePath(root)
	oAuthClient, ts, err := oauthutil.NewClient(ctx, name, m, oauthConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to configure Pcloud: %w", err)
	}
	// Point the token endpoint at the configured region hostname.
	updateTokenURL(oauthConfig, opt.Hostname)

	// The cleanup API needs user/password auth (see the username option
	// help) so CleanUp is only offered when both are configured.
	canCleanup := opt.Username != "" && opt.Password != ""
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		ts:    ts,
		srv:   rest.NewClient(oAuthClient).SetRoot("https://" + opt.Hostname),
		pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	if canCleanup {
		f.cleanupSrv = rest.NewClient(fshttp.NewClient(ctx)).SetRoot("https://" + opt.Hostname)
	}
	f.features = (&fs.Features{
		CaseInsensitive:         false,
		CanHaveEmptyDirectories: true,
		PartialUploads:          true,
	}).Fill(ctx, f)
	if !canCleanup {
		f.features.CleanUp = nil
	}
	f.srv.SetErrorHandler(errorHandler)

	// Renew the token in the background
	f.tokenRenewer = oauthutil.NewRenew(f.String(), f.ts, func() error {
		_, err := f.readMetaDataForPath(ctx, "")
		return err
	})

	// Get rootFolderID
	rootID := f.opt.RootFolderID
	f.dirCache = dircache.New(root, rootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
|
|
|
|
|
|
2024-06-09 04:07:01 +02:00
|
|
|
|
// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	// The fileops API binds file descriptors to a TCP connection, so a
	// dedicated single-connection client is needed for this handle.
	client, err := f.newSingleConnClient(ctx)
	if err != nil {
		return nil, fmt.Errorf("create client: %w", err)
	}
	// init an empty file
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, fmt.Errorf("resolve src: %w", err)
	}
	openResult, err := fileOpenNew(ctx, client, f, directoryID, leaf)
	if err != nil {
		return nil, fmt.Errorf("open file: %w", err)
	}

	// The writer keeps the client, file descriptor and file id so that
	// subsequent WriteAt/Close calls go over the same connection.
	writer := &writerAt{
		ctx:    ctx,
		client: client,
		fs:     f,
		size:   size,
		remote: remote,
		fd:     openResult.FileDescriptor,
		fileID: openResult.Fileid,
	}

	return writer, nil
}
|
|
|
|
|
|
|
|
|
|
// newSingleConnClient creates a new http client, accepting keep-alive
// headers, limited to a single connection.
//
// Necessary for pcloud fileops API, as it binds the session to the underlying TCP connection.
// File descriptors are only valid within the same connection and auto-closed when the connection is closed,
// hence we need a separate client (with single connection) for each fd to avoid all sorts of errors and race conditions.
func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
	baseClient := fshttp.NewClient(ctx)
	baseClient.Transport = fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
		t.MaxConnsPerHost = 1
		t.DisableKeepAlives = false
	})
	// Set our own http client in the context
	ctx = oauthutil.Context(ctx, baseClient)
	// create a new oauth client, re-use the token source
	oAuthClient := oauth2.NewClient(ctx, f.ts)
	return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
}
|
|
|
|
|
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// Return an Object from a path
|
|
|
|
|
//
|
|
|
|
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
o := &Object{
|
|
|
|
|
fs: f,
|
|
|
|
|
remote: remote,
|
|
|
|
|
}
|
|
|
|
|
var err error
|
|
|
|
|
if info != nil {
|
|
|
|
|
// Set info
|
|
|
|
|
err = o.setMetaData(info)
|
|
|
|
|
} else {
|
2019-06-17 10:34:30 +02:00
|
|
|
|
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
return o, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// NewObject finds the Object at remote. If it can't be found
|
|
|
|
|
// it returns the error fs.ErrorObjectNotFound.
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
|
|
|
|
return f.newObjectWithInfo(ctx, remote, nil)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// FindLeaf finds a directory of name leaf in the folder with ID pathID
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// Find the leaf in pathID
|
2021-11-26 20:55:50 +01:00
|
|
|
|
found, err = f.listAll(ctx, pathID, true, false, false, func(item *api.Item) bool {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if item.Name == leaf {
|
|
|
|
|
pathIDOut = item.ID
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
return false
|
|
|
|
|
})
|
|
|
|
|
return pathIDOut, found, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CreateDir makes a directory with pathID as parent and name leaf
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
|
|
|
|
|
var resp *http.Response
|
|
|
|
|
var result api.ItemResult
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/createfolder",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
2020-01-14 18:33:35 +01:00
|
|
|
|
opts.Parameters.Set("name", f.opt.Enc.FromStandardName(leaf))
|
2017-09-19 17:09:43 +02:00
|
|
|
|
opts.Parameters.Set("folderid", dirIDtoNumber(pathID))
|
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
//fmt.Printf("...Error %v\n", err)
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
|
|
|
|
// fmt.Printf("...Id %q\n", *info.Id)
|
|
|
|
|
return result.Metadata.ID, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Converts a dirID which is usually 'd' followed by digits into just
|
|
|
|
|
// the digits
|
|
|
|
|
func dirIDtoNumber(dirID string) string {
|
|
|
|
|
if len(dirID) > 0 && dirID[0] == 'd' {
|
|
|
|
|
return dirID[1:]
|
|
|
|
|
}
|
|
|
|
|
fs.Debugf(nil, "Invalid directory id %q", dirID)
|
|
|
|
|
return dirID
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Converts a fileID which is usually 'f' followed by digits into just
|
|
|
|
|
// the digits
|
|
|
|
|
func fileIDtoNumber(fileID string) string {
|
|
|
|
|
if len(fileID) > 0 && fileID[0] == 'f' {
|
|
|
|
|
return fileID[1:]
|
|
|
|
|
}
|
2019-02-07 18:41:17 +01:00
|
|
|
|
fs.Debugf(nil, "Invalid file id %q", fileID)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
return fileID
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// listAllFn is the user function called by listAll to process each
// File/Folder item found in a listing.
//
// Should return true to finish processing (early exit).
type listAllFn func(*api.Item) bool
|
|
|
|
|
|
|
|
|
|
// listAll lists the directory required calling the user function on
// each item found.
//
// If the user fn ever returns true then it early exits with found = true.
//
// directoriesOnly / filesOnly filter which items fn sees. When
// recursive is set the server is asked for the whole subtree in one
// call and the nested Contents are walked locally, with fn receiving
// names prefixed by their sub-path.
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, recursive bool, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/listfolder",
		Parameters: url.Values{},
	}
	if recursive {
		opts.Parameters.Set("recursive", "1")
	}
	opts.Parameters.Set("folderid", dirIDtoNumber(dirID))

	var result api.ItemResult
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
		err = result.Error.Update(err)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return found, fmt.Errorf("couldn't list files: %w", err)
	}
	// Walk the (possibly nested) listing, sharing `found` with the
	// enclosing function so an early exit propagates out of recursion.
	var recursiveContents func(is []api.Item, path string)
	recursiveContents = func(is []api.Item, path string) {
		for i := range is {
			item := &is[i]
			if item.IsFolder {
				// NOTE(review): when filesOnly is set this also skips
				// descending into the folder's Contents — confirm no
				// caller uses filesOnly together with recursive.
				if filesOnly {
					continue
				}
			} else {
				if directoriesOnly {
					continue
				}
			}
			// Decode the name and make it relative to the listing root.
			item.Name = path + f.opt.Enc.ToStandardName(item.Name)
			if fn(item) {
				found = true
				break
			}
			if recursive {
				recursiveContents(item.Contents, item.Name+"/")
			}
		}
	}
	recursiveContents(result.Metadata.Contents, "")
	return
}
|
|
|
|
|
|
2021-11-26 20:55:50 +01:00
|
|
|
|
// listHelper iterates over all items from the directory
// and calls the callback for each element.
//
// The first error from the callback (or from building an Object) stops
// the listing and is returned.
func (f *Fs) listHelper(ctx context.Context, dir string, recursive bool, callback func(entries fs.DirEntry) error) (err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}
	var iErr error // first error seen inside the listAll callback
	_, err = f.listAll(ctx, directoryID, false, false, recursive, func(info *api.Item) bool {
		remote := path.Join(dir, info.Name)
		if info.IsFolder {
			// cache the directory ID for later lookups
			f.dirCache.Put(remote, info.ID)
			d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
			// FIXME more info from dir?
			iErr = callback(d)
		} else {
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				iErr = err
				return true
			}
			iErr = callback(o)
		}
		if iErr != nil {
			// Returning true stops listAll early.
			return true
		}
		return false
	})
	if err != nil {
		return err
	}
	if iErr != nil {
		return iErr
	}
	return nil
}
|
|
|
|
|
|
|
|
|
|
// List the objects and directories in dir into entries. The
|
|
|
|
|
// entries can be returned in any order but should be for a
|
|
|
|
|
// complete directory.
|
|
|
|
|
//
|
|
|
|
|
// dir should be "" to list the root, and should not have
|
|
|
|
|
// trailing slashes.
|
|
|
|
|
//
|
|
|
|
|
// This should return ErrDirNotFound if the directory isn't
|
|
|
|
|
// found.
|
|
|
|
|
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
|
|
|
|
err = f.listHelper(ctx, dir, false, func(o fs.DirEntry) error {
|
|
|
|
|
entries = append(entries, o)
|
|
|
|
|
return nil
|
|
|
|
|
})
|
|
|
|
|
return entries, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListR lists the objects and directories of the Fs starting
|
|
|
|
|
// from dir recursively into out.
|
|
|
|
|
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
|
|
|
|
|
list := walk.NewListRHelper(callback)
|
|
|
|
|
err = f.listHelper(ctx, dir, true, func(o fs.DirEntry) error {
|
|
|
|
|
return list.Add(o)
|
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
2021-11-26 20:55:50 +01:00
|
|
|
|
return list.Flush()
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Creates from the parameters passed in a half finished Object which
|
|
|
|
|
// must have setMetaData called on it
|
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
|
// Returns the object, leaf, directoryID and error.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
|
|
|
|
// Used to create new objects
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// Create the directory for the object if it doesn't exist
|
2020-05-11 18:24:37 +02:00
|
|
|
|
leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
// Temporary Object under construction
|
|
|
|
|
o = &Object{
|
|
|
|
|
fs: f,
|
|
|
|
|
remote: remote,
|
|
|
|
|
}
|
|
|
|
|
return o, leaf, directoryID, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Put the object into the container
|
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
|
// Copy the reader in to the new object which is returned.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
|
|
|
|
// The new object may have been created if an error is returned
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
remote := src.Remote()
|
|
|
|
|
size := src.Size()
|
2019-06-17 10:34:30 +02:00
|
|
|
|
modTime := src.ModTime(ctx)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
|
2019-06-17 10:34:30 +02:00
|
|
|
|
o, _, _, err := f.createObject(ctx, remote, modTime, size)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
|
return o, o.Update(ctx, in, src, options...)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Mkdir creates the container if it doesn't exist
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
|
2020-05-11 18:24:37 +02:00
|
|
|
|
_, err := f.dirCache.FindDir(ctx, dir, true)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// purgeCheck removes the root directory, if check is set then it
|
|
|
|
|
// refuses to do so if it has anything in
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
root := path.Join(f.root, dir)
|
|
|
|
|
if root == "" {
|
|
|
|
|
return errors.New("can't purge root directory")
|
|
|
|
|
}
|
|
|
|
|
dc := f.dirCache
|
2019-06-17 10:34:30 +02:00
|
|
|
|
rootID, err := dc.FindDir(ctx, dir, false)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/deletefolder",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
|
|
|
|
opts.Parameters.Set("folderid", dirIDtoNumber(rootID))
|
|
|
|
|
if !check {
|
|
|
|
|
opts.Path = "/deletefolderrecursive"
|
|
|
|
|
}
|
|
|
|
|
var resp *http.Response
|
|
|
|
|
var result api.ItemResult
|
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return fmt.Errorf("rmdir failed: %w", err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
f.dirCache.FlushDir(dir)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Rmdir deletes the root folder
|
|
|
|
|
//
|
|
|
|
|
// Returns an error if it isn't empty
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
|
|
|
|
|
return f.purgeCheck(ctx, dir, true)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Precision return the precision of this Fs
|
|
|
|
|
func (f *Fs) Precision() time.Duration {
|
|
|
|
|
return time.Second
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-13 23:43:40 +02:00
|
|
|
|
// Copy src to this remote using server-side copy operations.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
|
// This is stored with the remote path given.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
|
// It returns the destination Object and a possible error.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
|
|
|
|
// Will only be called if src.Fs().Name() == f.Name()
|
|
|
|
|
//
|
|
|
|
|
// If it isn't possible then return fs.ErrorCantCopy
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
srcObj, ok := src.(*Object)
|
|
|
|
|
if !ok {
|
|
|
|
|
fs.Debugf(src, "Can't copy - not same remote type")
|
|
|
|
|
return nil, fs.ErrorCantCopy
|
|
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
|
err := srcObj.readMetaData(ctx)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Create temporary object
|
2019-06-17 10:34:30 +02:00
|
|
|
|
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Copy the object
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/copyfile",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
|
|
|
|
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
|
2020-01-14 18:33:35 +01:00
|
|
|
|
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
|
2017-09-19 17:09:43 +02:00
|
|
|
|
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
|
2022-01-18 12:24:13 +01:00
|
|
|
|
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(srcObj.modTime.Unix())))
|
2017-09-19 17:09:43 +02:00
|
|
|
|
var resp *http.Response
|
|
|
|
|
var result api.ItemResult
|
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
err = dstObj.setMetaData(&result.Metadata)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
return dstObj, nil
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-04 23:25:14 +02:00
|
|
|
|
// Purge deletes all the files in the directory
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
|
|
|
|
// Optional interface: Only implement this if you have a way of
|
|
|
|
|
// deleting all the files quicker than just running Remove() on the
|
|
|
|
|
// result of List()
|
2020-06-04 23:25:14 +02:00
|
|
|
|
func (f *Fs) Purge(ctx context.Context, dir string) error {
|
|
|
|
|
return f.purgeCheck(ctx, dir, false)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CleanUp empties the trash
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) CleanUp(ctx context.Context) error {
|
2020-05-11 18:24:37 +02:00
|
|
|
|
rootID, err := f.dirCache.RootID(ctx, false)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/trash_clear",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
2020-05-11 18:24:37 +02:00
|
|
|
|
opts.Parameters.Set("folderid", dirIDtoNumber(rootID))
|
2022-06-14 14:34:37 +02:00
|
|
|
|
opts.Parameters.Set("username", f.opt.Username)
|
|
|
|
|
opts.Parameters.Set("password", obscure.MustReveal(f.opt.Password))
|
2017-09-19 17:09:43 +02:00
|
|
|
|
var resp *http.Response
|
|
|
|
|
var result api.Error
|
|
|
|
|
return f.pacer.Call(func() (bool, error) {
|
2022-06-14 14:34:37 +02:00
|
|
|
|
resp, err = f.cleanupSrv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
2020-10-13 23:43:40 +02:00
|
|
|
|
// Move src to this remote using server-side move operations.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
|
// This is stored with the remote path given.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
|
// It returns the destination Object and a possible error.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
|
|
|
|
// Will only be called if src.Fs().Name() == f.Name()
|
|
|
|
|
//
|
|
|
|
|
// If it isn't possible then return fs.ErrorCantMove
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
srcObj, ok := src.(*Object)
|
|
|
|
|
if !ok {
|
|
|
|
|
fs.Debugf(src, "Can't move - not same remote type")
|
|
|
|
|
return nil, fs.ErrorCantMove
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Create temporary object
|
2019-06-17 10:34:30 +02:00
|
|
|
|
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Do the move
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/renamefile",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
|
|
|
|
opts.Parameters.Set("fileid", fileIDtoNumber(srcObj.id))
|
2020-01-14 18:33:35 +01:00
|
|
|
|
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(leaf))
|
2017-09-19 17:09:43 +02:00
|
|
|
|
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
|
|
|
|
|
var resp *http.Response
|
|
|
|
|
var result api.ItemResult
|
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = dstObj.setMetaData(&result.Metadata)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
return dstObj, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
2020-10-13 23:43:40 +02:00
|
|
|
|
// using server-side move operations.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
|
|
|
|
// Will only be called if src.Fs().Name() == f.Name()
|
|
|
|
|
//
|
|
|
|
|
// If it isn't possible then return fs.ErrorCantDirMove
|
|
|
|
|
//
|
|
|
|
|
// If destination exists then return fs.ErrorDirExists
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
srcFs, ok := src.(*Fs)
|
|
|
|
|
if !ok {
|
|
|
|
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
|
|
|
|
return fs.ErrorCantDirMove
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-12 12:39:30 +02:00
|
|
|
|
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Do the move
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/renamefolder",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
|
|
|
|
opts.Parameters.Set("folderid", dirIDtoNumber(srcID))
|
2020-05-12 12:39:30 +02:00
|
|
|
|
opts.Parameters.Set("toname", f.opt.Enc.FromStandardName(dstLeaf))
|
|
|
|
|
opts.Parameters.Set("tofolderid", dirIDtoNumber(dstDirectoryID))
|
2017-09-19 17:09:43 +02:00
|
|
|
|
var resp *http.Response
|
|
|
|
|
var result api.ItemResult
|
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
srcFs.dirCache.FlushDir(srcRemote)
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DirCacheFlush resets the directory cache - used in testing as an
|
|
|
|
|
// optional interface
|
|
|
|
|
func (f *Fs) DirCacheFlush() {
|
|
|
|
|
f.dirCache.ResetRoot()
|
|
|
|
|
}
|
|
|
|
|
|
2020-06-19 18:09:48 +02:00
|
|
|
|
func (f *Fs) linkDir(ctx context.Context, dirID string, expire fs.Duration) (string, error) {
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/getfolderpublink",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
|
|
|
|
var result api.PubLinkResult
|
|
|
|
|
opts.Parameters.Set("folderid", dirIDtoNumber(dirID))
|
|
|
|
|
err := f.pacer.Call(func() (bool, error) {
|
|
|
|
|
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2020-06-19 18:09:48 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
|
|
|
|
return result.Link, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (f *Fs) linkFile(ctx context.Context, path string, expire fs.Duration) (string, error) {
|
2020-08-20 21:09:55 +02:00
|
|
|
|
obj, err := f.NewObject(ctx, path)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
|
|
|
|
o := obj.(*Object)
|
2020-06-19 18:09:48 +02:00
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/getfilepublink",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
|
|
|
|
var result api.PubLinkResult
|
2020-08-20 21:09:55 +02:00
|
|
|
|
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
|
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2020-06-19 18:09:48 +02:00
|
|
|
|
resp, err := f.srv.CallJSON(ctx, &opts, nil, &result)
|
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2020-06-19 18:09:48 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
|
|
|
|
return result.Link, nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
|
|
|
|
|
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
|
|
|
|
|
dirID, err := f.dirCache.FindDir(ctx, remote, false)
|
|
|
|
|
if err == fs.ErrorDirNotFound {
|
|
|
|
|
return f.linkFile(ctx, remote, expire)
|
|
|
|
|
}
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
|
|
|
|
return f.linkDir(ctx, dirID, expire)
|
|
|
|
|
}
|
|
|
|
|
|
2018-04-16 23:19:25 +02:00
|
|
|
|
// About gets quota information
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
2018-04-16 23:19:25 +02:00
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/userinfo",
|
|
|
|
|
}
|
|
|
|
|
var resp *http.Response
|
|
|
|
|
var q api.UserInfo
|
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &q)
|
2018-04-16 23:19:25 +02:00
|
|
|
|
err = q.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2018-04-16 23:19:25 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
2022-01-14 22:18:32 +01:00
|
|
|
|
return nil, err
|
2018-04-16 23:19:25 +02:00
|
|
|
|
}
|
2022-06-13 20:11:31 +02:00
|
|
|
|
free := q.Quota - q.UsedQuota
|
|
|
|
|
if free < 0 {
|
|
|
|
|
free = 0
|
|
|
|
|
}
|
2018-04-16 23:19:25 +02:00
|
|
|
|
usage = &fs.Usage{
|
2022-06-13 20:11:31 +02:00
|
|
|
|
Total: fs.NewUsageValue(q.Quota), // quota of bytes that can be used
|
|
|
|
|
Used: fs.NewUsageValue(q.UsedQuota), // bytes in use
|
|
|
|
|
Free: fs.NewUsageValue(free), // bytes which can be uploaded before reaching the quota
|
2018-04-16 23:19:25 +02:00
|
|
|
|
}
|
|
|
|
|
return usage, nil
|
|
|
|
|
}
|
|
|
|
|
|
2023-12-08 05:33:51 +01:00
|
|
|
|
// Shutdown shutdown the fs
|
|
|
|
|
func (f *Fs) Shutdown(ctx context.Context) error {
|
|
|
|
|
f.tokenRenewer.Shutdown()
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// Hashes returns the supported hash sets.
|
2018-01-12 17:30:54 +01:00
|
|
|
|
func (f *Fs) Hashes() hash.Set {
|
2020-10-08 19:14:53 +02:00
|
|
|
|
// EU region supports SHA1 and SHA256 (but rclone doesn't
|
|
|
|
|
// support SHA256 yet).
|
|
|
|
|
//
|
|
|
|
|
// https://forum.rclone.org/t/pcloud-to-local-no-hashes-in-common/19440
|
|
|
|
|
if f.opt.Hostname == "eapi.pcloud.com" {
|
2021-08-13 21:17:52 +02:00
|
|
|
|
return hash.Set(hash.SHA1 | hash.SHA256)
|
2020-10-08 19:14:53 +02:00
|
|
|
|
}
|
2018-01-18 21:27:52 +01:00
|
|
|
|
return hash.Set(hash.MD5 | hash.SHA1)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
// Fs returns the parent Fs
|
|
|
|
|
func (o *Object) Fs() fs.Info {
|
|
|
|
|
return o.fs
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Return a string version
|
|
|
|
|
func (o *Object) String() string {
|
|
|
|
|
if o == nil {
|
|
|
|
|
return "<nil>"
|
|
|
|
|
}
|
|
|
|
|
return o.remote
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Remote returns the remote path
|
|
|
|
|
func (o *Object) Remote() string {
|
|
|
|
|
return o.remote
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// getHashes fetches the hashes into the object
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) getHashes(ctx context.Context) (err error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
var resp *http.Response
|
|
|
|
|
var result api.ChecksumFileResult
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "GET",
|
|
|
|
|
Path: "/checksumfile",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
|
|
|
|
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
|
|
|
|
|
err = o.fs.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
o.setHashes(&result.Hashes)
|
|
|
|
|
return o.setMetaData(&result.Metadata)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
2021-09-16 16:46:44 +02:00
|
|
|
|
var pHash *string
|
|
|
|
|
switch t {
|
|
|
|
|
case hash.MD5:
|
|
|
|
|
pHash = &o.md5
|
|
|
|
|
case hash.SHA1:
|
|
|
|
|
pHash = &o.sha1
|
|
|
|
|
case hash.SHA256:
|
|
|
|
|
pHash = &o.sha256
|
|
|
|
|
default:
|
2018-01-18 21:27:52 +01:00
|
|
|
|
return "", hash.ErrUnsupported
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
2021-09-16 16:46:44 +02:00
|
|
|
|
if o.md5 == "" && o.sha1 == "" && o.sha256 == "" {
|
2019-06-17 10:34:30 +02:00
|
|
|
|
err := o.getHashes(ctx)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return "", fmt.Errorf("failed to get hash: %w", err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
2021-09-16 16:46:44 +02:00
|
|
|
|
return *pHash, nil
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Size returns the size of an object in bytes
|
|
|
|
|
func (o *Object) Size() int64 {
|
2019-06-17 10:34:30 +02:00
|
|
|
|
err := o.readMetaData(context.TODO())
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
fs.Logf(o, "Failed to read metadata: %v", err)
|
|
|
|
|
return 0
|
|
|
|
|
}
|
|
|
|
|
return o.size
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// setMetaData sets the metadata from info
|
|
|
|
|
func (o *Object) setMetaData(info *api.Item) (err error) {
|
|
|
|
|
if info.IsFolder {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return fmt.Errorf("%q is a folder: %w", o.remote, fs.ErrorNotAFile)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
o.hasMetaData = true
|
|
|
|
|
o.size = info.Size
|
|
|
|
|
o.modTime = info.ModTime()
|
|
|
|
|
o.id = info.ID
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// setHashes sets the hashes from that passed in
|
|
|
|
|
func (o *Object) setHashes(hashes *api.Hashes) {
|
|
|
|
|
o.sha1 = hashes.SHA1
|
|
|
|
|
o.md5 = hashes.MD5
|
2021-09-16 16:46:44 +02:00
|
|
|
|
o.sha256 = hashes.SHA256
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// readMetaData gets the metadata if it hasn't already been fetched
|
|
|
|
|
//
|
|
|
|
|
// it also sets the info
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if o.hasMetaData {
|
|
|
|
|
return nil
|
|
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
|
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
//if apiErr, ok := err.(*api.Error); ok {
|
|
|
|
|
// FIXME
|
|
|
|
|
// if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
|
|
|
|
|
// return fs.ErrorObjectNotFound
|
|
|
|
|
// }
|
|
|
|
|
//}
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
return o.setMetaData(info)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ModTime returns the modification time of the object
|
|
|
|
|
//
|
|
|
|
|
// It attempts to read the objects mtime and if that isn't present the
|
|
|
|
|
// LastModified returned in the http headers
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
|
|
|
err := o.readMetaData(ctx)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
fs.Logf(o, "Failed to read metadata: %v", err)
|
|
|
|
|
return time.Now()
|
|
|
|
|
}
|
|
|
|
|
return o.modTime
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetModTime sets the modification time of the local fs object
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
2024-06-08 23:47:32 +02:00
|
|
|
|
filename, directoryID, err := o.fs.dirCache.FindPath(ctx, o.Remote(), true)
|
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
2024-06-09 04:07:01 +02:00
|
|
|
|
fileID := fileIDtoNumber(o.id)
|
2024-06-08 23:47:32 +02:00
|
|
|
|
filename = o.fs.opt.Enc.FromStandardName(filename)
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "PUT",
|
|
|
|
|
Path: "/copyfile",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
|
|
|
|
ExtraHeaders: map[string]string{
|
|
|
|
|
"Connection": "keep-alive",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
opts.Parameters.Set("fileid", fileID)
|
|
|
|
|
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
|
|
|
|
|
opts.Parameters.Set("toname", filename)
|
|
|
|
|
opts.Parameters.Set("tofolderid", dirIDtoNumber(directoryID))
|
|
|
|
|
opts.Parameters.Set("ctime", strconv.FormatInt(modTime.Unix(), 10))
|
|
|
|
|
opts.Parameters.Set("mtime", strconv.FormatInt(modTime.Unix(), 10))
|
|
|
|
|
|
|
|
|
|
result := &api.ItemResult{}
|
2024-06-09 04:07:01 +02:00
|
|
|
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
2024-06-08 23:47:32 +02:00
|
|
|
|
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, result)
|
|
|
|
|
err = result.Error.Update(err)
|
|
|
|
|
return shouldRetry(ctx, resp, err)
|
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return fmt.Errorf("update mtime: copyfile: %w", err)
|
|
|
|
|
}
|
|
|
|
|
if err := o.setMetaData(&result.Metadata); err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return nil
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Storable returns a boolean showing whether this object storable
|
|
|
|
|
func (o *Object) Storable() bool {
|
|
|
|
|
return true
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// downloadURL fetches the download link
|
2019-09-04 21:00:37 +02:00
|
|
|
|
func (o *Object) downloadURL(ctx context.Context) (URL string, err error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if o.id == "" {
|
|
|
|
|
return "", errors.New("can't download - no id")
|
|
|
|
|
}
|
|
|
|
|
if o.link.IsValid() {
|
|
|
|
|
return o.link.URL(), nil
|
|
|
|
|
}
|
|
|
|
|
var resp *http.Response
|
|
|
|
|
var result api.GetFileLinkResult
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "GET",
|
|
|
|
|
Path: "/getfilelink",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
|
|
|
|
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
|
|
|
|
|
err = o.fs.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return "", err
|
|
|
|
|
}
|
|
|
|
|
if !result.IsValid() {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return "", fmt.Errorf("fetched invalid link %+v", result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
o.link = &result
|
|
|
|
|
return o.link.URL(), nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Open an object for read
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
url, err := o.downloadURL(ctx)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
var resp *http.Response
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "GET",
|
|
|
|
|
RootURL: url,
|
|
|
|
|
Options: options,
|
|
|
|
|
}
|
|
|
|
|
err = o.fs.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
|
|
|
|
return nil, err
|
|
|
|
|
}
|
|
|
|
|
return resp.Body, err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Update the object with the contents of the io.Reader, modTime and size
|
|
|
|
|
//
|
2022-08-05 17:35:41 +02:00
|
|
|
|
// If existing is set then it updates the object rather than creating a new one.
|
2017-09-19 17:09:43 +02:00
|
|
|
|
//
|
|
|
|
|
// The new object may have been created if an error is returned
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
o.fs.tokenRenewer.Start()
|
|
|
|
|
defer o.fs.tokenRenewer.Stop()
|
|
|
|
|
|
|
|
|
|
size := src.Size() // NB can upload without size
|
2019-06-17 10:34:30 +02:00
|
|
|
|
modTime := src.ModTime(ctx)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
remote := o.Remote()
|
|
|
|
|
|
2021-09-17 11:04:19 +02:00
|
|
|
|
if size < 0 {
|
|
|
|
|
return errors.New("can't upload unknown sizes objects")
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// Create the directory for the object if it doesn't exist
|
2020-05-11 18:24:37 +02:00
|
|
|
|
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Experiments with pcloud indicate that it doesn't like any
|
|
|
|
|
// form of request which doesn't have a Content-Length.
|
|
|
|
|
// According to the docs if you close the connection at the
|
|
|
|
|
// end then it should work without Content-Length, but I
|
|
|
|
|
// couldn't get this to work using opts.Close (which sets
|
|
|
|
|
// http.Request.Close).
|
|
|
|
|
//
|
|
|
|
|
// This means that chunked transfer encoding needs to be
|
|
|
|
|
// disabled and a Content-Length needs to be supplied. This
|
|
|
|
|
// also rules out streaming.
|
|
|
|
|
//
|
|
|
|
|
// Docs: https://docs.pcloud.com/methods/file/uploadfile.html
|
|
|
|
|
var resp *http.Response
|
|
|
|
|
var result api.UploadFileResponse
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "PUT",
|
|
|
|
|
Path: "/uploadfile",
|
|
|
|
|
Body: in,
|
2020-11-26 22:28:39 +01:00
|
|
|
|
ContentType: fs.MimeType(ctx, src),
|
2017-09-19 17:09:43 +02:00
|
|
|
|
ContentLength: &size,
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
TransferEncoding: []string{"identity"}, // pcloud doesn't like chunked encoding
|
2020-03-21 23:04:14 +01:00
|
|
|
|
Options: options,
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
2020-01-14 18:33:35 +01:00
|
|
|
|
leaf = o.fs.opt.Enc.FromStandardName(leaf)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
opts.Parameters.Set("filename", leaf)
|
|
|
|
|
opts.Parameters.Set("folderid", dirIDtoNumber(directoryID))
|
|
|
|
|
opts.Parameters.Set("nopartial", "1")
|
2022-01-18 12:24:13 +01:00
|
|
|
|
opts.Parameters.Set("mtime", fmt.Sprintf("%d", uint64(modTime.Unix())))
|
2017-09-19 17:09:43 +02:00
|
|
|
|
|
|
|
|
|
// Special treatment for a 0 length upload. This doesn't work
|
|
|
|
|
// with PUT even with Content-Length set (by setting
|
Spelling fixes
Fix spelling of: above, already, anonymous, associated,
authentication, bandwidth, because, between, blocks, calculate,
candidates, cautious, changelog, cleaner, clipboard, command,
completely, concurrently, considered, constructs, corrupt, current,
daemon, dependencies, deprecated, directory, dispatcher, download,
eligible, ellipsis, encrypter, endpoint, entrieslist, essentially,
existing writers, existing, expires, filesystem, flushing, frequently,
hierarchy, however, implementation, implements, inaccurate,
individually, insensitive, longer, maximum, metadata, modified,
multipart, namedirfirst, nextcloud, obscured, opened, optional,
owncloud, pacific, passphrase, password, permanently, persimmon,
positive, potato, protocol, quota, receiving, recommends, referring,
requires, revisited, satisfied, satisfies, satisfy, semver,
serialized, session, storage, strategies, stringlist, successful,
supported, surprise, temporarily, temporary, transactions, unneeded,
update, uploads, wrapped
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-09 02:17:24 +02:00
|
|
|
|
// opts.Body=0), so upload it as a multipart form POST with
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// Content-Length set.
|
|
|
|
|
if size == 0 {
|
2021-03-25 16:35:08 +01:00
|
|
|
|
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, opts.Parameters, "content", leaf)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
if err != nil {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return fmt.Errorf("failed to make multipart upload for 0 length file: %w", err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
2019-06-24 20:34:49 +02:00
|
|
|
|
|
|
|
|
|
contentLength := overhead + size
|
2017-09-19 17:09:43 +02:00
|
|
|
|
|
|
|
|
|
opts.ContentType = contentType
|
2019-06-24 20:34:49 +02:00
|
|
|
|
opts.Body = formReader
|
2017-09-19 17:09:43 +02:00
|
|
|
|
opts.Method = "POST"
|
|
|
|
|
opts.Parameters = nil
|
2019-06-24 20:34:49 +02:00
|
|
|
|
opts.ContentLength = &contentLength
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
if err != nil {
|
2018-09-01 11:01:02 +02:00
|
|
|
|
// sometimes pcloud leaves a half complete file on
|
2021-09-17 11:04:53 +02:00
|
|
|
|
// error, so delete it if it exists, trying a few times
|
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
|
delObj, delErr := o.fs.NewObject(ctx, o.remote)
|
|
|
|
|
if delErr == nil && delObj != nil {
|
|
|
|
|
_ = delObj.Remove(ctx)
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
time.Sleep(time.Second)
|
2018-09-01 11:01:02 +02:00
|
|
|
|
}
|
2017-09-19 17:09:43 +02:00
|
|
|
|
return err
|
|
|
|
|
}
|
|
|
|
|
if len(result.Items) != 1 {
|
2021-11-04 11:12:57 +01:00
|
|
|
|
return fmt.Errorf("failed to upload %v - not sure why", o)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
}
|
|
|
|
|
o.setHashes(&result.Checksums[0])
|
|
|
|
|
return o.setMetaData(&result.Items[0])
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Remove an object
|
2019-06-17 10:34:30 +02:00
|
|
|
|
func (o *Object) Remove(ctx context.Context) error {
|
2017-09-19 17:09:43 +02:00
|
|
|
|
opts := rest.Opts{
|
|
|
|
|
Method: "POST",
|
|
|
|
|
Path: "/deletefile",
|
|
|
|
|
Parameters: url.Values{},
|
|
|
|
|
}
|
|
|
|
|
var result api.ItemResult
|
|
|
|
|
opts.Parameters.Set("fileid", fileIDtoNumber(o.id))
|
|
|
|
|
return o.fs.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
|
resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &result)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
err = result.Error.Update(err)
|
2021-03-11 15:44:01 +01:00
|
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
2018-05-13 10:16:56 +02:00
|
|
|
|
// ID returns the ID of the Object if known, or "" if not
|
|
|
|
|
func (o *Object) ID() string {
|
|
|
|
|
return o.id
|
|
|
|
|
}
|
|
|
|
|
|
2017-09-19 17:09:43 +02:00
|
|
|
|
// Check the interfaces are satisfied
|
|
|
|
|
var (
|
|
|
|
|
_ fs.Fs = (*Fs)(nil)
|
|
|
|
|
_ fs.Purger = (*Fs)(nil)
|
|
|
|
|
_ fs.CleanUpper = (*Fs)(nil)
|
|
|
|
|
_ fs.Copier = (*Fs)(nil)
|
|
|
|
|
_ fs.Mover = (*Fs)(nil)
|
|
|
|
|
_ fs.DirMover = (*Fs)(nil)
|
|
|
|
|
_ fs.DirCacheFlusher = (*Fs)(nil)
|
2020-06-19 18:09:48 +02:00
|
|
|
|
_ fs.PublicLinker = (*Fs)(nil)
|
2018-04-16 23:19:25 +02:00
|
|
|
|
_ fs.Abouter = (*Fs)(nil)
|
2023-12-08 05:33:51 +01:00
|
|
|
|
_ fs.Shutdowner = (*Fs)(nil)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
_ fs.Object = (*Object)(nil)
|
2018-05-13 10:16:56 +02:00
|
|
|
|
_ fs.IDer = (*Object)(nil)
|
2017-09-19 17:09:43 +02:00
|
|
|
|
)
|