2017-07-05 23:20:40 +02:00
|
|
|
// Package box provides an interface to the Box
|
|
|
|
// object storage system.
|
|
|
|
package box
|
|
|
|
|
|
|
|
// FIXME Box only supports file names of 255 characters or less. Names
|
|
|
|
// that will not be supported are those that contain non-printable
|
|
|
|
// ascii, / or \, names with trailing spaces, and the special names
|
|
|
|
// “.” and “..”.
|
|
|
|
|
|
|
|
// FIXME box can copy a directory
|
|
|
|
|
|
|
|
import (
|
2019-06-17 10:34:30 +02:00
|
|
|
"context"
|
2019-09-20 14:26:53 +02:00
|
|
|
"crypto/rsa"
|
|
|
|
"encoding/json"
|
|
|
|
"encoding/pem"
|
2017-07-05 23:20:40 +02:00
|
|
|
"fmt"
|
|
|
|
"io"
|
2019-09-20 14:26:53 +02:00
|
|
|
"io/ioutil"
|
2017-07-05 23:20:40 +02:00
|
|
|
"log"
|
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"path"
|
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
2020-01-14 18:33:35 +01:00
|
|
|
"github.com/rclone/rclone/lib/encoder"
|
2020-06-02 12:54:52 +02:00
|
|
|
"github.com/rclone/rclone/lib/env"
|
2019-09-20 14:26:53 +02:00
|
|
|
"github.com/rclone/rclone/lib/jwtutil"
|
|
|
|
|
|
|
|
"github.com/youmark/pkcs8"
|
|
|
|
|
2017-07-05 23:20:40 +02:00
|
|
|
"github.com/pkg/errors"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/backend/box/api"
|
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/config"
|
|
|
|
"github.com/rclone/rclone/fs/config/configmap"
|
|
|
|
"github.com/rclone/rclone/fs/config/configstruct"
|
|
|
|
"github.com/rclone/rclone/fs/config/obscure"
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
2019-09-20 14:26:53 +02:00
|
|
|
"github.com/rclone/rclone/fs/fshttp"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs/hash"
|
|
|
|
"github.com/rclone/rclone/lib/dircache"
|
|
|
|
"github.com/rclone/rclone/lib/oauthutil"
|
|
|
|
"github.com/rclone/rclone/lib/pacer"
|
|
|
|
"github.com/rclone/rclone/lib/rest"
|
2017-07-05 23:20:40 +02:00
|
|
|
"golang.org/x/oauth2"
|
2019-09-20 14:26:53 +02:00
|
|
|
"golang.org/x/oauth2/jws"
|
2017-07-05 23:20:40 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	rcloneClientID              = "d0374ba6pgmaguie02ge15sv1mllndho"
	rcloneEncryptedClientSecret = "sYbJYm99WB8jzeaLPU0OPDMJKIkZvD2qOn3SyEMfiJr03RdtDt3xcZEIudRhbIDL" // obscured - revealed with obscure.MustReveal
	minSleep                    = 10 * time.Millisecond                                              // minimum pacer sleep between API calls
	maxSleep                    = 2 * time.Second                                                    // maximum pacer sleep when backing off
	decayConstant               = 2                                                                  // bigger for slower decay, exponential
	rootURL                     = "https://api.box.com/2.0"                                          // base URL for metadata API calls
	uploadURL                   = "https://upload.box.com/api/2.0"                                   // base URL for upload API calls
	listChunks                  = 1000                                                               // chunk size to read directory listings
	minUploadCutoff             = 50000000                                                           // upload cutoff can be no lower than this
	defaultUploadCutoff         = 50 * 1024 * 1024                                                   // default cutoff for switching to multipart upload
	tokenURL                    = "https://api.box.com/oauth2/token"                                 // endpoint used for the JWT token exchange
)
|
|
|
|
|
|
|
|
// Globals
var (
	// Description of how to auth for this app
	oauthConfig = &oauth2.Config{
		Scopes: nil,
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://app.box.com/api/oauth2/authorize",
			TokenURL: "https://app.box.com/api/oauth2/token",
		},
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
	}
)
|
|
|
|
|
|
|
|
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "box",
		Description: "Box",
		NewFs:       NewFs,
		// Config is run at "rclone config" time and decides how to
		// obtain a token: JWT (config.json), a fixed access token, or
		// the interactive oauth2 flow.
		Config: func(ctx context.Context, name string, m configmap.Mapper) {
			jsonFile, ok := m.Get("box_config_file")
			boxSubType, boxSubTypeOk := m.Get("box_sub_type")
			boxAccessToken, boxAccessTokenOk := m.Get("access_token")
			var err error
			// If using box config.json, use JWT auth
			if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
				err = refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
				if err != nil {
					log.Fatalf("Failed to configure token with jwt authentication: %v", err)
				}
				// Else, if not using an access token, use oauth2
			} else if boxAccessToken == "" || !boxAccessTokenOk {
				err = oauthutil.Config(ctx, "box", name, m, oauthConfig, nil)
				if err != nil {
					log.Fatalf("Failed to configure token with oauth authentication: %v", err)
				}
			}
			// Otherwise an access token was supplied - nothing to configure here.
		},
		Options: append(oauthutil.SharedOptions, []fs.Option{{
			Name:     "root_folder_id",
			Help:     "Fill in for rclone to use a non root folder as its starting point.",
			Default:  "0",
			Advanced: true,
		}, {
			Name: "box_config_file",
			Help: "Box App config.json location\nLeave blank normally." + env.ShellExpandHelp,
		}, {
			Name: "access_token",
			Help: "Box App Primary Access Token\nLeave blank normally.",
		}, {
			Name:    "box_sub_type",
			Default: "user",
			Examples: []fs.OptionExample{{
				Value: "user",
				Help:  "Rclone should act on behalf of a user",
			}, {
				Value: "enterprise",
				Help:  "Rclone should act on behalf of a service account",
			}},
		}, {
			Name:     "upload_cutoff",
			Help:     "Cutoff for switching to multipart upload (>= 50MB).",
			Default:  fs.SizeSuffix(defaultUploadCutoff),
			Advanced: true,
		}, {
			Name:     "commit_retries",
			Help:     "Max number of times to try committing a multipart file.",
			Default:  100,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// From https://developer.box.com/docs/error-codes#section-400-bad-request :
			// > Box only supports file or folder names that are 255 characters or less.
			// > File names containing non-printable ascii, "/" or "\", names with leading
			// > or trailing spaces, and the special names “.” and “..” are also unsupported.
			//
			// Testing revealed names with leading spaces work fine.
			// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
			Default: (encoder.Display |
				encoder.EncodeBackSlash |
				encoder.EncodeRightSpace |
				encoder.EncodeInvalidUtf8),
		}}...),
	})
}
|
|
|
|
|
2020-11-05 12:33:32 +01:00
|
|
|
func refreshJWTToken(ctx context.Context, jsonFile string, boxSubType string, name string, m configmap.Mapper) error {
|
2020-06-02 12:54:52 +02:00
|
|
|
jsonFile = env.ShellExpand(jsonFile)
|
2020-04-22 17:53:03 +02:00
|
|
|
boxConfig, err := getBoxConfig(jsonFile)
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("Failed to configure token: %v", err)
|
|
|
|
}
|
|
|
|
privateKey, err := getDecryptedPrivateKey(boxConfig)
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("Failed to configure token: %v", err)
|
|
|
|
}
|
|
|
|
claims, err := getClaims(boxConfig, boxSubType)
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalf("Failed to configure token: %v", err)
|
|
|
|
}
|
|
|
|
signingHeaders := getSigningHeaders(boxConfig)
|
|
|
|
queryParams := getQueryParams(boxConfig)
|
2020-11-13 16:24:43 +01:00
|
|
|
client := fshttp.NewClient(ctx)
|
2020-04-22 17:53:03 +02:00
|
|
|
err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-09-20 14:26:53 +02:00
|
|
|
func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
|
|
|
|
file, err := ioutil.ReadFile(configFile)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "box: failed to read Box config")
|
|
|
|
}
|
|
|
|
err = json.Unmarshal(file, &boxConfig)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "box: failed to parse Box config")
|
|
|
|
}
|
|
|
|
return boxConfig, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
|
|
|
|
val, err := jwtutil.RandomHex(20)
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
|
|
|
|
}
|
|
|
|
|
|
|
|
claims = &jws.ClaimSet{
|
|
|
|
Iss: boxConfig.BoxAppSettings.ClientID,
|
|
|
|
Sub: boxConfig.EnterpriseID,
|
|
|
|
Aud: tokenURL,
|
|
|
|
Exp: time.Now().Add(time.Second * 45).Unix(),
|
|
|
|
PrivateClaims: map[string]interface{}{
|
|
|
|
"box_sub_type": boxSubType,
|
|
|
|
"aud": tokenURL,
|
|
|
|
"jti": val,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
return claims, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
|
|
|
|
signingHeaders := &jws.Header{
|
|
|
|
Algorithm: "RS256",
|
|
|
|
Typ: "JWT",
|
|
|
|
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
|
|
|
|
}
|
|
|
|
|
|
|
|
return signingHeaders
|
|
|
|
}
|
|
|
|
|
|
|
|
func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
|
|
|
|
queryParams := map[string]string{
|
|
|
|
"client_id": boxConfig.BoxAppSettings.ClientID,
|
|
|
|
"client_secret": boxConfig.BoxAppSettings.ClientSecret,
|
|
|
|
}
|
|
|
|
|
|
|
|
return queryParams
|
|
|
|
}
|
|
|
|
|
|
|
|
func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {
|
|
|
|
|
|
|
|
block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
|
|
|
|
if len(rest) > 0 {
|
|
|
|
return nil, errors.Wrap(err, "box: extra data included in private key")
|
|
|
|
}
|
|
|
|
|
|
|
|
rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "box: failed to decrypt private key")
|
|
|
|
}
|
|
|
|
|
|
|
|
return rsaKey.(*rsa.PrivateKey), nil
|
|
|
|
}
|
|
|
|
|
2018-05-14 19:06:57 +02:00
|
|
|
// Options defines the configuration for this backend
type Options struct {
	UploadCutoff fs.SizeSuffix        `config:"upload_cutoff"`  // size above which multipart upload is used
	CommitRetries int                 `config:"commit_retries"` // max attempts to commit a multipart file
	Enc          encoder.MultiEncoder `config:"encoding"`       // filename encoding rules
	RootFolderID string               `config:"root_folder_id"` // folder ID to use as the root ("0" = real root)
	AccessToken  string               `config:"access_token"`   // fixed primary access token, if set
}
|
|
|
|
|
|
|
|
// Fs represents a remote box
type Fs struct {
	name         string                // name of this remote
	root         string                // the path we are working on
	opt          Options               // parsed options
	features     *fs.Features          // optional features
	srv          *rest.Client          // the connection to the one drive server
	dirCache     *dircache.DirCache    // Map of directory path to directory id
	pacer        *fs.Pacer             // pacer for API calls
	tokenRenewer *oauthutil.Renew      // renew the token on expiry
	uploadToken  *pacer.TokenDispenser // control concurrency
}
|
|
|
|
|
|
|
|
// Object describes a box object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path
	hasMetaData bool      // whether info below has been set
	size        int64     // size of the object
	modTime     time.Time // modification time of the object
	id          string    // ID of the object
	publicLink  string    // Public Link for the object
	sha1        string    // SHA-1 of the object content
}
|
|
|
|
|
|
|
|
// ------------------------------------------------------------
|
|
|
|
|
|
|
|
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}
|
|
|
|
|
|
|
|
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}
|
|
|
|
|
|
|
|
// String converts this Fs to a string for display purposes
func (f *Fs) String() string {
	return fmt.Sprintf("box root '%s'", f.root)
}
|
|
|
|
|
|
|
|
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}
|
|
|
|
|
2020-05-20 12:39:20 +02:00
|
|
|
// parsePath parses a box 'url' by stripping any leading and trailing
// slashes from the path.
func parsePath(path string) (root string) {
	return strings.Trim(path, "/")
}
|
|
|
|
|
|
|
|
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	429, // Too Many Requests.
	500, // Internal Server Error
	502, // Bad Gateway
	503, // Service Unavailable
	504, // Gateway Timeout
	509, // Bandwidth Limit Exceeded
}
|
|
|
|
|
|
|
|
// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// Never retry if the context has been cancelled or timed out
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	authRetry := false

	// A 401 carrying an "expired_token" challenge means the OAuth token
	// needs refreshing - retry so the token renewer can kick in
	if resp != nil && resp.StatusCode == 401 && strings.Contains(resp.Header.Get("Www-Authenticate"), "expired_token") {
		authRetry = true
		fs.Debugf(nil, "Should retry: %v", err)
	}
	return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
|
|
|
|
|
|
|
|
// readMetaDataForPath reads the metadata from the path
//
// Returns fs.ErrorObjectNotFound if the path (or its directory) does
// not exist.
func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
	// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
	leaf, directoryID, err := f.dirCache.FindPath(ctx, path, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			// a missing parent directory means the object can't exist
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	// Search the parent directory for the leaf - Box is case insensitive
	// so compare names with EqualFold
	found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
		if strings.EqualFold(item.Name, leaf) {
			info = item
			return true
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fs.ErrorObjectNotFound
	}
	return info, nil
}
|
|
|
|
|
|
|
|
// errorHandler parses a non 2xx error response into an error
|
|
|
|
func errorHandler(resp *http.Response) error {
|
|
|
|
// Decode error response
|
|
|
|
errResponse := new(api.Error)
|
|
|
|
err := rest.DecodeJSON(resp, &errResponse)
|
|
|
|
if err != nil {
|
|
|
|
fs.Debugf(nil, "Couldn't decode error response: %v", err)
|
|
|
|
}
|
|
|
|
if errResponse.Code == "" {
|
|
|
|
errResponse.Code = resp.Status
|
|
|
|
}
|
|
|
|
if errResponse.Status == 0 {
|
|
|
|
errResponse.Status = resp.StatusCode
|
|
|
|
}
|
|
|
|
return errResponse
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewFs constructs an Fs from the path, container:path
//
// If root points at a file rather than a directory it returns an Fs
// pointing at the parent directory along with fs.ErrorIsFile.
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	if opt.UploadCutoff < minUploadCutoff {
		return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
	}

	root = parsePath(root)

	client := fshttp.NewClient(ctx)
	var ts *oauthutil.TokenSource
	// If not using an accessToken, create an oauth client and tokensource
	if opt.AccessToken == "" {
		client, ts, err = oauthutil.NewClient(ctx, name, m, oauthConfig)
		if err != nil {
			return nil, errors.Wrap(err, "failed to configure Box")
		}
	}

	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:        name,
		root:        root,
		opt:         *opt,
		srv:         rest.NewClient(client).SetRoot(rootURL),
		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
	}
	f.features = (&fs.Features{
		CaseInsensitive:         true,
		CanHaveEmptyDirectories: true,
	}).Fill(ctx, f)
	f.srv.SetErrorHandler(errorHandler)

	// If using an accessToken, set the Authorization header
	if f.opt.AccessToken != "" {
		f.srv.SetHeader("Authorization", "Bearer "+f.opt.AccessToken)
	}

	jsonFile, ok := m.Get("box_config_file")
	boxSubType, boxSubTypeOk := m.Get("box_sub_type")

	// Only set up a token renewer when we actually have a token source
	// (i.e. not when using a fixed access token)
	if ts != nil {
		// If using box config.json and JWT, renewing should just refresh the token and
		// should do so whether there are uploads pending or not.
		if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
				err := refreshJWTToken(ctx, jsonFile, boxSubType, name, m)
				return err
			})
			f.tokenRenewer.Start()
		} else {
			// Renew the token in the background
			f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
				_, err := f.readMetaDataForPath(ctx, "")
				return err
			})
		}
	}

	// Get rootFolderID
	rootID := f.opt.RootFolderID
	f.dirCache = dircache.New(root, rootID, f)

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.newObjectWithInfo(ctx, remote, nil)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				return f, nil
			}
			return nil, err
		}
		f.features.Fill(ctx, &tempF)
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
|
|
|
|
|
|
|
|
// rootSlash returns root with a slash on if it is empty, otherwise empty string
|
|
|
|
func (f *Fs) rootSlash() string {
|
|
|
|
if f.root == "" {
|
|
|
|
return f.root
|
|
|
|
}
|
|
|
|
return f.root + "/"
|
|
|
|
}
|
|
|
|
|
|
|
|
// Return an Object from a path
|
|
|
|
//
|
|
|
|
// If it can't be found it returns the error fs.ErrorObjectNotFound.
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
|
2017-07-05 23:20:40 +02:00
|
|
|
o := &Object{
|
|
|
|
fs: f,
|
|
|
|
remote: remote,
|
|
|
|
}
|
|
|
|
var err error
|
|
|
|
if info != nil {
|
|
|
|
// Set info
|
|
|
|
err = o.setMetaData(info)
|
|
|
|
} else {
|
2019-06-17 10:34:30 +02:00
|
|
|
err = o.readMetaData(ctx) // reads info and meta, returning an error
|
2017-07-05 23:20:40 +02:00
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return o, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}
|
|
|
|
|
|
|
|
// FindLeaf finds a directory of name leaf in the folder with ID pathID
//
// Part of the dircache.DirCacher interface. The name comparison is
// case insensitive to match Box's behaviour.
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
	found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
		if strings.EqualFold(item.Name, leaf) {
			pathIDOut = item.ID
			return true
		}
		return false
	})
	return pathIDOut, found, err
}
|
|
|
|
|
|
|
|
// fieldsValue creates a url.Values with fields set to those in api.Item
|
|
|
|
func fieldsValue() url.Values {
|
|
|
|
values := url.Values{}
|
|
|
|
values.Set("fields", api.ItemFields)
|
|
|
|
return values
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateDir makes a directory with pathID as parent and name leaf
//
// Part of the dircache.DirCacher interface. Returns the ID of the
// newly created folder.
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
	var resp *http.Response
	var info *api.Item
	opts := rest.Opts{
		Method:     "POST",
		Path:       "/folders",
		Parameters: fieldsValue(),
	}
	mkdir := api.CreateFolder{
		// Encode the name as Box requires (see the encoding option)
		Name: f.opt.Enc.FromStandardName(leaf),
		Parent: api.Parent{
			ID: pathID,
		},
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
		return shouldRetry(ctx, resp, err)
	})
	if err != nil {
		//fmt.Printf("...Error %v\n", err)
		return "", err
	}
	// fmt.Printf("...Id %q\n", *info.Id)
	return info.ID, nil
}
|
|
|
|
|
|
|
|
// list the objects into the function supplied
//
// If directories is set it only sends directories
// User function to process a File item from listAll
//
// Should return true to finish processing
type listAllFn func(*api.Item) bool
|
|
|
|
|
|
|
|
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// Paginates through the folder listing in chunks of listChunks using
// offset/limit, filtering by item type and skipping non-active items.
func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
	opts := rest.Opts{
		Method:     "GET",
		Path:       "/folders/" + dirID + "/items",
		Parameters: fieldsValue(),
	}
	opts.Parameters.Set("limit", strconv.Itoa(listChunks))
	offset := 0
OUTER:
	for {
		opts.Parameters.Set("offset", strconv.Itoa(offset))

		var result api.FolderItems
		var resp *http.Response
		err = f.pacer.Call(func() (bool, error) {
			resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
			return shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return found, errors.Wrap(err, "couldn't list files")
		}
		for i := range result.Entries {
			item := &result.Entries[i]
			// Filter by type according to the directoriesOnly/filesOnly flags
			if item.Type == api.ItemTypeFolder {
				if filesOnly {
					continue
				}
			} else if item.Type == api.ItemTypeFile {
				if directoriesOnly {
					continue
				}
			} else {
				fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
				continue
			}
			// Skip trashed or otherwise inactive items
			if item.ItemStatus != api.ItemStatusActive {
				continue
			}
			// Decode the name back to the standard encoding
			item.Name = f.opt.Enc.ToStandardName(item.Name)
			if fn(item) {
				found = true
				break OUTER
			}
		}
		// Advance to the next page; stop when we've seen everything
		offset += result.Limit
		if offset >= result.TotalCount {
			break
		}
	}
	return
}
|
|
|
|
|
|
|
|
// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	var iErr error
	_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
		remote := path.Join(dir, info.Name)
		if info.Type == api.ItemTypeFolder {
			// cache the directory ID for later lookups
			f.dirCache.Put(remote, info.ID)
			d := fs.NewDir(remote, info.ModTime()).SetID(info.ID)
			// FIXME more info from dir?
			entries = append(entries, d)
		} else if info.Type == api.ItemTypeFile {
			o, err := f.newObjectWithInfo(ctx, remote, info)
			if err != nil {
				// remember the error and stop the listing
				iErr = err
				return true
			}
			entries = append(entries, o)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}
|
|
|
|
|
|
|
|
// Creates from the parameters passed in a half finished Object which
// must have setMetaData called on it
//
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
	// Create the directory for the object if it doesn't exist
	leaf, directoryID, err = f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return
	}
	// Temporary Object under construction
	o = &Object{
		fs:     f,
		remote: remote,
	}
	return o, leaf, directoryID, nil
}
|
|
|
|
|
|
|
|
// Put the object
|
|
|
|
//
|
|
|
|
// Copy the reader in to the new object which is returned
|
|
|
|
//
|
|
|
|
// The new object may have been created if an error is returned
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
|
|
existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
|
2017-07-05 23:20:40 +02:00
|
|
|
switch err {
|
|
|
|
case nil:
|
2019-06-17 10:34:30 +02:00
|
|
|
return existingObj, existingObj.Update(ctx, in, src, options...)
|
2017-07-05 23:20:40 +02:00
|
|
|
case fs.ErrorObjectNotFound:
|
|
|
|
// Not found so create it
|
2019-06-17 10:34:30 +02:00
|
|
|
return f.PutUnchecked(ctx, in, src)
|
2017-07-05 23:20:40 +02:00
|
|
|
default:
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-19 12:32:56 +02:00
|
|
|
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
|
|
|
return f.Put(ctx, in, src, options...)
|
2017-08-19 12:32:56 +02:00
|
|
|
}
|
|
|
|
|
2017-07-05 23:20:40 +02:00
|
|
|
// PutUnchecked the object into the container
|
|
|
|
//
|
|
|
|
// This will produce an error if the object already exists
|
|
|
|
//
|
|
|
|
// Copy the reader in to the new object which is returned
|
|
|
|
//
|
|
|
|
// The new object may have been created if an error is returned
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
2017-07-05 23:20:40 +02:00
|
|
|
remote := src.Remote()
|
|
|
|
size := src.Size()
|
2019-06-17 10:34:30 +02:00
|
|
|
modTime := src.ModTime(ctx)
|
2017-07-05 23:20:40 +02:00
|
|
|
|
2019-06-17 10:34:30 +02:00
|
|
|
o, _, _, err := f.createObject(ctx, remote, modTime, size)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
return o, o.Update(ctx, in, src, options...)
|
2017-07-05 23:20:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	_, err := f.dirCache.FindDir(ctx, dir, true)
	return err
}
|
|
|
|
|
|
|
|
// deleteObject removes an object by ID
func (f *Fs) deleteObject(ctx context.Context, id string) error {
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       "/files/" + id,
		NoResponse: true,
	}
	// Call through the pacer so rate-limit errors are retried
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.Call(ctx, &opts)
		return shouldRetry(ctx, resp, err)
	})
}
|
|
|
|
|
|
|
|
// purgeCheck removes the root directory, if check is set then it
|
|
|
|
// refuses to do so if it has anything in
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
|
2017-07-05 23:20:40 +02:00
|
|
|
root := path.Join(f.root, dir)
|
|
|
|
if root == "" {
|
|
|
|
return errors.New("can't purge root directory")
|
|
|
|
}
|
|
|
|
dc := f.dirCache
|
2019-06-17 10:34:30 +02:00
|
|
|
rootID, err := dc.FindDir(ctx, dir, false)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
opts := rest.Opts{
|
|
|
|
Method: "DELETE",
|
|
|
|
Path: "/folders/" + rootID,
|
|
|
|
Parameters: url.Values{},
|
|
|
|
NoResponse: true,
|
|
|
|
}
|
|
|
|
opts.Parameters.Set("recursive", strconv.FormatBool(!check))
|
|
|
|
var resp *http.Response
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
resp, err = f.srv.Call(ctx, &opts)
|
2021-03-11 15:44:01 +01:00
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-07-05 23:20:40 +02:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "rmdir failed")
|
|
|
|
}
|
|
|
|
f.dirCache.FlushDir(dir)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	// check=true makes purgeCheck refuse to delete a non-empty directory
	return f.purgeCheck(ctx, dir, true)
}
|
|
|
|
|
|
|
|
// Precision return the precision of this Fs
func (f *Fs) Precision() time.Duration {
	// Modification times are kept to 1 second granularity
	return time.Second
}
|
|
|
|
|
2020-10-13 23:43:40 +02:00
|
|
|
// Copy src to this remote using server-side copy operations.
|
2017-07-05 23:20:40 +02:00
|
|
|
//
|
|
|
|
// This is stored with the remote path given
|
|
|
|
//
|
|
|
|
// It returns the destination Object and a possible error
|
|
|
|
//
|
|
|
|
// Will only be called if src.Fs().Name() == f.Name()
|
|
|
|
//
|
|
|
|
// If it isn't possible then return fs.ErrorCantCopy
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
2017-07-05 23:20:40 +02:00
|
|
|
srcObj, ok := src.(*Object)
|
|
|
|
if !ok {
|
|
|
|
fs.Debugf(src, "Can't copy - not same remote type")
|
|
|
|
return nil, fs.ErrorCantCopy
|
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
err := srcObj.readMetaData(ctx)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
srcPath := srcObj.fs.rootSlash() + srcObj.remote
|
|
|
|
dstPath := f.rootSlash() + remote
|
|
|
|
if strings.ToLower(srcPath) == strings.ToLower(dstPath) {
|
|
|
|
return nil, errors.Errorf("can't copy %q -> %q as are same name when lowercase", srcPath, dstPath)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create temporary object
|
2019-06-17 10:34:30 +02:00
|
|
|
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Copy the object
|
|
|
|
opts := rest.Opts{
|
|
|
|
Method: "POST",
|
|
|
|
Path: "/files/" + srcObj.id + "/copy",
|
|
|
|
Parameters: fieldsValue(),
|
|
|
|
}
|
2018-08-04 12:16:43 +02:00
|
|
|
copyFile := api.CopyFile{
|
2020-01-14 18:33:35 +01:00
|
|
|
Name: f.opt.Enc.FromStandardName(leaf),
|
2017-07-05 23:20:40 +02:00
|
|
|
Parent: api.Parent{
|
|
|
|
ID: directoryID,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var resp *http.Response
|
|
|
|
var info *api.Item
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, ©File, &info)
|
2021-03-11 15:44:01 +01:00
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-07-05 23:20:40 +02:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
err = dstObj.setMetaData(info)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dstObj, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	// check=false deletes the directory and its contents recursively
	return f.purgeCheck(ctx, dir, false)
}
|
|
|
|
|
|
|
|
// move a file or folder
|
2019-09-04 21:00:37 +02:00
|
|
|
func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
|
2017-07-05 23:20:40 +02:00
|
|
|
// Move the object
|
|
|
|
opts := rest.Opts{
|
|
|
|
Method: "PUT",
|
|
|
|
Path: endpoint + id,
|
|
|
|
Parameters: fieldsValue(),
|
|
|
|
}
|
|
|
|
move := api.UpdateFileMove{
|
2020-01-14 18:33:35 +01:00
|
|
|
Name: f.opt.Enc.FromStandardName(leaf),
|
2017-07-05 23:20:40 +02:00
|
|
|
Parent: api.Parent{
|
|
|
|
ID: directoryID,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var resp *http.Response
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
|
2021-03-11 15:44:01 +01:00
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-07-05 23:20:40 +02:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return info, nil
|
|
|
|
}
|
|
|
|
|
2020-05-23 14:11:22 +02:00
|
|
|
// About gets quota information
|
|
|
|
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
|
|
|
|
opts := rest.Opts{
|
|
|
|
Method: "GET",
|
|
|
|
Path: "/users/me",
|
|
|
|
}
|
|
|
|
var user api.User
|
|
|
|
var resp *http.Response
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &user)
|
2021-03-11 15:44:01 +01:00
|
|
|
return shouldRetry(ctx, resp, err)
|
2020-05-23 14:11:22 +02:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "failed to read user info")
|
|
|
|
}
|
|
|
|
// FIXME max upload size would be useful to use in Update
|
|
|
|
usage = &fs.Usage{
|
2020-05-25 17:47:34 +02:00
|
|
|
Used: fs.NewUsageValue(user.SpaceUsed), // bytes in use
|
|
|
|
Total: fs.NewUsageValue(user.SpaceAmount), // bytes total
|
|
|
|
Free: fs.NewUsageValue(user.SpaceAmount - user.SpaceUsed), // bytes free
|
2020-05-23 14:11:22 +02:00
|
|
|
}
|
|
|
|
return usage, nil
|
|
|
|
}
|
|
|
|
|
2020-10-13 23:43:40 +02:00
|
|
|
// Move src to this remote using server-side move operations.
|
2017-07-05 23:20:40 +02:00
|
|
|
//
|
|
|
|
// This is stored with the remote path given
|
|
|
|
//
|
|
|
|
// It returns the destination Object and a possible error
|
|
|
|
//
|
|
|
|
// Will only be called if src.Fs().Name() == f.Name()
|
|
|
|
//
|
|
|
|
// If it isn't possible then return fs.ErrorCantMove
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
2017-07-05 23:20:40 +02:00
|
|
|
srcObj, ok := src.(*Object)
|
|
|
|
if !ok {
|
|
|
|
fs.Debugf(src, "Can't move - not same remote type")
|
|
|
|
return nil, fs.ErrorCantMove
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create temporary object
|
2019-06-17 10:34:30 +02:00
|
|
|
dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Do the move
|
2019-09-04 21:00:37 +02:00
|
|
|
info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
err = dstObj.setMetaData(info)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return dstObj, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// DirMove moves src, srcRemote to this remote at dstRemote
|
2020-10-13 23:43:40 +02:00
|
|
|
// using server-side move operations.
|
2017-07-05 23:20:40 +02:00
|
|
|
//
|
|
|
|
// Will only be called if src.Fs().Name() == f.Name()
|
|
|
|
//
|
|
|
|
// If it isn't possible then return fs.ErrorCantDirMove
|
|
|
|
//
|
|
|
|
// If destination exists then return fs.ErrorDirExists
|
2019-06-17 10:34:30 +02:00
|
|
|
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
|
2017-07-05 23:20:40 +02:00
|
|
|
srcFs, ok := src.(*Fs)
|
|
|
|
if !ok {
|
|
|
|
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
|
|
|
return fs.ErrorCantDirMove
|
|
|
|
}
|
|
|
|
|
2020-05-12 12:39:30 +02:00
|
|
|
srcID, _, _, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Do the move
|
2020-05-12 12:39:30 +02:00
|
|
|
_, err = f.move(ctx, "/folders/", srcID, dstLeaf, dstDirectoryID)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
srcFs.dirCache.FlushDir(srcRemote)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-08-19 15:13:33 +02:00
|
|
|
// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
//
// NOTE(review): the expire and unlink parameters are not used by this
// implementation - the shared link is created with default settings.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) {
	// If remote resolves to a directory share the folder, otherwise
	// fall back to sharing a single file.
	id, err := f.dirCache.FindDir(ctx, remote, false)
	var opts rest.Opts
	if err == nil {
		fs.Debugf(f, "attempting to share directory '%s'", remote)

		opts = rest.Opts{
			Method:     "PUT",
			Path:       "/folders/" + id,
			Parameters: fieldsValue(),
		}
	} else {
		fs.Debugf(f, "attempting to share single file '%s'", remote)
		// NB this err deliberately shadows the outer one - it is
		// checked and returned immediately below
		o, err := f.NewObject(ctx, remote)
		if err != nil {
			return "", err
		}

		// Reuse an existing shared link rather than creating a new one
		if o.(*Object).publicLink != "" {
			return o.(*Object).publicLink, nil
		}

		opts = rest.Opts{
			Method:     "PUT",
			Path:       "/files/" + o.(*Object).id,
			Parameters: fieldsValue(),
		}
	}

	// An empty CreateSharedLink body requests a link with defaults
	shareLink := api.CreateSharedLink{}
	var info api.Item
	var resp *http.Response
	err = f.pacer.Call(func() (bool, error) {
		resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
		return shouldRetry(ctx, resp, err)
	})
	return info.SharedLink.URL, err
}
|
|
|
|
|
Spelling fixes
Fix spelling of: above, already, anonymous, associated,
authentication, bandwidth, because, between, blocks, calculate,
candidates, cautious, changelog, cleaner, clipboard, command,
completely, concurrently, considered, constructs, corrupt, current,
daemon, dependencies, deprecated, directory, dispatcher, download,
eligible, ellipsis, encrypter, endpoint, entrieslist, essentially,
existing writers, existing, expires, filesystem, flushing, frequently,
hierarchy, however, implementation, implements, inaccurate,
individually, insensitive, longer, maximum, metadata, modified,
multipart, namedirfirst, nextcloud, obscured, opened, optional,
owncloud, pacific, passphrase, password, permanently, persimmon,
positive, potato, protocol, quota, receiving, recommends, referring,
requires, revisited, satisfied, satisfies, satisfy, semver,
serialized, session, storage, strategies, stringlist, successful,
supported, surprise, temporarily, temporary, transactions, unneeded,
update, uploads, wrapped
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-09 02:17:24 +02:00
|
|
|
// deletePermanently permanently deletes a trashed file
|
2020-06-17 14:04:33 +02:00
|
|
|
func (f *Fs) deletePermanently(ctx context.Context, itemType, id string) error {
|
|
|
|
opts := rest.Opts{
|
|
|
|
Method: "DELETE",
|
|
|
|
NoResponse: true,
|
|
|
|
}
|
|
|
|
if itemType == api.ItemTypeFile {
|
|
|
|
opts.Path = "/files/" + id + "/trash"
|
|
|
|
} else {
|
|
|
|
opts.Path = "/folders/" + id + "/trash"
|
|
|
|
}
|
|
|
|
return f.pacer.Call(func() (bool, error) {
|
|
|
|
resp, err := f.srv.Call(ctx, &opts)
|
2021-03-11 15:44:01 +01:00
|
|
|
return shouldRetry(ctx, resp, err)
|
2020-06-17 14:04:33 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// CleanUp empties the trash
|
|
|
|
func (f *Fs) CleanUp(ctx context.Context) (err error) {
|
|
|
|
opts := rest.Opts{
|
|
|
|
Method: "GET",
|
|
|
|
Path: "/folders/trash/items",
|
|
|
|
Parameters: url.Values{
|
|
|
|
"fields": []string{"type", "id"},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
opts.Parameters.Set("limit", strconv.Itoa(listChunks))
|
|
|
|
offset := 0
|
|
|
|
for {
|
|
|
|
opts.Parameters.Set("offset", strconv.Itoa(offset))
|
|
|
|
|
|
|
|
var result api.FolderItems
|
|
|
|
var resp *http.Response
|
|
|
|
err = f.pacer.Call(func() (bool, error) {
|
|
|
|
resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
|
2021-03-11 15:44:01 +01:00
|
|
|
return shouldRetry(ctx, resp, err)
|
2020-06-17 14:04:33 +02:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "couldn't list trash")
|
|
|
|
}
|
|
|
|
for i := range result.Entries {
|
|
|
|
item := &result.Entries[i]
|
|
|
|
if item.Type == api.ItemTypeFolder || item.Type == api.ItemTypeFile {
|
|
|
|
err := f.deletePermanently(ctx, item.Type, item.ID)
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "failed to delete file")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
fs.Debugf(f, "Ignoring %q - unknown type %q", item.Name, item.Type)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
offset += result.Limit
|
|
|
|
if offset >= result.TotalCount {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-07-05 23:20:40 +02:00
|
|
|
// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}
|
|
|
|
|
|
|
|
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	// Box provides SHA-1 checksums for files
	return hash.Set(hash.SHA1)
}
|
|
|
|
|
|
|
|
// ------------------------------------------------------------
|
|
|
|
|
|
|
|
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}
|
|
|
|
|
|
|
|
// Return a string version
|
|
|
|
func (o *Object) String() string {
|
|
|
|
if o == nil {
|
|
|
|
return "<nil>"
|
|
|
|
}
|
|
|
|
return o.remote
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}
|
|
|
|
|
|
|
|
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
|
2018-01-18 21:27:52 +01:00
|
|
|
if t != hash.SHA1 {
|
|
|
|
return "", hash.ErrUnsupported
|
2017-07-05 23:20:40 +02:00
|
|
|
}
|
|
|
|
return o.sha1, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Size returns the size of an object in bytes
|
|
|
|
func (o *Object) Size() int64 {
|
2019-06-17 10:34:30 +02:00
|
|
|
err := o.readMetaData(context.TODO())
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
fs.Logf(o, "Failed to read metadata: %v", err)
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return o.size
|
|
|
|
}
|
|
|
|
|
|
|
|
// setMetaData sets the metadata from info
|
|
|
|
func (o *Object) setMetaData(info *api.Item) (err error) {
|
|
|
|
if info.Type != api.ItemTypeFile {
|
|
|
|
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
|
|
|
}
|
|
|
|
o.hasMetaData = true
|
2018-04-19 10:18:14 +02:00
|
|
|
o.size = int64(info.Size)
|
2017-07-05 23:20:40 +02:00
|
|
|
o.sha1 = info.SHA1
|
|
|
|
o.modTime = info.ModTime()
|
|
|
|
o.id = info.ID
|
2018-08-19 15:13:33 +02:00
|
|
|
o.publicLink = info.SharedLink.URL
|
2017-07-05 23:20:40 +02:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// readMetaData gets the metadata if it hasn't already been fetched
|
|
|
|
//
|
|
|
|
// it also sets the info
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) readMetaData(ctx context.Context) (err error) {
|
2017-07-05 23:20:40 +02:00
|
|
|
if o.hasMetaData {
|
|
|
|
return nil
|
|
|
|
}
|
2019-06-17 10:34:30 +02:00
|
|
|
info, err := o.fs.readMetaDataForPath(ctx, o.remote)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
if apiErr, ok := err.(*api.Error); ok {
|
|
|
|
if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
|
|
|
|
return fs.ErrorObjectNotFound
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return o.setMetaData(info)
|
|
|
|
}
|
|
|
|
|
|
|
|
// ModTime returns the modification time of the object
|
|
|
|
//
|
|
|
|
//
|
|
|
|
// It attempts to read the objects mtime and if that isn't present the
|
|
|
|
// LastModified returned in the http headers
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) ModTime(ctx context.Context) time.Time {
|
|
|
|
err := o.readMetaData(ctx)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
fs.Logf(o, "Failed to read metadata: %v", err)
|
|
|
|
return time.Now()
|
|
|
|
}
|
|
|
|
return o.modTime
|
|
|
|
}
|
|
|
|
|
|
|
|
// setModTime sets the modification time of the local fs object
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
|
2017-07-05 23:20:40 +02:00
|
|
|
opts := rest.Opts{
|
|
|
|
Method: "PUT",
|
|
|
|
Path: "/files/" + o.id,
|
|
|
|
Parameters: fieldsValue(),
|
|
|
|
}
|
|
|
|
update := api.UpdateFileModTime{
|
|
|
|
ContentModifiedAt: api.Time(modTime),
|
|
|
|
}
|
|
|
|
var info *api.Item
|
|
|
|
err := o.fs.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
|
2021-03-11 15:44:01 +01:00
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-07-05 23:20:40 +02:00
|
|
|
})
|
|
|
|
return info, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetModTime sets the modification time of the local fs object
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
|
|
|
|
info, err := o.setModTime(ctx, modTime)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return o.setMetaData(info)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Storable returns a boolean showing whether this object storable
func (o *Object) Storable() bool {
	// All box objects are storable
	return true
}
|
|
|
|
|
|
|
|
// Open an object for read
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
2017-07-05 23:20:40 +02:00
|
|
|
if o.id == "" {
|
|
|
|
return nil, errors.New("can't download - no id")
|
|
|
|
}
|
2018-01-22 18:05:47 +01:00
|
|
|
fs.FixRangeOption(options, o.size)
|
2017-07-05 23:20:40 +02:00
|
|
|
var resp *http.Response
|
|
|
|
opts := rest.Opts{
|
|
|
|
Method: "GET",
|
|
|
|
Path: "/files/" + o.id + "/content",
|
|
|
|
Options: options,
|
|
|
|
}
|
|
|
|
err = o.fs.pacer.Call(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
resp, err = o.fs.srv.Call(ctx, &opts)
|
2021-03-11 15:44:01 +01:00
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-07-05 23:20:40 +02:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return resp.Body, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// upload does a single non-multipart upload
|
|
|
|
//
|
|
|
|
// This is recommended for less than 50 MB of content
|
2020-03-21 22:45:57 +01:00
|
|
|
func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time, options ...fs.OpenOption) (err error) {
|
2017-07-05 23:20:40 +02:00
|
|
|
upload := api.UploadFile{
|
2020-01-14 18:33:35 +01:00
|
|
|
Name: o.fs.opt.Enc.FromStandardName(leaf),
|
2017-07-05 23:20:40 +02:00
|
|
|
ContentModifiedAt: api.Time(modTime),
|
|
|
|
ContentCreatedAt: api.Time(modTime),
|
|
|
|
Parent: api.Parent{
|
|
|
|
ID: directoryID,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var resp *http.Response
|
|
|
|
var result api.FolderItems
|
|
|
|
opts := rest.Opts{
|
2018-08-18 13:33:33 +02:00
|
|
|
Method: "POST",
|
|
|
|
Body: in,
|
2017-07-05 23:20:40 +02:00
|
|
|
MultipartMetadataName: "attributes",
|
|
|
|
MultipartContentName: "contents",
|
|
|
|
MultipartFileName: upload.Name,
|
|
|
|
RootURL: uploadURL,
|
2020-03-21 22:45:57 +01:00
|
|
|
Options: options,
|
2017-07-05 23:20:40 +02:00
|
|
|
}
|
|
|
|
// If object has an ID then it is existing so create a new version
|
|
|
|
if o.id != "" {
|
|
|
|
opts.Path = "/files/" + o.id + "/content"
|
|
|
|
} else {
|
|
|
|
opts.Path = "/files/content"
|
|
|
|
}
|
|
|
|
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
|
2019-09-04 21:00:37 +02:00
|
|
|
resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
|
2021-03-11 15:44:01 +01:00
|
|
|
return shouldRetry(ctx, resp, err)
|
2017-07-05 23:20:40 +02:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if result.TotalCount != 1 || len(result.Entries) != 1 {
|
|
|
|
return errors.Errorf("failed to upload %v - not sure why", o)
|
|
|
|
}
|
|
|
|
return o.setMetaData(&result.Entries[0])
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the object with the contents of the io.Reader, modTime and size
|
|
|
|
//
|
|
|
|
// If existing is set then it updates the object rather than creating a new one
|
|
|
|
//
|
|
|
|
// The new object may have been created if an error is returned
|
2019-06-17 10:34:30 +02:00
|
|
|
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
2020-05-22 11:59:45 +02:00
|
|
|
if o.fs.tokenRenewer != nil {
|
|
|
|
o.fs.tokenRenewer.Start()
|
|
|
|
defer o.fs.tokenRenewer.Stop()
|
|
|
|
}
|
2017-07-05 23:20:40 +02:00
|
|
|
|
|
|
|
size := src.Size()
|
2019-06-17 10:34:30 +02:00
|
|
|
modTime := src.ModTime(ctx)
|
2017-09-14 18:21:11 +02:00
|
|
|
remote := o.Remote()
|
2017-07-05 23:20:40 +02:00
|
|
|
|
|
|
|
// Create the directory for the object if it doesn't exist
|
2020-05-11 18:24:37 +02:00
|
|
|
leaf, directoryID, err := o.fs.dirCache.FindPath(ctx, remote, true)
|
2017-07-05 23:20:40 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Upload with simple or multipart
|
2018-05-14 19:06:57 +02:00
|
|
|
if size <= int64(o.fs.opt.UploadCutoff) {
|
2020-03-21 22:45:57 +01:00
|
|
|
err = o.upload(ctx, in, leaf, directoryID, modTime, options...)
|
2017-07-05 23:20:40 +02:00
|
|
|
} else {
|
2020-03-21 22:49:12 +01:00
|
|
|
err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime, options...)
|
2017-07-05 23:20:40 +02:00
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	// Delete by box ID
	return o.fs.deleteObject(ctx, o.id)
}
|
|
|
|
|
2018-05-13 10:16:56 +02:00
|
|
|
// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}
|
|
|
|
|
2017-07-05 23:20:40 +02:00
|
|
|
// Check the interfaces are satisfied
// (compile-time assertions - the blank identifiers cost nothing at runtime)
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
)
|