Mirror of https://github.com/rclone/rclone.git, synced 2025-01-11 08:49:37 +01:00
Implement new backend config system
This unifies the 3 methods of reading config:

  * command line
  * environment variable
  * config file

and allows them all to be configured in all places. This is done by making the []fs.Option in the backend registration be the master source of what the backend options are.

The backend changes are:

  * Use the new configmap.Mapper parameter
  * Use configstruct to parse it into an Options struct
  * Add all config to []fs.Option including defaults and help
  * Remove all uses of pflag
  * Remove all uses of config.FileGet
parent 3c89406886
commit f3f48d7d49
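In outline, every backend in this commit moves to the same shape: options declared once in the []fs.Option registration, mirrored by an Options struct with `config:` tags, and parsed in NewFs from the configmap.Mapper. A condensed sketch of that pattern, distilled from the alias diff below (the "example" backend name and the stubbed-out construction are illustrative, not part of the commit):

package example

import (
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
)

// Options defines the configuration for this backend.
// The `config:` tags tie each field to an fs.Option by name.
type Options struct {
    Remote string `config:"remote"`
}

func init() {
    fs.Register(&fs.RegInfo{
        Name:        "example", // illustrative name, not a real backend
        Description: "Example backend",
        NewFs:       NewFs,
        // The master source of what the options are, with help and defaults.
        Options: []fs.Option{{
            Name:     "remote",
            Help:     "Remote or path to point at.",
            Required: true,
        }},
    })
}

// NewFs now takes a configmap.Mapper, so the value of "remote" can come
// from the command line, an environment variable or the config file.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    opt := new(Options)
    if err := configstruct.Set(m, opt); err != nil {
        return nil, err
    }
    _ = opt.Remote // ... construct and return the real Fs here ...
    return nil, nil
}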
backend/alias/alias.go

@@ -7,7 +7,8 @@ import (
 	"strings"

 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 )

 // Register with Fs
@@ -17,29 +18,42 @@ func init() {
 		Description: "Alias for a existing remote",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
+			Required: true,
 		}},
 	}
 	fs.Register(fsi)
 }

+// Options defines the configuration for this backend
+type Options struct {
+	Remote string `config:"remote"`
+}
+
 // NewFs contstructs an Fs from the path.
 //
 // The returned Fs is the actual Fs, referenced by remote in the config
-func NewFs(name, root string) (fs.Fs, error) {
-	remote := config.FileGet(name, "remote")
-	if remote == "" {
-		return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
-	}
-	if strings.HasPrefix(remote, name+":") {
-		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
-	}
-	fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
 	if err != nil {
 		return nil, err
 	}
-	root = filepath.ToSlash(root)
-	return fsInfo.NewFs(configName, path.Join(fsPath, root))
+	if opt.Remote == "" {
+		return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
+	}
+	if strings.HasPrefix(opt.Remote, name+":") {
+		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
+	}
+	_, configName, fsPath, err := fs.ParseRemote(opt.Remote)
+	if err != nil {
+		return nil, err
+	}
+	root = path.Join(fsPath, filepath.ToSlash(root))
+	if configName == "local" {
+		return fs.NewFs(root)
+	}
+	return fs.NewFs(configName + ":" + root)
 }
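What threading the mapper through buys: the backend no longer knows or cares which of the three sources supplied a value. The following is not rclone's configmap package, just a minimal standalone illustration of the layered lookup it performs, with earlier sources overriding later ones:

package main

import "fmt"

// Getter mirrors the single-method shape of a config source.
type Getter interface {
    Get(key string) (value string, ok bool)
}

type mapGetter map[string]string

func (m mapGetter) Get(key string) (string, bool) { v, ok := m[key]; return v, ok }

// Map consults its sources in order; the first hit wins, so earlier
// sources (e.g. command line) override later ones (e.g. config file).
type Map struct{ sources []Getter }

func (m *Map) Get(key string) (string, bool) {
    for _, g := range m.sources {
        if v, ok := g.Get(key); ok {
            return v, true
        }
    }
    return "", false
}

func main() {
    m := &Map{sources: []Getter{
        mapGetter{},                                          // command line flags (none set here)
        mapGetter{"chunk_size": "10M"},                       // environment variables
        mapGetter{"chunk_size": "5M", "remote": "s3:bucket"}, // config file
    }}
    v, _ := m.Get("chunk_size")
    fmt.Println(v) // "10M" - the environment wins over the config file
}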
backend/amazonclouddrive/amazonclouddrive.go

@@ -24,7 +24,8 @@ import (
 	"github.com/ncw/go-acd"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
@@ -37,19 +38,17 @@ import (
 )

 const (
 	folderKind      = "FOLDER"
 	fileKind        = "FILE"
 	statusAvailable = "AVAILABLE"
 	timeFormat      = time.RFC3339 // 2014-03-07T22:31:12.173Z
 	minSleep        = 20 * time.Millisecond
 	warnFileSize    = 50000 << 20 // Display warning for files larger than this size
+	defaultTempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
 )

 // Globals
 var (
-	// Flags
-	tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
-	uploadWaitPerGB   = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
 	// Description of how to auth for this app
 	acdConfig = &oauth2.Config{
 		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -67,35 +66,62 @@ var (
 func init() {
 	fs.Register(&fs.RegInfo{
 		Name:        "amazon cloud drive",
+		Prefix:      "acd",
 		Description: "Amazon Drive",
 		NewFs:       NewFs,
-		Config: func(name string) {
-			err := oauthutil.Config("amazon cloud drive", name, acdConfig)
+		Config: func(name string, m configmap.Mapper) {
+			err := oauthutil.Config("amazon cloud drive", name, m, acdConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
 		},
 		Options: []fs.Option{{
 			Name: config.ConfigClientID,
-			Help: "Amazon Application Client Id - required.",
+			Help:     "Amazon Application Client ID.",
+			Required: true,
 		}, {
 			Name: config.ConfigClientSecret,
-			Help: "Amazon Application Client Secret - required.",
+			Help:     "Amazon Application Client Secret.",
+			Required: true,
 		}, {
 			Name: config.ConfigAuthURL,
-			Help: "Auth server URL - leave blank to use Amazon's.",
+			Help:     "Auth server URL.\nLeave blank to use Amazon's.",
+			Advanced: true,
 		}, {
 			Name: config.ConfigTokenURL,
-			Help: "Token server url - leave blank to use Amazon's.",
+			Help:     "Token server url.\nleave blank to use Amazon's.",
+			Advanced: true,
+		}, {
+			Name:     "checkpoint",
+			Help:     "Checkpoint for internal polling (debug).",
+			Hide:     fs.OptionHideBoth,
+			Advanced: true,
+		}, {
+			Name:     "upload_wait_per_gb",
+			Help:     "Additional time per GB to wait after a failed complete upload to see if it appears.",
+			Default:  fs.Duration(180 * time.Second),
+			Advanced: true,
+		}, {
+			Name:     "templink_threshold",
+			Help:     "Files >= this size will be downloaded via their tempLink.",
+			Default:  defaultTempLinkThreshold,
+			Advanced: true,
 		}},
 	})
-	flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
 }
+
+// Options defines the configuration for this backend
+type Options struct {
+	Checkpoint        string        `config:"checkpoint"`
+	UploadWaitPerGB   fs.Duration   `config:"upload_wait_per_gb"`
+	TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
+}

 // Fs represents a remote acd server
 type Fs struct {
 	name         string       // name of this remote
 	features     *fs.Features // optional features
+	opt          Options      // options for this Fs
 	c            *acd.Client  // the connection to the acd server
 	noAuthClient *http.Client // unauthenticated http client
 	root         string       // the path we are working on
@@ -191,7 +217,13 @@ func filterRequest(req *http.Request) {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
 	root = parsePath(root)
 	baseClient := fshttp.NewClient(fs.Config)
 	if do, ok := baseClient.Transport.(interface {
@@ -201,7 +233,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	} else {
 		fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
 	}
-	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
+	oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, acdConfig, baseClient)
 	if err != nil {
 		log.Fatalf("Failed to configure Amazon Drive: %v", err)
 	}
@@ -210,6 +242,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	f := &Fs{
 		name:         name,
 		root:         root,
+		opt:          *opt,
 		c:            c,
 		pacer:        pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
 		noAuthClient: fshttp.NewClient(fs.Config),
@@ -527,13 +560,13 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
 	}

 	// Don't wait for uploads - assume they will appear later
-	if *uploadWaitPerGB <= 0 {
+	if f.opt.UploadWaitPerGB <= 0 {
 		fs.Debugf(src, "Upload error detected but waiting disabled: %v (%q)", inErr, httpStatus)
 		return false, inInfo, inErr
 	}

 	// Time we should wait for the upload
-	uploadWaitPerByte := float64(*uploadWaitPerGB) / 1024 / 1024 / 1024
+	uploadWaitPerByte := float64(f.opt.UploadWaitPerGB) / 1024 / 1024 / 1024
 	timeToWait := time.Duration(uploadWaitPerByte * float64(src.Size()))

 	const sleepTime = 5 * time.Second // sleep between tries
@@ -1015,7 +1048,7 @@ func (o *Object) Storable() bool {

 // Open an object for read
 func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
-	bigObject := o.Size() >= int64(tempLinkThreshold)
+	bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
 	if bigObject {
 		fs.Debugf(o, "Downloading large object via tempLink")
 	}
@@ -1208,7 +1241,7 @@ func (o *Object) MimeType() string {
 //
 // Close the returned channel to stop being notified.
 func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
-	checkpoint := config.FileGet(f.name, "checkpoint")
+	checkpoint := f.opt.Checkpoint

 	quit := make(chan bool)
 	go func() {
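The acd conversion above leans on configstruct.Set to turn the mapper's strings into typed fields such as fs.Duration for upload_wait_per_gb. A much-reduced, hypothetical version of that mechanism, enough to show the tag-driven reflection (the real package also applies defaults and supports many more types):

package main

import (
    "fmt"
    "reflect"
    "strconv"
    "time"
)

type getter map[string]string

// set is a hypothetical reduced re-implementation, not the real
// configstruct: walk the struct fields, look each `config:"..."` tag
// up in the source, and parse the string into the field's type.
func set(vals getter, opt interface{}) error {
    v := reflect.ValueOf(opt).Elem()
    t := v.Type()
    for i := 0; i < t.NumField(); i++ {
        key := t.Field(i).Tag.Get("config")
        s, ok := vals[key]
        if !ok {
            continue
        }
        switch f := v.Field(i); f.Kind() {
        case reflect.String:
            f.SetString(s)
        case reflect.Bool:
            b, err := strconv.ParseBool(s)
            if err != nil {
                return err
            }
            f.SetBool(b)
        case reflect.Int64: // here assumed to be a time.Duration field
            d, err := time.ParseDuration(s)
            if err != nil {
                return err
            }
            f.SetInt(int64(d))
        default:
            return fmt.Errorf("unsupported kind %s", f.Kind())
        }
    }
    return nil
}

type Options struct {
    Checkpoint      string        `config:"checkpoint"`
    UploadWaitPerGB time.Duration `config:"upload_wait_per_gb"`
}

func main() {
    opt := new(Options)
    if err := set(getter{"upload_wait_per_gb": "3m"}, opt); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", *opt) // {Checkpoint: UploadWaitPerGB:3m0s}
}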
backend/azureblob/azureblob.go

@@ -24,8 +24,8 @@ import (
 	"github.com/Azure/azure-storage-blob-go/2018-03-28/azblob"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/accounting"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fs/walk"
@@ -44,14 +44,10 @@ const (
 	maxTotalParts         = 50000 // in multipart upload
 	storageDefaultBaseURL = "blob.core.windows.net"
 	// maxUncommittedSize = 9 << 30 // can't upload bigger than this
-)
-
-// Globals
-var (
-	maxChunkSize    = fs.SizeSuffix(100 * 1024 * 1024)
-	chunkSize       = fs.SizeSuffix(4 * 1024 * 1024)
-	uploadCutoff    = fs.SizeSuffix(256 * 1024 * 1024)
-	maxUploadCutoff = fs.SizeSuffix(256 * 1024 * 1024)
+	defaultChunkSize    = 4 * 1024 * 1024
+	maxChunkSize        = 100 * 1024 * 1024
+	defaultUploadCutoff = 256 * 1024 * 1024
+	maxUploadCutoff     = 256 * 1024 * 1024
 )

 // Register with Fs
@@ -70,22 +66,39 @@ func init() {
 			Name: "sas_url",
 			Help: "SAS URL for container level access only\n(leave blank if using account/key or connection string)",
 		}, {
 			Name: "endpoint",
-			Help: "Endpoint for the service - leave blank normally.",
-		}},
+			Help:     "Endpoint for the service\nLeave blank normally.",
+			Advanced: true,
+		}, {
+			Name:     "upload_cutoff",
+			Help:     "Cutoff for switching to chunked upload.",
+			Default:  fs.SizeSuffix(defaultUploadCutoff),
+			Advanced: true,
+		}, {
+			Name:     "chunk_size",
+			Help:     "Upload chunk size. Must fit in memory.",
+			Default:  fs.SizeSuffix(defaultChunkSize),
+			Advanced: true,
+		}},
 	})
-	flags.VarP(&uploadCutoff, "azureblob-upload-cutoff", "", "Cutoff for switching to chunked upload")
-	flags.VarP(&chunkSize, "azureblob-chunk-size", "", "Upload chunk size. Must fit in memory.")
 }
+
+// Options defines the configuration for this backend
+type Options struct {
+	Account      string        `config:"account"`
+	Key          string        `config:"key"`
+	Endpoint     string        `config:"endpoint"`
+	SASURL       string        `config:"sas_url"`
+	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
+	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
+}

 // Fs represents a remote azure server
 type Fs struct {
 	name      string               // name of this remote
 	root      string               // the path we are working on if any
+	opt       Options              // parsed config options
 	features  *fs.Features         // optional features
-	account   string               // account name
-	endpoint  string               // name of the starting api endpoint
 	svcURL    *azblob.ServiceURL   // reference to serviceURL
 	cntURL    *azblob.ContainerURL // reference to containerURL
 	container string               // the container we are working on
@@ -177,21 +190,27 @@ func (f *Fs) shouldRetry(err error) (bool, error) {
 }

 // NewFs contstructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
-	if uploadCutoff > maxUploadCutoff {
-		return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", uploadCutoff, maxUploadCutoff)
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
 	}
-	if chunkSize > maxChunkSize {
-		return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, chunkSize)
+	if opt.UploadCutoff > maxUploadCutoff {
+		return nil, errors.Errorf("azure: upload cutoff (%v) must be less than or equal to %v", opt.UploadCutoff, maxUploadCutoff)
+	}
+	if opt.ChunkSize > maxChunkSize {
+		return nil, errors.Errorf("azure: chunk size can't be greater than %v - was %v", maxChunkSize, opt.ChunkSize)
 	}
 	container, directory, err := parsePath(root)
 	if err != nil {
 		return nil, err
 	}
-	account := config.FileGet(name, "account")
-	key := config.FileGet(name, "key")
-	sasURL := config.FileGet(name, "sas_url")
-	endpoint := config.FileGet(name, "endpoint", storageDefaultBaseURL)
+	if opt.Endpoint == "" {
+		opt.Endpoint = storageDefaultBaseURL
+	}

 	var (
 		u *url.URL
@@ -199,17 +218,17 @@ func NewFs(name, root string) (fs.Fs, error) {
 		containerURL azblob.ContainerURL
 	)
 	switch {
-	case account != "" && key != "":
-		credential := azblob.NewSharedKeyCredential(account, key)
-		u, err = url.Parse(fmt.Sprintf("https://%s.%s", account, endpoint))
+	case opt.Account != "" && opt.Key != "":
+		credential := azblob.NewSharedKeyCredential(opt.Account, opt.Key)
+		u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint))
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint")
 		}
 		pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
 		serviceURL = azblob.NewServiceURL(*u, pipeline)
 		containerURL = serviceURL.NewContainerURL(container)
-	case sasURL != "":
-		u, err = url.Parse(sasURL)
+	case opt.SASURL != "":
+		u, err = url.Parse(opt.SASURL)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to parse SAS URL")
 		}
@@ -234,10 +253,9 @@ func NewFs(name, root string) (fs.Fs, error) {

 	f := &Fs{
 		name:      name,
+		opt:       *opt,
 		container: container,
 		root:      directory,
-		account:   account,
-		endpoint:  endpoint,
 		svcURL:    &serviceURL,
 		cntURL:    &containerURL,
 		pacer:     pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
@@ -990,7 +1008,7 @@ type readSeeker struct {
 // Write a larger blob, using CreateBlockBlob, PutBlock, and PutBlockList.
 func (o *Object) uploadMultipart(in io.Reader, size int64, blob *azblob.BlobURL, httpHeaders *azblob.BlobHTTPHeaders) (err error) {
 	// Calculate correct chunkSize
-	chunkSize := int64(chunkSize)
+	chunkSize := int64(o.fs.opt.ChunkSize)
 	var totalParts int64
 	for {
 		// Calculate number of parts
@@ -1147,7 +1165,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	httpHeaders.ContentType = fs.MimeType(o)
 	// Multipart upload doesn't support MD5 checksums at put block calls, hence calculate
 	// MD5 only for PutBlob requests
-	if size < int64(uploadCutoff) {
+	if size < int64(o.fs.opt.UploadCutoff) {
 		if sourceMD5, _ := src.Hash(hash.MD5); sourceMD5 != "" {
 			sourceMD5bytes, err := hex.DecodeString(sourceMD5)
 			if err == nil {
@@ -1159,7 +1177,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	}

 	putBlobOptions := azblob.UploadStreamToBlockBlobOptions{
-		BufferSize:      int(chunkSize),
+		BufferSize:      int(o.fs.opt.ChunkSize),
 		MaxBuffers:      4,
 		Metadata:        o.meta,
 		BlobHTTPHeaders: httpHeaders,
@@ -1168,7 +1186,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	ctx := context.Background()
 	// Don't retry, return a retry error instead
 	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-		if size >= int64(uploadCutoff) {
+		if size >= int64(o.fs.opt.UploadCutoff) {
 			// If a large file upload in chunks
 			err = o.uploadMultipart(in, size, &blob, &httpHeaders)
 		} else {
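Because a chunk_size can now arrive from any of the three sources, azureblob validates the parsed values immediately after configstruct.Set rather than trusting a flag default. A toy size parser (hypothetical, far cruder than rclone's fs.SizeSuffix) showing the normalise-then-bounds-check order used above:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseSize turns "4M" or "1G" into a byte count; rclone's real
// fs.SizeSuffix handles many more suffixes and fractional values.
func parseSize(s string) (int64, error) {
    mult := int64(1)
    switch {
    case strings.HasSuffix(s, "M"):
        mult, s = 1<<20, strings.TrimSuffix(s, "M")
    case strings.HasSuffix(s, "G"):
        mult, s = 1<<30, strings.TrimSuffix(s, "G")
    }
    n, err := strconv.ParseInt(s, 10, 64)
    if err != nil {
        return 0, err
    }
    return n * mult, nil
}

func main() {
    const maxChunkSize = 100 << 20
    // "4M" from a flag, an env var or the config file all normalise to
    // the same int64 before the bounds check runs.
    chunk, err := parseSize("4M")
    if err != nil || chunk > maxChunkSize {
        panic("invalid chunk_size")
    }
    fmt.Println(chunk) // 4194304
}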
backend/b2/b2.go (162 changed lines)

@@ -22,8 +22,8 @@ import (
 	"github.com/ncw/rclone/backend/b2/api"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/accounting"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
@@ -34,30 +34,27 @@ import (
 )

 const (
 	defaultEndpoint  = "https://api.backblazeb2.com"
 	headerPrefix     = "x-bz-info-" // lower case as that is what the server returns
 	timeKey          = "src_last_modified_millis"
 	timeHeader       = headerPrefix + timeKey
 	sha1Key          = "large_file_sha1"
 	sha1Header       = "X-Bz-Content-Sha1"
 	sha1InfoHeader   = headerPrefix + sha1Key
 	testModeHeader   = "X-Bz-Test-Mode"
 	retryAfterHeader = "Retry-After"
 	minSleep         = 10 * time.Millisecond
 	maxSleep         = 5 * time.Minute
 	decayConstant    = 1 // bigger for slower decay, exponential
 	maxParts         = 10000
 	maxVersions      = 100 // maximum number of versions we search in --b2-versions mode
+	minChunkSize        = 5E6
+	defaultChunkSize    = 96 * 1024 * 1024
+	defaultUploadCutoff = 200E6
 )

 // Globals
 var (
-	minChunkSize = fs.SizeSuffix(5E6)
-	chunkSize    = fs.SizeSuffix(96 * 1024 * 1024)
-	uploadCutoff = fs.SizeSuffix(200E6)
-	b2TestMode   = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
-	b2Versions   = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
-	b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
 	errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
 )

@@ -68,29 +65,64 @@ func init() {
 		Description: "Backblaze B2",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "account",
 			Help: "Account ID",
+			Required: true,
 		}, {
 			Name: "key",
 			Help: "Application Key",
+			Required: true,
 		}, {
 			Name: "endpoint",
-			Help: "Endpoint for the service - leave blank normally.",
-		}},
+			Help:     "Endpoint for the service.\nLeave blank normally.",
+			Advanced: true,
+		}, {
+			Name:     "test_mode",
+			Help:     "A flag string for X-Bz-Test-Mode header for debugging.",
+			Default:  "",
+			Hide:     fs.OptionHideConfigurator,
+			Advanced: true,
+		}, {
+			Name:     "versions",
+			Help:     "Include old versions in directory listings.",
+			Default:  false,
+			Advanced: true,
+		}, {
+			Name:    "hard_delete",
+			Help:    "Permanently delete files on remote removal, otherwise hide files.",
+			Default: false,
+		}, {
+			Name:     "upload_cutoff",
+			Help:     "Cutoff for switching to chunked upload.",
+			Default:  fs.SizeSuffix(defaultUploadCutoff),
+			Advanced: true,
+		}, {
+			Name:     "chunk_size",
+			Help:     "Upload chunk size. Must fit in memory.",
+			Default:  fs.SizeSuffix(defaultChunkSize),
+			Advanced: true,
+		}},
 	})
-	flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
-	flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
 }
+
+// Options defines the configuration for this backend
+type Options struct {
+	Account      string        `config:"account"`
+	Key          string        `config:"key"`
+	Endpoint     string        `config:"endpoint"`
+	TestMode     string        `config:"test_mode"`
+	Versions     bool          `config:"versions"`
+	HardDelete   bool          `config:"hard_delete"`
+	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
+	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
+}

 // Fs represents a remote b2 server
 type Fs struct {
 	name       string       // name of this remote
 	root       string       // the path we are working on if any
+	opt        Options      // parsed config options
 	features   *fs.Features // optional features
-	account    string       // account name
-	key        string       // auth key
-	endpoint   string       // name of the starting api endpoint
 	srv        *rest.Client // the connection to the b2 server
 	bucket     string       // the bucket we are working on
 	bucketOKMu sync.Mutex   // mutex to protect bucket OK
@@ -232,33 +264,37 @@ func errorHandler(resp *http.Response) error {
 }

 // NewFs contstructs an Fs from the path, bucket:path
-func NewFs(name, root string) (fs.Fs, error) {
-	if uploadCutoff < chunkSize {
-		return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize)
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
 	}
-	if chunkSize < minChunkSize {
-		return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
+	if opt.UploadCutoff < opt.ChunkSize {
+		return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", opt.UploadCutoff, opt.ChunkSize)
+	}
+	if opt.ChunkSize < minChunkSize {
+		return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, opt.ChunkSize)
 	}
 	bucket, directory, err := parsePath(root)
 	if err != nil {
 		return nil, err
 	}
-	account := config.FileGet(name, "account")
-	if account == "" {
+	if opt.Account == "" {
 		return nil, errors.New("account not found")
 	}
-	key := config.FileGet(name, "key")
-	if key == "" {
+	if opt.Key == "" {
 		return nil, errors.New("key not found")
 	}
-	endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
+	if opt.Endpoint == "" {
+		opt.Endpoint = defaultEndpoint
+	}
 	f := &Fs{
 		name:   name,
+		opt:    *opt,
 		bucket: bucket,
 		root:   directory,
-		account:  account,
-		key:      key,
-		endpoint: endpoint,
 		srv:          rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
 		pacer:        pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 		bufferTokens: make(chan []byte, fs.Config.Transfers),
@@ -269,8 +305,8 @@ func NewFs(name, root string) (fs.Fs, error) {
 		BucketBased: true,
 	}).Fill(f)
 	// Set the test flag if required
-	if *b2TestMode != "" {
-		testMode := strings.TrimSpace(*b2TestMode)
+	if opt.TestMode != "" {
+		testMode := strings.TrimSpace(opt.TestMode)
 		f.srv.SetHeader(testModeHeader, testMode)
 		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
 	}
@@ -316,9 +352,9 @@ func (f *Fs) authorizeAccount() error {
 	opts := rest.Opts{
 		Method:       "GET",
 		Path:         "/b2api/v1/b2_authorize_account",
-		RootURL:      f.endpoint,
-		UserName:     f.account,
-		Password:     f.key,
+		RootURL:      f.opt.Endpoint,
+		UserName:     f.opt.Account,
+		Password:     f.opt.Key,
 		ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
 	}
 	err := f.pacer.Call(func() (bool, error) {
@@ -384,7 +420,7 @@ func (f *Fs) clearUploadURL() {
 func (f *Fs) getUploadBlock() []byte {
 	buf := <-f.bufferTokens
 	if buf == nil {
-		buf = make([]byte, chunkSize)
+		buf = make([]byte, f.opt.ChunkSize)
 	}
 	// fs.Debugf(f, "Getting upload block %p", buf)
 	return buf
@@ -393,7 +429,7 @@ func (f *Fs) getUploadBlock() []byte {
 // putUploadBlock returns a block to the pool of size chunkSize
 func (f *Fs) putUploadBlock(buf []byte) {
 	buf = buf[:cap(buf)]
-	if len(buf) != int(chunkSize) {
+	if len(buf) != int(f.opt.ChunkSize) {
 		panic("bad blocksize returned to pool")
 	}
 	// fs.Debugf(f, "Returning upload block %p", buf)
@@ -563,7 +599,7 @@ func (f *Fs) markBucketOK() {
 // listDir lists a single directory
 func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
 	last := ""
-	err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
+	err = f.list(dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
 		if err != nil {
 			return err
@@ -635,7 +671,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 	}
 	list := walk.NewListRHelper(callback)
 	last := ""
-	err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
+	err = f.list(dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
 		entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
 		if err != nil {
 			return err
@@ -1035,12 +1071,12 @@ func (o *Object) readMetaData() (err error) {
 	maxSearched := 1
 	var timestamp api.Timestamp
 	baseRemote := o.remote
-	if *b2Versions {
+	if o.fs.opt.Versions {
 		timestamp, baseRemote = api.RemoveVersion(baseRemote)
 		maxSearched = maxVersions
 	}
 	var info *api.File
-	err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
+	err = o.fs.list("", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
 		if isDirectory {
 			return nil
 		}
@@ -1254,7 +1290,7 @@ func urlEncode(in string) string {
 //
 // The new object may have been created if an error is returned
 func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
-	if *b2Versions {
+	if o.fs.opt.Versions {
 		return errNotWithVersions
 	}
 	err = o.fs.Mkdir("")
@@ -1289,7 +1325,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		} else {
 			return err
 		}
-	} else if size > int64(uploadCutoff) {
+	} else if size > int64(o.fs.opt.UploadCutoff) {
 		up, err := o.fs.newLargeUpload(o, in, src)
 		if err != nil {
 			return err
@@ -1408,10 +1444,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

 // Remove an object
 func (o *Object) Remove() error {
-	if *b2Versions {
+	if o.fs.opt.Versions {
 		return errNotWithVersions
 	}
-	if *b2HardDelete {
+	if o.fs.opt.HardDelete {
 		return o.fs.deleteByID(o.id, o.fs.root+o.remote)
 	}
 	return o.fs.hide(o.fs.root + o.remote)

backend/b2/upload.go

@@ -86,10 +86,10 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
 	parts := int64(0)
 	sha1SliceSize := int64(maxParts)
 	if size == -1 {
-		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
+		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", f.opt.ChunkSize, maxParts*f.opt.ChunkSize)
 	} else {
-		parts = size / int64(chunkSize)
-		if size%int64(chunkSize) != 0 {
+		parts = size / int64(o.fs.opt.ChunkSize)
+		if size%int64(o.fs.opt.ChunkSize) != 0 {
 			parts++
 		}
 		if parts > maxParts {
@@ -409,8 +409,8 @@ outer:
 	}

 		reqSize := remaining
-		if reqSize >= int64(chunkSize) {
-			reqSize = int64(chunkSize)
+		if reqSize >= int64(up.f.opt.ChunkSize) {
+			reqSize = int64(up.f.opt.ChunkSize)
 		}

 		// Get a block of memory
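A side effect worth noting in the b2 hunks: chunk_size used to be a package-level flag shared by every b2 remote in the process, and is now per-Fs state (f.opt.ChunkSize) feeding the upload buffer pool, so two b2 remotes can run with different chunk sizes in one invocation. A standalone sketch of that pool pattern, simplified from getUploadBlock/putUploadBlock above:

package main

import "fmt"

// pool doles out at most `transfers` chunk-sized buffers via a
// buffered channel of tokens, as the b2 backend does with bufferTokens.
type pool struct {
    tokens    chan []byte
    chunkSize int
}

func newPool(transfers, chunkSize int) *pool {
    p := &pool{tokens: make(chan []byte, transfers), chunkSize: chunkSize}
    for i := 0; i < transfers; i++ {
        p.tokens <- nil // nil token: buffer is allocated lazily on first use
    }
    return p
}

func (p *pool) get() []byte {
    buf := <-p.tokens
    if buf == nil {
        buf = make([]byte, p.chunkSize)
    }
    return buf
}

func (p *pool) put(buf []byte) {
    buf = buf[:cap(buf)]
    // The length check guards against returning a block of the wrong
    // size now that chunkSize is per-remote rather than global.
    if len(buf) != p.chunkSize {
        panic("bad blocksize returned to pool")
    }
    p.tokens <- buf
}

func main() {
    p := newPool(4, 96<<20)
    buf := p.get()
    fmt.Println(len(buf)) // 100663296
    p.put(buf)
}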
backend/box/box.go

@@ -23,7 +23,8 @@ import (
 	"github.com/ncw/rclone/backend/box/api"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/hash"
@@ -46,6 +47,7 @@ const (
 	uploadURL       = "https://upload.box.com/api/2.0"
 	listChunks      = 1000     // chunk size to read directory listings
 	minUploadCutoff = 50000000 // upload cutoff can be no lower than this
+	defaultUploadCutoff = 50 * 1024 * 1024
 )

 // Globals
@@ -61,7 +63,6 @@ var (
 		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
 		RedirectURL:  oauthutil.RedirectURL,
 	}
-	uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
 )

 // Register with Fs
@@ -70,27 +71,37 @@ func init() {
 		Name:        "box",
 		Description: "Box",
 		NewFs:       NewFs,
-		Config: func(name string) {
-			err := oauthutil.Config("box", name, oauthConfig)
+		Config: func(name string, m configmap.Mapper) {
+			err := oauthutil.Config("box", name, m, oauthConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
 		},
 		Options: []fs.Option{{
 			Name: config.ConfigClientID,
-			Help: "Box App Client Id - leave blank normally.",
+			Help: "Box App Client Id.\nLeave blank normally.",
 		}, {
 			Name: config.ConfigClientSecret,
-			Help: "Box App Client Secret - leave blank normally.",
+			Help: "Box App Client Secret\nLeave blank normally.",
+		}, {
+			Name:     "upload_cutoff",
+			Help:     "Cutoff for switching to multipart upload.",
+			Default:  fs.SizeSuffix(defaultUploadCutoff),
+			Advanced: true,
 		}},
 	})
-	flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
 }
+
+// Options defines the configuration for this backend
+type Options struct {
+	UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
+}

 // Fs represents a remote box
 type Fs struct {
 	name     string             // name of this remote
 	root     string             // the path we are working on
+	opt      Options            // parsed options
 	features *fs.Features       // optional features
 	srv      *rest.Client       // the connection to the one drive server
 	dirCache *dircache.DirCache // Map of directory path to directory id
@@ -219,13 +230,20 @@ func errorHandler(resp *http.Response) error {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
-	if uploadCutoff < minUploadCutoff {
-		return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", uploadCutoff, fs.SizeSuffix(minUploadCutoff))
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+
+	if opt.UploadCutoff < minUploadCutoff {
+		return nil, errors.Errorf("box: upload cutoff (%v) must be greater than equal to %v", opt.UploadCutoff, fs.SizeSuffix(minUploadCutoff))
 	}

 	root = parsePath(root)
-	oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
+	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
 		log.Fatalf("Failed to configure Box: %v", err)
 	}
@@ -233,6 +251,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	f := &Fs{
 		name:        name,
 		root:        root,
+		opt:         *opt,
 		srv:         rest.NewClient(oAuthClient).SetRoot(rootURL),
 		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 		uploadToken: pacer.NewTokenDispenser(fs.Config.Transfers),
@@ -1035,7 +1054,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	}

 	// Upload with simple or multipart
-	if size <= int64(uploadCutoff) {
+	if size <= int64(o.fs.opt.UploadCutoff) {
 		err = o.upload(in, leaf, directoryID, modTime)
 	} else {
 		err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
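The box conversion is the same recipe in miniature: one new advanced option, an Options struct with a single field, and the global uploadCutoff replaced by o.fs.opt.UploadCutoff. The payoff of declaring upload_cutoff once is that the same setting becomes reachable everywhere - in current rclone that means the --box-upload-cutoff flag, the RCLONE_BOX_UPLOAD_CUTOFF environment variable, or an upload_cutoff line in the remote's section of the config file (the exact flag and variable spellings were finalised in follow-up commits) - and configstruct sees the same string whichever route it took.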
backend/cache/cache.go (438 changed lines)

@@ -18,7 +18,8 @@ import (
 	"github.com/ncw/rclone/backend/crypt"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fs/rc"
@@ -30,13 +31,13 @@ import (

 const (
 	// DefCacheChunkSize is the default value for chunk size
-	DefCacheChunkSize = "5M"
+	DefCacheChunkSize = fs.SizeSuffix(5 * 1024 * 1024)
 	// DefCacheTotalChunkSize is the default value for the maximum size of stored chunks
-	DefCacheTotalChunkSize = "10G"
+	DefCacheTotalChunkSize = fs.SizeSuffix(10 * 1024 * 1024 * 1024)
 	// DefCacheChunkCleanInterval is the interval at which chunks are cleaned
-	DefCacheChunkCleanInterval = "1m"
+	DefCacheChunkCleanInterval = fs.Duration(time.Minute)
 	// DefCacheInfoAge is the default value for object info age
-	DefCacheInfoAge = "6h"
+	DefCacheInfoAge = fs.Duration(6 * time.Hour)
 	// DefCacheReadRetries is the default value for read retries
 	DefCacheReadRetries = 10
 	// DefCacheTotalWorkers is how many workers run in parallel to download chunks
@@ -48,29 +49,9 @@ const (
 	// DefCacheWrites will cache file data on writes through the cache
 	DefCacheWrites = false
 	// DefCacheTmpWaitTime says how long should files be stored in local cache before being uploaded
-	DefCacheTmpWaitTime = "15m"
+	DefCacheTmpWaitTime = fs.Duration(15 * time.Second)
 	// DefCacheDbWaitTime defines how long the cache backend should wait for the DB to be available
-	DefCacheDbWaitTime = 1 * time.Second
-)
-
-// Globals
-var (
-	// Flags
-	cacheDbPath             = flags.StringP("cache-db-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cache DB")
-	cacheChunkPath          = flags.StringP("cache-chunk-path", "", filepath.Join(config.CacheDir, "cache-backend"), "Directory to cached chunk files")
-	cacheDbPurge            = flags.BoolP("cache-db-purge", "", false, "Purge the cache DB before")
-	cacheChunkSize          = flags.StringP("cache-chunk-size", "", DefCacheChunkSize, "The size of a chunk")
-	cacheTotalChunkSize     = flags.StringP("cache-total-chunk-size", "", DefCacheTotalChunkSize, "The total size which the chunks can take up from the disk")
-	cacheChunkCleanInterval = flags.StringP("cache-chunk-clean-interval", "", DefCacheChunkCleanInterval, "Interval at which chunk cleanup runs")
-	cacheInfoAge            = flags.StringP("cache-info-age", "", DefCacheInfoAge, "How much time should object info be stored in cache")
-	cacheReadRetries        = flags.IntP("cache-read-retries", "", DefCacheReadRetries, "How many times to retry a read from a cache storage")
-	cacheTotalWorkers       = flags.IntP("cache-workers", "", DefCacheTotalWorkers, "How many workers should run in parallel to download chunks")
-	cacheChunkNoMemory      = flags.BoolP("cache-chunk-no-memory", "", DefCacheChunkNoMemory, "Disable the in-memory cache for storing chunks during streaming")
-	cacheRps                = flags.IntP("cache-rps", "", int(DefCacheRps), "Limits the number of requests per second to the source FS. -1 disables the rate limiter")
-	cacheStoreWrites        = flags.BoolP("cache-writes", "", DefCacheWrites, "Will cache file data on writes through the FS")
-	cacheTempWritePath      = flags.StringP("cache-tmp-upload-path", "", "", "Directory to keep temporary files until they are uploaded to the cloud storage")
-	cacheTempWaitTime       = flags.StringP("cache-tmp-wait-time", "", DefCacheTmpWaitTime, "How long should files be stored in local cache before being uploaded")
-	cacheDbWaitTime         = flags.DurationP("cache-db-wait-time", "", DefCacheDbWaitTime, "How long to wait for the DB to be available - 0 is unlimited")
+	DefCacheDbWaitTime = fs.Duration(1 * time.Second)
 )

 // Register with Fs
@@ -80,73 +61,155 @@ func init() {
 		Description: "Cache a remote",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote to cache.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+			Required: true,
 		}, {
 			Name: "plex_url",
-			Help: "Optional: The URL of the Plex server",
-			Optional: true,
+			Help: "The URL of the Plex server",
 		}, {
 			Name: "plex_username",
-			Help: "Optional: The username of the Plex user",
-			Optional: true,
+			Help: "The username of the Plex user",
 		}, {
 			Name:       "plex_password",
-			Help:       "Optional: The password of the Plex user",
+			Help:       "The password of the Plex user",
 			IsPassword: true,
-			Optional:   true,
+		}, {
+			Name:     "plex_token",
+			Help:     "The plex token for authentication - auto set normally",
+			Hide:     fs.OptionHideBoth,
+			Advanced: true,
 		}, {
 			Name: "chunk_size",
-			Help: "The size of a chunk. Lower value good for slow connections but can affect seamless reading. \nDefault: " + DefCacheChunkSize,
-			Examples: []fs.OptionExample{
-				{
-					Value: "1m",
-					Help:  "1MB",
-				}, {
-					Value: "5M",
-					Help:  "5 MB",
-				}, {
-					Value: "10M",
-					Help:  "10 MB",
-				},
-			},
-			Optional: true,
+			Help:    "The size of a chunk. Lower value good for slow connections but can affect seamless reading.",
+			Default: DefCacheChunkSize,
+			Examples: []fs.OptionExample{{
+				Value: "1m",
+				Help:  "1MB",
+			}, {
+				Value: "5M",
+				Help:  "5 MB",
+			}, {
+				Value: "10M",
+				Help:  "10 MB",
+			}},
 		}, {
 			Name: "info_age",
-			Help: "How much time should object info (file size, file hashes etc) be stored in cache. Use a very high value if you don't plan on changing the source FS from outside the cache. \nAccepted units are: \"s\", \"m\", \"h\".\nDefault: " + DefCacheInfoAge,
-			Examples: []fs.OptionExample{
-				{
-					Value: "1h",
-					Help:  "1 hour",
-				}, {
-					Value: "24h",
-					Help:  "24 hours",
-				}, {
-					Value: "48h",
-					Help:  "48 hours",
-				},
-			},
-			Optional: true,
+			Help:    "How much time should object info (file size, file hashes etc) be stored in cache.\nUse a very high value if you don't plan on changing the source FS from outside the cache.\nAccepted units are: \"s\", \"m\", \"h\".",
+			Default: DefCacheInfoAge,
+			Examples: []fs.OptionExample{{
+				Value: "1h",
+				Help:  "1 hour",
+			}, {
+				Value: "24h",
+				Help:  "24 hours",
+			}, {
+				Value: "48h",
+				Help:  "48 hours",
+			}},
 		}, {
 			Name: "chunk_total_size",
-			Help: "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted. \nDefault: " + DefCacheTotalChunkSize,
-			Examples: []fs.OptionExample{
-				{
-					Value: "500M",
-					Help:  "500 MB",
-				}, {
-					Value: "1G",
-					Help:  "1 GB",
-				}, {
-					Value: "10G",
-					Help:  "10 GB",
-				},
-			},
-			Optional: true,
+			Help:    "The maximum size of stored chunks. When the storage grows beyond this size, the oldest chunks will be deleted.",
+			Default: DefCacheTotalChunkSize,
+			Examples: []fs.OptionExample{{
+				Value: "500M",
+				Help:  "500 MB",
+			}, {
+				Value: "1G",
+				Help:  "1 GB",
+			}, {
+				Value: "10G",
+				Help:  "10 GB",
+			}},
+		}, {
+			Name:     "db_path",
+			Default:  filepath.Join(config.CacheDir, "cache-backend"),
+			Help:     "Directory to cache DB",
+			Advanced: true,
+		}, {
+			Name:     "chunk_path",
+			Default:  filepath.Join(config.CacheDir, "cache-backend"),
+			Help:     "Directory to cache chunk files",
+			Advanced: true,
+		}, {
+			Name:     "db_purge",
+			Default:  false,
+			Help:     "Purge the cache DB before",
+			Hide:     fs.OptionHideConfigurator,
+			Advanced: true,
+		}, {
+			Name:     "chunk_clean_interval",
+			Default:  DefCacheChunkCleanInterval,
+			Help:     "Interval at which chunk cleanup runs",
+			Advanced: true,
+		}, {
+			Name:     "read_retries",
+			Default:  DefCacheReadRetries,
+			Help:     "How many times to retry a read from a cache storage",
+			Advanced: true,
+		}, {
+			Name:     "workers",
+			Default:  DefCacheTotalWorkers,
+			Help:     "How many workers should run in parallel to download chunks",
+			Advanced: true,
+		}, {
+			Name:     "chunk_no_memory",
+			Default:  DefCacheChunkNoMemory,
+			Help:     "Disable the in-memory cache for storing chunks during streaming",
+			Advanced: true,
+		}, {
+			Name:     "rps",
+			Default:  int(DefCacheRps),
+			Help:     "Limits the number of requests per second to the source FS. -1 disables the rate limiter",
+			Advanced: true,
+		}, {
+			Name:     "writes",
+			Default:  DefCacheWrites,
+			Help:     "Will cache file data on writes through the FS",
+			Advanced: true,
+		}, {
+			Name:     "tmp_upload_path",
+			Default:  "",
+			Help:     "Directory to keep temporary files until they are uploaded to the cloud storage",
+			Advanced: true,
+		}, {
+			Name:     "tmp_wait_time",
+			Default:  DefCacheTmpWaitTime,
+			Help:     "How long should files be stored in local cache before being uploaded",
+			Advanced: true,
+		}, {
+			Name:     "db_wait_time",
+			Default:  DefCacheDbWaitTime,
+			Help:     "How long to wait for the DB to be available - 0 is unlimited",
+			Advanced: true,
 		}},
 	})
 }

+// Options defines the configuration for this backend
+type Options struct {
+	Remote             string        `config:"remote"`
+	PlexURL            string        `config:"plex_url"`
+	PlexUsername       string        `config:"plex_username"`
+	PlexPassword       string        `config:"plex_password"`
+	PlexToken          string        `config:"plex_token"`
+	ChunkSize          fs.SizeSuffix `config:"chunk_size"`
+	InfoAge            fs.Duration   `config:"info_age"`
+	ChunkTotalSize     fs.SizeSuffix `config:"chunk_total_size"`
+	DbPath             string        `config:"db_path"`
+	ChunkPath          string        `config:"chunk_path"`
+	DbPurge            bool          `config:"db_purge"`
+	ChunkCleanInterval fs.Duration   `config:"chunk_clean_interval"`
|
||||||
|
ReadRetries int `config:"read_retries"`
|
||||||
|
TotalWorkers int `config:"workers"`
|
||||||
|
ChunkNoMemory bool `config:"chunk_no_memory"`
|
||||||
|
Rps int `config:"rps"`
|
||||||
|
StoreWrites bool `config:"writes"`
|
||||||
|
TempWritePath string `config:"tmp_upload_path"`
|
||||||
|
TempWaitTime fs.Duration `config:"tmp_wait_time"`
|
||||||
|
DbWaitTime fs.Duration `config:"db_wait_time"`
|
||||||
|
}
|
||||||
|
|
||||||
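Note: the `config:"..."` struct tags above are how configstruct matches the registered option names to Options fields. A minimal sketch of that parsing step in isolation (parseCacheOptions is a hypothetical helper written for illustration; only the configmap.Simple and configstruct.Set APIs that appear in this diff are assumed):

	// parseCacheOptions decodes a flat key->value config layer into Options,
	// using the `config:"..."` struct tags to match registered option names.
	func parseCacheOptions(m configmap.Mapper) (*Options, error) {
		opt := new(Options)
		err := configstruct.Set(m, opt)
		if err != nil {
			return nil, err // eg a malformed SizeSuffix or Duration value surfaces here
		}
		return opt, nil
	}

	// Usage sketch, with hypothetical values:
	//   m := configmap.Simple{}         // map[string]string implementing configmap.Mapper
	//   m.Set("remote", "gdrive:media") // hypothetical wrapped remote
	//   m.Set("chunk_size", "10M")      // becomes an fs.SizeSuffix via the config tag
	//   m.Set("info_age", "48h")        // becomes an fs.Duration
	//   opt, err := parseCacheOptions(m)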
 // Fs represents a wrapped fs.Fs
 type Fs struct {
 	fs.Fs
@@ -154,21 +217,10 @@ type Fs struct {
 	name     string
 	root     string
+	opt      Options      // parsed options
 	features *fs.Features // optional features
 	cache    *Persistent
+	tempFs   fs.Fs
-	fileAge            time.Duration
-	chunkSize          int64
-	chunkTotalSize     int64
-	chunkCleanInterval time.Duration
-	readRetries        int
-	totalWorkers       int
-	totalMaxWorkers    int
-	chunkMemory        bool
-	cacheWrites        bool
-	tempWritePath      string
-	tempWriteWait      time.Duration
-	tempFs             fs.Fs
 
 	lastChunkCleanup time.Time
 	cleanupMu        sync.Mutex
@@ -188,9 +240,19 @@ func parseRootPath(path string) (string, error) {
 }
 
 // NewFs constructs a Fs from the path, container:path
-func NewFs(name, rootPath string) (fs.Fs, error) {
-	remote := config.FileGet(name, "remote")
-	if strings.HasPrefix(remote, name+":") {
+func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+	if opt.ChunkTotalSize < opt.ChunkSize*fs.SizeSuffix(opt.TotalWorkers) {
+		return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
+			opt.ChunkTotalSize, opt.ChunkSize, opt.TotalWorkers)
+	}
+
+	if strings.HasPrefix(opt.Remote, name+":") {
 		return nil, errors.New("can't point cache remote at itself - check the value of the remote setting")
 	}
 
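The up-front size check added here replaces the same validation that previously ran only after the Fs was built (removed further down). Roughly, each of the configured workers can hold one chunk in flight, so chunk_total_size must cover at least chunk_size * workers; for example, a 5M chunk size with 8 workers needs a chunk_total_size of at least 40M.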
@@ -199,7 +261,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
 		return nil, errors.Wrapf(err, "failed to clean root path %q", rootPath)
 	}
 
-	remotePath := path.Join(remote, rpath)
+	remotePath := path.Join(opt.Remote, rpath)
 	wrappedFs, wrapErr := fs.NewFs(remotePath)
 	if wrapErr != nil && wrapErr != fs.ErrorIsFile {
 		return nil, errors.Wrapf(wrapErr, "failed to make remote %q to wrap", remotePath)
@@ -210,97 +272,46 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
 		fsErr = fs.ErrorIsFile
 		rpath = cleanPath(path.Dir(rpath))
 	}
-	plexURL := config.FileGet(name, "plex_url")
-	plexToken := config.FileGet(name, "plex_token")
-	var chunkSize fs.SizeSuffix
-	chunkSizeString := config.FileGet(name, "chunk_size", DefCacheChunkSize)
-	if *cacheChunkSize != DefCacheChunkSize {
-		chunkSizeString = *cacheChunkSize
-	}
-	err = chunkSize.Set(chunkSizeString)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand chunk size %v", chunkSizeString)
-	}
-	var chunkTotalSize fs.SizeSuffix
-	chunkTotalSizeString := config.FileGet(name, "chunk_total_size", DefCacheTotalChunkSize)
-	if *cacheTotalChunkSize != DefCacheTotalChunkSize {
-		chunkTotalSizeString = *cacheTotalChunkSize
-	}
-	err = chunkTotalSize.Set(chunkTotalSizeString)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand chunk total size %v", chunkTotalSizeString)
-	}
-	chunkCleanIntervalStr := *cacheChunkCleanInterval
-	chunkCleanInterval, err := time.ParseDuration(chunkCleanIntervalStr)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand duration %v", chunkCleanIntervalStr)
-	}
-	infoAge := config.FileGet(name, "info_age", DefCacheInfoAge)
-	if *cacheInfoAge != DefCacheInfoAge {
-		infoAge = *cacheInfoAge
-	}
-	infoDuration, err := time.ParseDuration(infoAge)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand duration %v", infoAge)
-	}
-	waitTime, err := time.ParseDuration(*cacheTempWaitTime)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to understand duration %v", *cacheTempWaitTime)
-	}
 	// configure cache backend
-	if *cacheDbPurge {
+	if opt.DbPurge {
 		fs.Debugf(name, "Purging the DB")
 	}
 	f := &Fs{
 		Fs:   wrappedFs,
 		name: name,
 		root: rpath,
-		fileAge:            infoDuration,
-		chunkSize:          int64(chunkSize),
-		chunkTotalSize:     int64(chunkTotalSize),
-		chunkCleanInterval: chunkCleanInterval,
-		readRetries:        *cacheReadRetries,
-		totalWorkers:       *cacheTotalWorkers,
-		totalMaxWorkers:    *cacheTotalWorkers,
-		chunkMemory:        !*cacheChunkNoMemory,
-		cacheWrites:        *cacheStoreWrites,
-		lastChunkCleanup:   time.Now().Truncate(time.Hour * 24 * 30),
-		tempWritePath:      *cacheTempWritePath,
-		tempWriteWait:      waitTime,
-		cleanupChan:        make(chan bool, 1),
-		notifiedRemotes:    make(map[string]bool),
+		opt:              *opt,
+		lastChunkCleanup: time.Now().Truncate(time.Hour * 24 * 30),
+		cleanupChan:      make(chan bool, 1),
+		notifiedRemotes:  make(map[string]bool),
 	}
-	if f.chunkTotalSize < (f.chunkSize * int64(f.totalWorkers)) {
-		return nil, errors.Errorf("don't set cache-total-chunk-size(%v) less than cache-chunk-size(%v) * cache-workers(%v)",
-			f.chunkTotalSize, f.chunkSize, f.totalWorkers)
-	}
-	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(*cacheRps)), f.totalWorkers)
+	f.rateLimiter = rate.NewLimiter(rate.Limit(float64(opt.Rps)), opt.TotalWorkers)
 
 	f.plexConnector = &plexConnector{}
-	if plexURL != "" {
-		if plexToken != "" {
-			f.plexConnector, err = newPlexConnectorWithToken(f, plexURL, plexToken)
+	if opt.PlexURL != "" {
+		if opt.PlexToken != "" {
+			f.plexConnector, err = newPlexConnectorWithToken(f, opt.PlexURL, opt.PlexToken)
 			if err != nil {
-				return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
+				return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
 			}
 		} else {
-			plexUsername := config.FileGet(name, "plex_username")
-			plexPassword := config.FileGet(name, "plex_password")
-			if plexPassword != "" && plexUsername != "" {
-				decPass, err := obscure.Reveal(plexPassword)
+			if opt.PlexPassword != "" && opt.PlexUsername != "" {
+				decPass, err := obscure.Reveal(opt.PlexPassword)
 				if err != nil {
-					decPass = plexPassword
+					decPass = opt.PlexPassword
 				}
-				f.plexConnector, err = newPlexConnector(f, plexURL, plexUsername, decPass)
+				f.plexConnector, err = newPlexConnector(f, opt.PlexURL, opt.PlexUsername, decPass, func(token string) {
+					m.Set("plex_token", token)
+				})
 				if err != nil {
-					return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", plexURL)
+					return nil, errors.Wrapf(err, "failed to connect to the Plex API %v", opt.PlexURL)
 				}
 			}
 		}
 	}
 
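The extra callback argument to newPlexConnector is how the token gets persisted: on a successful login it calls m.Set("plex_token", token), writing the token back through the configmap.Mapper, so the next run takes the opt.PlexToken != "" branch and reconnects via newPlexConnectorWithToken without re-sending the username and password.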
-	dbPath := *cacheDbPath
-	chunkPath := *cacheChunkPath
+	dbPath := f.opt.DbPath
+	chunkPath := f.opt.ChunkPath
 	// if the dbPath is non default but the chunk path is default, we overwrite the last to follow the same one as dbPath
 	if dbPath != filepath.Join(config.CacheDir, "cache-backend") &&
 		chunkPath == filepath.Join(config.CacheDir, "cache-backend") {
@@ -326,7 +337,8 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
 	fs.Infof(name, "Cache DB path: %v", dbPath)
 	fs.Infof(name, "Cache chunk path: %v", chunkPath)
 	f.cache, err = GetPersistent(dbPath, chunkPath, &Features{
-		PurgeDb: *cacheDbPurge,
+		PurgeDb:    opt.DbPurge,
+		DbWaitTime: time.Duration(opt.DbWaitTime),
 	})
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to start cache db")
@@ -335,7 +347,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
 	c := make(chan os.Signal, 1)
 	signal.Notify(c, syscall.SIGHUP)
 	atexit.Register(func() {
-		if plexURL != "" {
+		if opt.PlexURL != "" {
 			f.plexConnector.closeWebsocket()
 		}
 		f.StopBackgroundRunners()
@@ -350,35 +362,35 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
 		}
 	}()
 
-	fs.Infof(name, "Chunk Memory: %v", f.chunkMemory)
-	fs.Infof(name, "Chunk Size: %v", fs.SizeSuffix(f.chunkSize))
-	fs.Infof(name, "Chunk Total Size: %v", fs.SizeSuffix(f.chunkTotalSize))
-	fs.Infof(name, "Chunk Clean Interval: %v", f.chunkCleanInterval.String())
-	fs.Infof(name, "Workers: %v", f.totalWorkers)
-	fs.Infof(name, "File Age: %v", f.fileAge.String())
-	if f.cacheWrites {
+	fs.Infof(name, "Chunk Memory: %v", !f.opt.ChunkNoMemory)
+	fs.Infof(name, "Chunk Size: %v", f.opt.ChunkSize)
+	fs.Infof(name, "Chunk Total Size: %v", f.opt.ChunkTotalSize)
+	fs.Infof(name, "Chunk Clean Interval: %v", f.opt.ChunkCleanInterval)
+	fs.Infof(name, "Workers: %v", f.opt.TotalWorkers)
+	fs.Infof(name, "File Age: %v", f.opt.InfoAge)
+	if !f.opt.StoreWrites {
 		fs.Infof(name, "Cache Writes: enabled")
 	}
 
-	if f.tempWritePath != "" {
-		err = os.MkdirAll(f.tempWritePath, os.ModePerm)
+	if f.opt.TempWritePath != "" {
+		err = os.MkdirAll(f.opt.TempWritePath, os.ModePerm)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to create cache directory %v", f.tempWritePath)
+			return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
 		}
-		f.tempWritePath = filepath.ToSlash(f.tempWritePath)
-		f.tempFs, err = fs.NewFs(f.tempWritePath)
+		f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
+		f.tempFs, err = fs.NewFs(f.opt.TempWritePath)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
 		}
-		fs.Infof(name, "Upload Temp Rest Time: %v", f.tempWriteWait.String())
-		fs.Infof(name, "Upload Temp FS: %v", f.tempWritePath)
+		fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
+		fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
 		f.backgroundRunner, _ = initBackgroundUploader(f)
 		go f.backgroundRunner.run()
 	}
 
 	go func() {
 		for {
-			time.Sleep(f.chunkCleanInterval)
+			time.Sleep(time.Duration(f.opt.ChunkCleanInterval))
 			select {
 			case <-f.cleanupChan:
 				fs.Infof(f, "stopping cleanup")
@@ -391,7 +403,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
 	}()
 
 	if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
-		doChangeNotify(f.receiveChangeNotify, f.chunkCleanInterval)
+		doChangeNotify(f.receiveChangeNotify, time.Duration(f.opt.ChunkCleanInterval))
 	}
 
 	f.features = (&fs.Features{
@@ -400,7 +412,7 @@ func NewFs(name, rootPath string) (fs.Fs, error) {
 	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
 	// override only those features that use a temp fs and it doesn't support them
 	//f.features.ChangeNotify = f.ChangeNotify
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		if f.tempFs.Features().Copy == nil {
 			f.features.Copy = nil
 		}
@@ -563,7 +575,7 @@ func (f *Fs) receiveChangeNotify(forgetPath string, entryType fs.EntryType) {
 // notifyChangeUpstreamIfNeeded will check if the wrapped remote doesn't notify on changes
 // or if we use a temp fs
 func (f *Fs) notifyChangeUpstreamIfNeeded(remote string, entryType fs.EntryType) {
-	if f.Fs.Features().ChangeNotify == nil || f.tempWritePath != "" {
+	if f.Fs.Features().ChangeNotify == nil || f.opt.TempWritePath != "" {
 		f.notifyChangeUpstream(remote, entryType)
 	}
 }
@@ -613,17 +625,17 @@ func (f *Fs) String() string {
 
 // ChunkSize returns the configured chunk size
 func (f *Fs) ChunkSize() int64 {
-	return f.chunkSize
+	return int64(f.opt.ChunkSize)
 }
 
 // InfoAge returns the configured file age
 func (f *Fs) InfoAge() time.Duration {
-	return f.fileAge
+	return time.Duration(f.opt.InfoAge)
 }
 
 // TempUploadWaitTime returns the configured temp file upload wait time
 func (f *Fs) TempUploadWaitTime() time.Duration {
-	return f.tempWriteWait
+	return time.Duration(f.opt.TempWaitTime)
 }
 
 // NewObject finds the Object at remote.
@@ -636,16 +648,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 	err = f.cache.GetObject(co)
 	if err != nil {
 		fs.Debugf(remote, "find: error: %v", err)
-	} else if time.Now().After(co.CacheTs.Add(f.fileAge)) {
+	} else if time.Now().After(co.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
 		fs.Debugf(co, "find: cold object: %+v", co)
 	} else {
-		fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(f.fileAge))
+		fs.Debugf(co, "find: warm object: %v, expiring on: %v", co, co.CacheTs.Add(time.Duration(f.opt.InfoAge)))
 		return co, nil
 	}
 
 	// search for entry in source or temp fs
 	var obj fs.Object
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		obj, err = f.tempFs.NewObject(remote)
 		// not found in temp fs
 		if err != nil {
@@ -679,13 +691,13 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 	entries, err = f.cache.GetDirEntries(cd)
 	if err != nil {
 		fs.Debugf(dir, "list: error: %v", err)
-	} else if time.Now().After(cd.CacheTs.Add(f.fileAge)) {
+	} else if time.Now().After(cd.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
 		fs.Debugf(dir, "list: cold listing: %v", cd.CacheTs)
 	} else if len(entries) == 0 {
 		// TODO: read empty dirs from source?
 		fs.Debugf(dir, "list: empty listing")
 	} else {
-		fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(f.fileAge))
+		fs.Debugf(dir, "list: warm %v from cache for: %v, expiring on: %v", len(entries), cd.abs(), cd.CacheTs.Add(time.Duration(f.opt.InfoAge)))
 		fs.Debugf(dir, "list: cached entries: %v", entries)
 		return entries, nil
 	}
@@ -693,7 +705,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 
 	// we first search any temporary files stored locally
 	var cachedEntries fs.DirEntries
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		queuedEntries, err := f.cache.searchPendingUploadFromDir(cd.abs())
 		if err != nil {
 			fs.Errorf(dir, "list: error getting pending uploads: %v", err)
@@ -744,7 +756,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 		case fs.Directory:
 			cdd := DirectoryFromOriginal(f, o)
 			// check if the dir isn't expired and add it in cache if it isn't
-			if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(f.fileAge)) {
+			if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
 				batchDirectories = append(batchDirectories, cdd)
 			}
 			cachedEntries = append(cachedEntries, cdd)
@@ -867,7 +879,7 @@ func (f *Fs) Mkdir(dir string) error {
 func (f *Fs) Rmdir(dir string) error {
 	fs.Debugf(f, "rmdir '%s'", dir)
 
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		// pause background uploads
 		f.backgroundRunner.pause()
 		defer f.backgroundRunner.play()
@@ -952,7 +964,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 		return fs.ErrorCantDirMove
 	}
 
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		// pause background uploads
 		f.backgroundRunner.pause()
 		defer f.backgroundRunner.play()
@@ -1079,7 +1091,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
 	go func() {
 		var offset int64
 		for {
-			chunk := make([]byte, f.chunkSize)
+			chunk := make([]byte, f.opt.ChunkSize)
 			readSize, err := io.ReadFull(pr, chunk)
 			// we ignore 3 failures which are ok:
 			// 1. EOF - original reading finished and we got a full buffer too
@@ -1127,7 +1139,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 	var obj fs.Object
 
 	// queue for upload and store in temp fs if configured
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		// we need to clear the caches before a put through temp fs
 		parentCd := NewDirectory(f, cleanPath(path.Dir(src.Remote())))
 		_ = f.cache.ExpireDir(parentCd)
@@ -1146,7 +1158,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 		}
 		fs.Infof(obj, "put: queued for upload")
 	// if cache writes is enabled write it first through cache
-	} else if f.cacheWrites {
+	} else if f.opt.StoreWrites {
 		f.cacheReader(in, src, func(inn io.Reader) {
 			obj, err = put(inn, src, options...)
 		})
@@ -1243,7 +1255,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 
 	if srcObj.isTempFile() {
 		// we check if the feature is stil active
-		if f.tempWritePath == "" {
+		if f.opt.TempWritePath == "" {
 			fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
 			return nil, fs.ErrorCantCopy
 		}
@@ -1319,7 +1331,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	// if this is a temp object then we perform the changes locally
 	if srcObj.isTempFile() {
 		// we check if the feature is stil active
-		if f.tempWritePath == "" {
+		if f.opt.TempWritePath == "" {
 			fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
 			return nil, fs.ErrorCantMove
 		}
@@ -1460,8 +1472,8 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
 	f.cleanupMu.Lock()
 	defer f.cleanupMu.Unlock()
 
-	if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(f.chunkCleanInterval)) {
-		f.cache.CleanChunksBySize(f.chunkTotalSize)
+	if ignoreLastTs || time.Now().After(f.lastChunkCleanup.Add(time.Duration(f.opt.ChunkCleanInterval))) {
+		f.cache.CleanChunksBySize(int64(f.opt.ChunkTotalSize))
 		f.lastChunkCleanup = time.Now()
 	}
 }
@@ -1470,7 +1482,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
 // can be triggered from a terminate signal or from testing between runs
 func (f *Fs) StopBackgroundRunners() {
 	f.cleanupChan <- false
-	if f.tempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
+	if f.opt.TempWritePath != "" && f.backgroundRunner != nil && f.backgroundRunner.isRunning() {
 		f.backgroundRunner.close()
 	}
 	f.cache.Close()
@@ -1528,7 +1540,7 @@ func (f *Fs) DirCacheFlush() {
 // GetBackgroundUploadChannel returns a channel that can be listened to for remote activities that happen
 // in the background
 func (f *Fs) GetBackgroundUploadChannel() chan BackgroundUploadState {
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		return f.backgroundRunner.notifyCh
 	}
 	return nil
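With NewFs now taking a configmap.Mapper, the backend can be constructed without touching the config file at all. A minimal sketch from outside the package, mirroring the pattern the updated tests below use (the remote name and values are hypothetical; the defaults are seeded from the registered options so configstruct sees a value for every field):

	// Seed a configmap.Simple with the registered defaults, then override.
	fsInfo, err := fs.Find("cache")
	if err != nil {
		log.Fatal(err)
	}
	m := configmap.Simple{}
	for _, option := range fsInfo.Options {
		m.Set(option.Name, fmt.Sprint(option.Default)) // lowest layer: registered defaults
	}
	m.Set("remote", "gdrive:media") // hypothetical wrapped remote; overrides the default
	m.Set("workers", "8")
	f, err := cache.NewFs("mycache", "movies", m) // name, root, config source
	if err != nil {
		log.Fatal(err)
	}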
backend/cache/cache_internal_test.go (vendored, 82 changes)
@@ -33,13 +33,13 @@ import (
 	"github.com/ncw/rclone/backend/local"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
 	"github.com/ncw/rclone/fs/object"
 	"github.com/ncw/rclone/fs/rc"
 	"github.com/ncw/rclone/fs/rc/rcflags"
 	"github.com/ncw/rclone/fstest"
 	"github.com/ncw/rclone/vfs"
 	"github.com/ncw/rclone/vfs/vfsflags"
-	flag "github.com/spf13/pflag"
 	"github.com/stretchr/testify/require"
 )
 
@@ -140,7 +140,7 @@ func TestInternalVfsCache(t *testing.T) {
 
 	vfsflags.Opt.CacheMode = vfs.CacheModeWrites
 	id := "tiuufo"
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"cache-writes": "true", "cache-info-age": "1h"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	err := rootFs.Mkdir("test")
@@ -699,7 +699,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
 	rc.Start(&rcflags.Opt)
 
 	id := fmt.Sprintf("ticsarc%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"rc": "true"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	if !runInstance.useMount {
@@ -774,7 +774,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
 
 func TestInternalCacheWrites(t *testing.T) {
 	id := "ticw"
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-writes": "true"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"writes": "true"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	cfs, err := runInstance.getCacheFs(rootFs)
@@ -793,7 +793,7 @@ func TestInternalCacheWrites(t *testing.T) {
 
 func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	id := fmt.Sprintf("timcsr%v", time.Now().Unix())
-	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"cache-workers": "1"})
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, map[string]string{"workers": "1"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	cfs, err := runInstance.getCacheFs(rootFs)
@@ -868,7 +868,7 @@ func TestInternalBug2117(t *testing.T) {
 
 	id := fmt.Sprintf("tib2117%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil,
-		map[string]string{"cache-info-age": "72h", "cache-chunk-clean-interval": "15m"})
+		map[string]string{"info_age": "72h", "chunk_clean_interval": "15m"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	if runInstance.rootIsCrypt {
@@ -918,10 +918,7 @@ func TestInternalBug2117(t *testing.T) {
 // run holds the remotes for a test run
 type run struct {
 	okDiff time.Duration
-	allCfgMap         map[string]string
-	allFlagMap        map[string]string
-	runDefaultCfgMap  map[string]string
-	runDefaultFlagMap map[string]string
+	runDefaultCfgMap configmap.Simple
 	mntDir       string
 	tmpUploadDir string
 	useMount     bool
@@ -945,38 +942,16 @@ func newRun() *run {
 		isMounted: false,
 	}
 
-	r.allCfgMap = map[string]string{
-		"plex_url":         "",
-		"plex_username":    "",
-		"plex_password":    "",
-		"chunk_size":       cache.DefCacheChunkSize,
-		"info_age":         cache.DefCacheInfoAge,
-		"chunk_total_size": cache.DefCacheTotalChunkSize,
+	// Read in all the defaults for all the options
+	fsInfo, err := fs.Find("cache")
+	if err != nil {
+		panic(fmt.Sprintf("Couldn't find cache remote: %v", err))
 	}
-	r.allFlagMap = map[string]string{
-		"cache-db-path":              filepath.Join(config.CacheDir, "cache-backend"),
-		"cache-chunk-path":           filepath.Join(config.CacheDir, "cache-backend"),
-		"cache-db-purge":             "true",
-		"cache-chunk-size":           cache.DefCacheChunkSize,
-		"cache-total-chunk-size":     cache.DefCacheTotalChunkSize,
-		"cache-chunk-clean-interval": cache.DefCacheChunkCleanInterval,
-		"cache-info-age":             cache.DefCacheInfoAge,
-		"cache-read-retries":         strconv.Itoa(cache.DefCacheReadRetries),
-		"cache-workers":              strconv.Itoa(cache.DefCacheTotalWorkers),
-		"cache-chunk-no-memory":      "false",
-		"cache-rps":                  strconv.Itoa(cache.DefCacheRps),
-		"cache-writes":               "false",
-		"cache-tmp-upload-path":      "",
-		"cache-tmp-wait-time":        cache.DefCacheTmpWaitTime,
-	}
-	r.runDefaultCfgMap = make(map[string]string)
-	for key, value := range r.allCfgMap {
-		r.runDefaultCfgMap[key] = value
-	}
-	r.runDefaultFlagMap = make(map[string]string)
-	for key, value := range r.allFlagMap {
-		r.runDefaultFlagMap[key] = value
+	r.runDefaultCfgMap = configmap.Simple{}
+	for _, option := range fsInfo.Options {
+		r.runDefaultCfgMap.Set(option.Name, fmt.Sprint(option.Default))
 	}
 
 	if mountDir == "" {
 		if runtime.GOOS != "windows" {
 			r.mntDir, err = ioutil.TempDir("", "rclonecache-mount")
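Deriving the test defaults from fs.Find("cache").Options, rather than from the hand-maintained allCfgMap/allFlagMap pair deleted above, means newRun picks up exactly what the backend registers, so the test defaults can no longer drift from the backend's actual options.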
@@ -1086,28 +1061,22 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
 	boltDb, err := cache.GetPersistent(runInstance.dbPath, runInstance.chunkPath, &cache.Features{PurgeDb: true})
 	require.NoError(t, err)
 
-	for k, v := range r.runDefaultCfgMap {
-		if c, ok := cfg[k]; ok {
-			config.FileSet(cacheRemote, k, c)
-		} else {
-			config.FileSet(cacheRemote, k, v)
-		}
-	}
-	for k, v := range r.runDefaultFlagMap {
-		if c, ok := flags[k]; ok {
-			_ = flag.Set(k, c)
-		} else {
-			_ = flag.Set(k, v)
-		}
-	}
 	fs.Config.LowLevelRetries = 1
 
+	m := configmap.Simple{}
+	for k, v := range r.runDefaultCfgMap {
+		m.Set(k, v)
+	}
+	for k, v := range flags {
+		m.Set(k, v)
+	}
+
 	// Instantiate root
 	if purge {
 		boltDb.PurgeTempUploads()
 		_ = os.RemoveAll(path.Join(runInstance.tmpUploadDir, id))
 	}
-	f, err := fs.NewFs(remote + ":" + id)
+	f, err := cache.NewFs(remote, id, m)
 	require.NoError(t, err)
 	cfs, err := r.getCacheFs(f)
 	require.NoError(t, err)
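Note the layering in newCacheFs above: the registered defaults are copied into the configmap.Simple first and the per-test flags second, so a later Set on the same key overwrites the default. That map-overwrite order is the entire override mechanism, replacing the old config.FileSet/flag.Set juggling.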
@@ -1157,9 +1126,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
 	}
 	r.tempFiles = nil
 	debug.FreeOSMemory()
-	for k, v := range r.runDefaultFlagMap {
-		_ = flag.Set(k, v)
-	}
 }
 
 func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
backend/cache/cache_upload_test.go (vendored, 14 changes)
@@ -22,7 +22,7 @@ func TestInternalUploadTempDirCreated(t *testing.T) {
 	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
 		nil,
-		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
+		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id)})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
@@ -63,7 +63,7 @@ func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
+		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "0s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
@@ -73,7 +73,7 @@ func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
 	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
+		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1m"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
@@ -83,7 +83,7 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
 	id := fmt.Sprintf("tiumef%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
+		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	err := rootFs.Mkdir("one")
@@ -163,7 +163,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
+		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	err := rootFs.Mkdir("test")
@@ -213,7 +213,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
 	id := "tiutfo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
+		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	boltDb.PurgeTempUploads()
@@ -343,7 +343,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
 	id := "tiuufo"
 	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
 		nil,
-		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
+		map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1h"})
 	defer runInstance.cleanupFs(t, rootFs, boltDb)
 
 	boltDb.PurgeTempUploads()
backend/cache/cache_upload_test.go.orig (vendored, new file, 455 lines)
@@ -0,0 +1,455 @@
+// +build !plan9
+
+package cache_test
+
+import (
+	"math/rand"
+	"os"
+	"path"
+	"strconv"
+	"testing"
+	"time"
+
+	"fmt"
+
+	"github.com/ncw/rclone/backend/cache"
+	_ "github.com/ncw/rclone/backend/drive"
+	"github.com/ncw/rclone/fs"
+	"github.com/stretchr/testify/require"
+)
+
+func TestInternalUploadTempDirCreated(t *testing.T) {
+	id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
+	require.NoError(t, err)
+}
+
+func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
+	// create some rand test data
+	testSize := int64(524288000)
+	testReader := runInstance.randomReader(t, testSize)
+	bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
+	runInstance.writeRemoteReader(t, rootFs, "one", testReader)
+	// validate that it exists in temp fs
+	ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
+	require.NoError(t, err)
+
+	if runInstance.rootIsCrypt {
+		require.Equal(t, int64(524416032), ti.Size())
+	} else {
+		require.Equal(t, testSize, ti.Size())
+	}
+	de1, err := runInstance.list(t, rootFs, "")
+	require.NoError(t, err)
+	require.Len(t, de1, 1)
+
+	runInstance.completeBackgroundUpload(t, "one", bu)
+	// check if it was removed from temp fs
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
+	require.True(t, os.IsNotExist(err))
+
+	// check if it can be read
+	data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
+	require.NoError(t, err)
+	require.Len(t, data2, 1024)
+}
+
+func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
+	id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
+}
+
+func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
+	id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
+}
+
+func TestInternalUploadMoveExistingFile(t *testing.T) {
+	id := fmt.Sprintf("tiumef%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	err := rootFs.Mkdir("one")
+	require.NoError(t, err)
+	err = rootFs.Mkdir("one/test")
+	require.NoError(t, err)
+	err = rootFs.Mkdir("second")
+	require.NoError(t, err)
+
+	// create some rand test data
+	testSize := int64(10485760)
+	testReader := runInstance.randomReader(t, testSize)
+	runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
+	runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
+
+	de1, err := runInstance.list(t, rootFs, "one/test")
+	require.NoError(t, err)
+	require.Len(t, de1, 1)
+
+	time.Sleep(time.Second * 5)
+	//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
+	//require.NoError(t, err)
+
+	err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
+	require.NoError(t, err)
+
+	// check if it can be read
+	de1, err = runInstance.list(t, rootFs, "second/test")
+	require.NoError(t, err)
+	require.Len(t, de1, 1)
+}
+
+func TestInternalUploadTempPathCleaned(t *testing.T) {
+	id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	err := rootFs.Mkdir("one")
+	require.NoError(t, err)
+	err = rootFs.Mkdir("one/test")
+	require.NoError(t, err)
+	err = rootFs.Mkdir("second")
+	require.NoError(t, err)
+
+	// create some rand test data
+	testSize := int64(1048576)
+	testReader := runInstance.randomReader(t, testSize)
+	testReader2 := runInstance.randomReader(t, testSize)
+	runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
+	runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
+
+	runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
+	require.True(t, os.IsNotExist(err))
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
+	require.True(t, os.IsNotExist(err))
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
+	require.False(t, os.IsNotExist(err))
+
+	runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
+	require.True(t, os.IsNotExist(err))
+
+	de1, err := runInstance.list(t, rootFs, "one/test")
+	require.NoError(t, err)
+	require.Len(t, de1, 1)
+
+	// check if it can be read
+	de1, err = runInstance.list(t, rootFs, "second")
+	require.NoError(t, err)
+	require.Len(t, de1, 1)
+}
+
+func TestInternalUploadQueueMoreFiles(t *testing.T) {
+	id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	err := rootFs.Mkdir("test")
+	require.NoError(t, err)
+	minSize := 5242880
+	maxSize := 10485760
+	totalFiles := 10
+	rand.Seed(time.Now().Unix())
+
+	lastFile := ""
+	for i := 0; i < totalFiles; i++ {
+		size := int64(rand.Intn(maxSize-minSize) + minSize)
+		testReader := runInstance.randomReader(t, size)
+		remote := "test/" + strconv.Itoa(i) + ".bin"
+		runInstance.writeRemoteReader(t, rootFs, remote, testReader)
+
+		// validate that it exists in temp fs
+		ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
+		require.NoError(t, err)
+		require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
+
+		if runInstance.wrappedIsExternal && i < totalFiles-1 {
+			time.Sleep(time.Second * 3)
+		}
+		lastFile = remote
+	}
+
+	// check if cache lists all files, likely temp upload didn't finish yet
+	de1, err := runInstance.list(t, rootFs, "test")
+	require.NoError(t, err)
+	require.Len(t, de1, totalFiles)
+
+	// wait for background uploader to do its thing
+	runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
+
+	// retry until we have no more temp files and fail if they don't go down to 0
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
+	require.True(t, os.IsNotExist(err))
+
+	// check if cache lists all files
+	de1, err = runInstance.list(t, rootFs, "test")
+	require.NoError(t, err)
+	require.Len(t, de1, totalFiles)
+}
+
+func TestInternalUploadTempFileOperations(t *testing.T) {
+	id := "tiutfo"
+	rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
+		nil,
+		map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
+	defer runInstance.cleanupFs(t, rootFs, boltDb)
+
+	boltDb.PurgeTempUploads()
+
+	// create some rand test data
+	runInstance.mkdir(t, rootFs, "test")
+	runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
+
+	// check if it can be read
+	data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
+	require.NoError(t, err)
+	require.Equal(t, []byte("one content"), data1)
+	// validate that it exists in temp fs
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	require.NoError(t, err)
+
+	// test DirMove - allowed
+	err = runInstance.dirMove(t, rootFs, "test", "second")
+	if err != errNotSupported {
+		require.NoError(t, err)
+		_, err = rootFs.NewObject("test/one")
+		require.Error(t, err)
+		_, err = rootFs.NewObject("second/one")
+		require.NoError(t, err)
+		// validate that it exists in temp fs
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+		require.Error(t, err)
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
+		require.NoError(t, err)
+		_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
+		require.Error(t, err)
+		var started bool
+		started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
+		require.NoError(t, err)
+		require.False(t, started)
+		runInstance.mkdir(t, rootFs, "test")
+		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
+	}
+
+	// test Rmdir - allowed
+	err = runInstance.rm(t, rootFs, "test")
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "directory not empty")
+	_, err = rootFs.NewObject("test/one")
+	require.NoError(t, err)
+	// validate that it exists in temp fs
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+	require.NoError(t, err)
+	started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
+	require.False(t, started)
+	require.NoError(t, err)
+
+	// test Move/Rename -- allowed
+	err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
+	if err != errNotSupported {
+		require.NoError(t, err)
+		// try to read from it
+		_, err = rootFs.NewObject("test/one")
+		require.Error(t, err)
+		_, err = rootFs.NewObject("test/second")
+		require.NoError(t, err)
+		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
+		require.NoError(t, err)
+		require.Equal(t, []byte("one content"), data2)
+		// validate that it exists in temp fs
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+		require.Error(t, err)
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
+		require.NoError(t, err)
+		runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
+	}
+
+	// test Copy -- allowed
+	err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
+	if err != errNotSupported {
+		require.NoError(t, err)
+		_, err = rootFs.NewObject("test/one")
+		require.NoError(t, err)
+		_, err = rootFs.NewObject("test/third")
+		require.NoError(t, err)
+		data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
+		require.NoError(t, err)
+		require.Equal(t, []byte("one content"), data2)
+		// validate that it exists in temp fs
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
+		require.NoError(t, err)
+		_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
+		require.NoError(t, err)
+	}
+
+	// test Remove -- allowed
+	err = runInstance.rm(t, rootFs, "test/one")
+	require.NoError(t, err)
+	_, err = rootFs.NewObject("test/one")
+	require.Error(t, err)
+	// validate that it doesn't exist in temp fs
+	_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
require.Error(t, err)
|
||||||
|
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||||
|
|
||||||
|
// test Update -- allowed
|
||||||
|
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||||
|
require.NoError(t, err)
|
||||||
|
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
|
||||||
|
require.NoError(t, err)
|
||||||
|
obj2, err := rootFs.NewObject("test/one")
|
||||||
|
require.NoError(t, err)
|
||||||
|
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
|
||||||
|
require.Equal(t, "one content updated", string(data2))
|
||||||
|
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
if runInstance.rootIsCrypt {
|
||||||
|
require.Equal(t, int64(67), tmpInfo.Size())
|
||||||
|
} else {
|
||||||
|
require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||||
|
}
|
||||||
|
|
||||||
|
// test SetModTime -- allowed
|
||||||
|
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotEqual(t, secondModTime, firstModTime)
|
||||||
|
require.NotEqual(t, time.Time{}, firstModTime)
|
||||||
|
require.NotEqual(t, time.Time{}, secondModTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||||
|
id := "tiuufo"
|
||||||
|
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||||
|
nil,
|
||||||
|
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||||
|
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||||
|
|
||||||
|
boltDb.PurgeTempUploads()
|
||||||
|
|
||||||
|
// create some rand test data
|
||||||
|
runInstance.mkdir(t, rootFs, "test")
|
||||||
|
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||||
|
|
||||||
|
// check if it can be read
|
||||||
|
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, []byte("one content"), data1)
|
||||||
|
// validate that it exists in temp fs
|
||||||
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// test DirMove
|
||||||
|
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||||
|
if err != errNotSupported {
|
||||||
|
require.Error(t, err)
|
||||||
|
_, err = rootFs.NewObject("test/one")
|
||||||
|
require.NoError(t, err)
|
||||||
|
// validate that it exists in temp fs
|
||||||
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// test Rmdir
|
||||||
|
err = runInstance.rm(t, rootFs, "test")
|
||||||
|
require.Error(t, err)
|
||||||
|
_, err = rootFs.NewObject("test/one")
|
||||||
|
require.NoError(t, err)
|
||||||
|
// validate that it doesn't exist in temp fs
|
||||||
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// test Move/Rename
|
||||||
|
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||||
|
if err != errNotSupported {
|
||||||
|
require.Error(t, err)
|
||||||
|
// try to read from it
|
||||||
|
_, err = rootFs.NewObject("test/one")
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = rootFs.NewObject("test/second")
|
||||||
|
require.Error(t, err)
|
||||||
|
// validate that it exists in temp fs
|
||||||
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// test Copy -- allowed
|
||||||
|
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||||
|
if err != errNotSupported {
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = rootFs.NewObject("test/one")
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = rootFs.NewObject("test/third")
|
||||||
|
require.NoError(t, err)
|
||||||
|
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, []byte("one content"), data2)
|
||||||
|
// validate that it exists in temp fs
|
||||||
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// test Remove
|
||||||
|
err = runInstance.rm(t, rootFs, "test/one")
|
||||||
|
require.Error(t, err)
|
||||||
|
_, err = rootFs.NewObject("test/one")
|
||||||
|
require.NoError(t, err)
|
||||||
|
// validate that it doesn't exist in temp fs
|
||||||
|
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
require.NoError(t, err)
|
||||||
|
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||||
|
|
||||||
|
// test Update - this seems to work. Why? FIXME
|
||||||
|
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||||
|
//require.NoError(t, err)
|
||||||
|
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
|
||||||
|
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
|
||||||
|
// require.Equal(t, "one content", string(data2))
|
||||||
|
//
|
||||||
|
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||||
|
// require.NoError(t, err)
|
||||||
|
// if runInstance.rootIsCrypt {
|
||||||
|
// require.Equal(t, int64(67), tmpInfo.Size())
|
||||||
|
// } else {
|
||||||
|
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||||
|
// }
|
||||||
|
//})
|
||||||
|
//require.Error(t, err)
|
||||||
|
|
||||||
|
// test SetModTime -- seems to work cause of previous
|
||||||
|
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||||
|
//require.NoError(t, err)
|
||||||
|
//require.Equal(t, secondModTime, firstModTime)
|
||||||
|
//require.NotEqual(t, time.Time{}, firstModTime)
|
||||||
|
//require.NotEqual(t, time.Time{}, secondModTime)
|
||||||
|
}
|
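The tests above configure the cache backend through flag-style keys such as cache-tmp-upload-path. Under the new config system those values land in a configmap and are parsed into a typed Options struct. A minimal sketch of that pattern follows, assuming the cache backend registers these settings as tmp_upload_path and tmp_wait_time (the names are inferred from the flags above and are not shown in this excerpt):

	// a minimal sketch, not the cache backend's actual code; assumes
	// imports of fs, configmap and configstruct as in the diffs below
	type Options struct {
		TempWritePath string      `config:"tmp_upload_path"`
		TempWaitTime  fs.Duration `config:"tmp_wait_time"`
	}

	func parseOptions(m configmap.Mapper) (*Options, error) {
		opt := new(Options)
		// configstruct.Set fills each field from the key named in its
		// `config` tag; defaults come from the fs.Option registration
		if err := configstruct.Set(m, opt); err != nil {
			return nil, err
		}
		return opt, nil
	}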
backend/cache/cache_upload_test.go.rej | 12 (vendored, new file)
@@ -0,0 +1,12 @@
+--- cache_upload_test.go
++++ cache_upload_test.go
+@@ -1500,9 +1469,6 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
+ 	}
+ 	r.tempFiles = nil
+ 	debug.FreeOSMemory()
+-	for k, v := range r.runDefaultFlagMap {
+-		_ = flag.Set(k, v)
+-	}
+ }
+ 
+ func (r *run) randomBytes(t *testing.T, size int64) []byte {
backend/cache/handle.go | 28 (vendored)
@@ -65,14 +65,14 @@ func NewObjectHandle(o *Object, cfs *Fs) *Handle {
 		offset:        0,
 		preloadOffset: -1, // -1 to trigger the first preload
 
-		UseMemory: cfs.chunkMemory,
+		UseMemory: !cfs.opt.ChunkNoMemory,
 		reading:   false,
 	}
 	r.seenOffsets = make(map[int64]bool)
 	r.memory = NewMemory(-1)
 
 	// create a larger buffer to queue up requests
-	r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
+	r.preloadQueue = make(chan int64, r.cfs.opt.TotalWorkers*10)
 	r.confirmReading = make(chan bool)
 	r.startReadWorkers()
 	return r
@@ -98,7 +98,7 @@ func (r *Handle) startReadWorkers() {
 	if r.hasAtLeastOneWorker() {
 		return
 	}
-	totalWorkers := r.cacheFs().totalWorkers
+	totalWorkers := r.cacheFs().opt.TotalWorkers
 
 	if r.cacheFs().plexConnector.isConfigured() {
 		if !r.cacheFs().plexConnector.isConnected() {
@@ -156,7 +156,7 @@ func (r *Handle) confirmExternalReading() {
 		return
 	}
 	fs.Infof(r, "confirmed reading by external reader")
-	r.scaleWorkers(r.cacheFs().totalMaxWorkers)
+	r.scaleWorkers(r.cacheFs().opt.TotalWorkers)
 }
 
 // queueOffset will send an offset to the workers if it's different from the last one
@@ -179,7 +179,7 @@ func (r *Handle) queueOffset(offset int64) {
 	}
 
 	for i := 0; i < len(r.workers); i++ {
-		o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
+		o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
 		if o < 0 || o >= r.cachedObject.Size() {
 			continue
 		}
@@ -211,7 +211,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	var err error
 
 	// we calculate the modulus of the requested offset with the size of a chunk
-	offset := chunkStart % r.cacheFs().chunkSize
+	offset := chunkStart % int64(r.cacheFs().opt.ChunkSize)
 
 	// we align the start offset of the first chunk to a likely chunk in the storage
 	chunkStart = chunkStart - offset
@@ -228,7 +228,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	if !found {
 		// we're gonna give the workers a chance to pickup the chunk
 		// and retry a couple of times
-		for i := 0; i < r.cacheFs().readRetries*8; i++ {
+		for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
 			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
 			if err == nil {
 				found = true
@@ -255,7 +255,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	if offset > 0 {
 		if offset > int64(len(data)) {
 			fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
-				r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
+				r.offset, chunkStart, len(data), offset, r.cacheFs().opt.ChunkSize, r.cachedObject.Size())
 			return nil, io.ErrUnexpectedEOF
 		}
 		data = data[int(offset):]
@@ -338,9 +338,9 @@ func (r *Handle) Seek(offset int64, whence int) (int64, error) {
 		err = errors.Errorf("cache: unimplemented seek whence %v", whence)
 	}
 
-	chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
-	if chunkStart >= r.cacheFs().chunkSize {
-		chunkStart = chunkStart - r.cacheFs().chunkSize
+	chunkStart := r.offset - (r.offset % int64(r.cacheFs().opt.ChunkSize))
+	if chunkStart >= int64(r.cacheFs().opt.ChunkSize) {
+		chunkStart = chunkStart - int64(r.cacheFs().opt.ChunkSize)
 	}
 	r.queueOffset(chunkStart)
 
@@ -451,7 +451,7 @@ func (w *worker) run() {
 		}
 	}
 
-	chunkEnd := chunkStart + w.r.cacheFs().chunkSize
+	chunkEnd := chunkStart + int64(w.r.cacheFs().opt.ChunkSize)
 	// TODO: Remove this comment if it proves to be reliable for #1896
 	//if chunkEnd > w.r.cachedObject.Size() {
 	//	chunkEnd = w.r.cachedObject.Size()
@@ -466,7 +466,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
 	var data []byte
 
 	// stop retries
-	if retry >= w.r.cacheFs().readRetries {
+	if retry >= w.r.cacheFs().opt.ReadRetries {
 		return
 	}
 	// back-off between retries
@@ -612,7 +612,7 @@ func (b *backgroundWriter) run() {
 			return
 		}
 
-		absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
+		absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), time.Duration(b.fs.opt.TempWaitTime))
 		if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
 			time.Sleep(time.Second)
 			continue
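The chunk arithmetic in handle.go is unchanged by this commit; only the explicit int64(...) conversions are new, because opt.ChunkSize is an fs.SizeSuffix rather than a plain int64. A small self-contained example of the alignment done in getChunk, using an illustrative 5 MB chunk size:

	package main

	import "fmt"

	func main() {
		// chunkSize stands in for int64(r.cacheFs().opt.ChunkSize)
		chunkSize := int64(5 * 1024 * 1024)
		chunkStart := int64(7 * 1024 * 1024) // requested read position

		// same steps as Handle.getChunk: compute the offset within the
		// chunk, then align the fetch to the chunk boundary below it
		offset := chunkStart % chunkSize
		chunkStart -= offset
		fmt.Println(chunkStart, offset) // 5242880 2097152
	}

The worker then fetches the aligned chunk and the handle returns data[offset:] to the reader.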
backend/cache/object.go | 10 (vendored)
@@ -44,7 +44,7 @@ func NewObject(f *Fs, remote string) *Object {
 
 	cacheType := objectInCache
 	parentFs := f.UnWrap()
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		_, err := f.cache.SearchPendingUpload(fullRemote)
 		if err == nil { // queued for upload
 			cacheType = objectPendingUpload
@@ -75,7 +75,7 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
 
 	cacheType := objectInCache
 	parentFs := f.UnWrap()
-	if f.tempWritePath != "" {
+	if f.opt.TempWritePath != "" {
 		_, err := f.cache.SearchPendingUpload(fullRemote)
 		if err == nil { // queued for upload
 			cacheType = objectPendingUpload
@@ -153,7 +153,7 @@ func (o *Object) Storable() bool {
 // 2. is not pending a notification from the wrapped fs
 func (o *Object) refresh() error {
 	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
-	isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
+	isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
 	if !isExpired && !isNotified {
 		return nil
 	}
@@ -237,7 +237,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 		return err
 	}
 	// pause background uploads if active
-	if o.CacheFs.tempWritePath != "" {
+	if o.CacheFs.opt.TempWritePath != "" {
 		o.CacheFs.backgroundRunner.pause()
 		defer o.CacheFs.backgroundRunner.play()
 		// don't allow started uploads
@@ -274,7 +274,7 @@ func (o *Object) Remove() error {
 		return err
 	}
 	// pause background uploads if active
-	if o.CacheFs.tempWritePath != "" {
+	if o.CacheFs.opt.TempWritePath != "" {
 		o.CacheFs.backgroundRunner.pause()
 		defer o.CacheFs.backgroundRunner.play()
 		// don't allow started uploads
backend/cache/plex.go | 8 (vendored)
@@ -16,7 +16,6 @@ import (
 	"io/ioutil"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
 	"github.com/patrickmn/go-cache"
 	"golang.org/x/net/websocket"
 )
@@ -60,10 +59,11 @@ type plexConnector struct {
 	running    bool
 	runningMu  sync.Mutex
 	stateCache *cache.Cache
+	saveToken  func(string)
 }
 
 // newPlexConnector connects to a Plex server and generates a token
-func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
+func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
 	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
 	if err != nil {
 		return nil, err
@@ -76,6 +76,7 @@ func newPlexConnector(f *Fs, plexURL, username, password string, saveToken func(string)) (*plexConnector, error) {
 		password:   password,
 		token:      "",
 		stateCache: cache.New(time.Hour, time.Minute),
+		saveToken:  saveToken,
 	}
 
 	return pc, nil
@@ -209,8 +210,7 @@ func (p *plexConnector) authenticate() error {
 	}
 	p.token = token
 	if p.token != "" {
-		config.FileSet(p.f.Name(), "plex_token", p.token)
-		config.SaveConfig()
+		p.saveToken(p.token)
 		fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
 	}
 	p.listenWebsocket()
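Persisting the Plex token now goes through a callback instead of config.FileSet, so the connector no longer needs to know about the config file at all. The caller supplies the persistence; a plausible wiring from the cache backend (assumed here, it is not part of this excerpt, and the opt.Plex* names are illustrative) writes the token back through the configmap:

	// hypothetical caller-side wiring; m is the configmap.Mapper the
	// backend was constructed with
	pc, err := newPlexConnector(f, opt.PlexURL, opt.PlexUsername, opt.PlexPassword,
		func(token string) {
			// persist the freshly issued token back into the config
			m.Set("plex_token", token)
		})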
backend/cache/storage_persistent.go | 9 (vendored)
@@ -34,7 +34,8 @@ const (
 
 // Features flags for this storage type
 type Features struct {
 	PurgeDb bool // purge the db before starting
+	DbWaitTime time.Duration // time to wait for DB to be available
 }
 
 var boltMap = make(map[string]*Persistent)
@@ -122,7 +123,7 @@ func (b *Persistent) connect() error {
 	if err != nil {
 		return errors.Wrapf(err, "failed to create a data directory %q", b.dataPath)
 	}
-	b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: *cacheDbWaitTime})
+	b.db, err = bolt.Open(b.dbPath, 0644, &bolt.Options{Timeout: b.features.DbWaitTime})
 	if err != nil {
 		return errors.Wrapf(err, "failed to open a cache connection to %q", b.dbPath)
 	}
@@ -342,7 +343,7 @@ func (b *Persistent) RemoveDir(fp string) error {
 // ExpireDir will flush a CachedDirectory and all its objects from the objects
 // chunks will remain as they are
 func (b *Persistent) ExpireDir(cd *Directory) error {
-	t := time.Now().Add(cd.CacheFs.fileAge * -1)
+	t := time.Now().Add(time.Duration(-cd.CacheFs.opt.InfoAge))
 	cd.CacheTs = &t
 
 	// expire all parents
@@ -429,7 +430,7 @@ func (b *Persistent) RemoveObject(fp string) error {
 
 // ExpireObject will flush an Object and all its data if desired
 func (b *Persistent) ExpireObject(co *Object, withData bool) error {
-	co.CacheTs = time.Now().Add(co.CacheFs.fileAge * -1)
+	co.CacheTs = time.Now().Add(time.Duration(-co.CacheFs.opt.InfoAge))
 	err := b.AddObject(co)
 	if withData {
 		_ = os.RemoveAll(path.Join(b.dataPath, co.abs()))
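With the cacheDbWaitTime flag gone, the bolt open timeout now travels with the Features struct, so whoever opens the persistent store decides how long to wait. A minimal sketch (only the field names come from the diff above; how the struct reaches the store is assumed):

	feats := &Features{
		PurgeDb:    false,           // keep the existing db contents
		DbWaitTime: 1 * time.Second, // becomes bolt.Options.Timeout in connect()
	}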
backend/crypt/crypt.go
@@ -5,24 +5,18 @@ import (
 	"fmt"
 	"io"
 	"path"
-	"strconv"
 	"strings"
 	"time"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
 )
 
-// Globals
-var (
-	// Flags
-	cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
-)
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{
@@ -30,11 +24,13 @@ func init() {
 		Description: "Encrypt/Decrypt a remote",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name: "remote",
 			Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
+			Required: true,
 		}, {
 			Name: "filename_encryption",
 			Help: "How to encrypt the filenames.",
+			Default: "standard",
 			Examples: []fs.OptionExample{
 				{
 					Value: "off",
@@ -48,8 +44,9 @@ func init() {
 			},
 		}, {
 			Name: "directory_name_encryption",
 			Help: "Option to either encrypt directory names or leave them intact.",
+			Default: true,
 			Examples: []fs.OptionExample{
 				{
 					Value: "true",
@@ -68,50 +65,67 @@ func init() {
 			Name:       "password2",
 			Help:       "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
 			IsPassword: true,
-			Optional:   true,
+		}, {
+			Name:     "show_mapping",
+			Help:     "For all files listed show how the names encrypt.",
+			Default:  false,
+			Hide:     fs.OptionHideConfigurator,
+			Advanced: true,
 		}},
 	})
 }
 
-// NewCipher constructs a Cipher for the given config name
-func NewCipher(name string) (Cipher, error) {
-	mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
+// newCipherForConfig constructs a Cipher for the given config name
+func newCipherForConfig(opt *Options) (Cipher, error) {
+	mode, err := NewNameEncryptionMode(opt.FilenameEncryption)
 	if err != nil {
 		return nil, err
 	}
-	dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
-	if err != nil {
-		return nil, err
-	}
-	password := config.FileGet(name, "password", "")
-	if password == "" {
+	if opt.Password == "" {
 		return nil, errors.New("password not set in config file")
 	}
-	password, err = obscure.Reveal(password)
+	password, err := obscure.Reveal(opt.Password)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to decrypt password")
 	}
-	salt := config.FileGet(name, "password2", "")
-	if salt != "" {
-		salt, err = obscure.Reveal(salt)
+	var salt string
+	if opt.Password2 != "" {
+		salt, err = obscure.Reveal(opt.Password2)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to decrypt password2")
 		}
 	}
-	cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
+	cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make cipher")
 	}
 	return cipher, nil
 }
 
+// NewCipher constructs a Cipher for the given config
+func NewCipher(m configmap.Mapper) (Cipher, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+	return newCipherForConfig(opt)
+}
+
 // NewFs contstructs an Fs from the path, container:path
-func NewFs(name, rpath string) (fs.Fs, error) {
-	cipher, err := NewCipher(name)
+func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+	cipher, err := newCipherForConfig(opt)
 	if err != nil {
 		return nil, err
 	}
-	remote := config.FileGet(name, "remote")
+	remote := opt.Remote
 	if strings.HasPrefix(remote, name+":") {
 		return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
 	}
@@ -130,6 +144,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 		Fs:     wrappedFs,
 		name:   name,
 		root:   rpath,
+		opt:    *opt,
 		cipher: cipher,
 	}
 	// the features here are ones we could support, and they are
@@ -161,11 +176,22 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	return f, err
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+	Remote                  string `config:"remote"`
+	FilenameEncryption      string `config:"filename_encryption"`
+	DirectoryNameEncryption bool   `config:"directory_name_encryption"`
+	Password                string `config:"password"`
+	Password2               string `config:"password2"`
+	ShowMapping             bool   `config:"show_mapping"`
+}
+
 // Fs represents a wrapped fs.Fs
 type Fs struct {
 	fs.Fs
 	name     string
 	root     string
+	opt      Options
 	features *fs.Features // optional features
 	cipher   Cipher
 }
@@ -198,7 +224,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
 		fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
 		return
 	}
-	if *cryptShowMapping {
+	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
 	*entries = append(*entries, f.newObject(obj))
@@ -212,7 +238,7 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
 		fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
 		return
 	}
-	if *cryptShowMapping {
+	if f.opt.ShowMapping {
 		fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 	}
 	*entries = append(*entries, f.newDir(dir))
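Because NewCipher now takes a configmap.Mapper rather than a remote name, it can be driven from any key/value source, which also makes constructing a cipher in tests straightforward. A usage sketch, assuming configmap.Simple (the plain map[string]string implementation from the new configmap package) and obscure.MustObscure to produce the obscured form the config normally stores, plus the usual imports:

	m := configmap.Simple{
		"remote":                    "myremote:encrypted",
		"filename_encryption":       "standard",
		"directory_name_encryption": "true",
		"password":                  obscure.MustObscure("secret"),
	}
	cipher, err := NewCipher(m)
	if err != nil {
		log.Fatal(err)
	}
	_ = cipher // ready to encrypt/decrypt names and data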
@ -23,7 +23,8 @@ import (
|
|||||||
|
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/ncw/rclone/fs"
|
||||||
"github.com/ncw/rclone/fs/config"
|
"github.com/ncw/rclone/fs/config"
|
||||||
"github.com/ncw/rclone/fs/config/flags"
|
"github.com/ncw/rclone/fs/config/configmap"
|
||||||
|
"github.com/ncw/rclone/fs/config/configstruct"
|
||||||
"github.com/ncw/rclone/fs/config/obscure"
|
"github.com/ncw/rclone/fs/config/obscure"
|
||||||
"github.com/ncw/rclone/fs/fserrors"
|
"github.com/ncw/rclone/fs/fserrors"
|
||||||
"github.com/ncw/rclone/fs/fshttp"
|
"github.com/ncw/rclone/fs/fshttp"
|
||||||
@ -49,27 +50,13 @@ const (
|
|||||||
defaultExtensions = "docx,xlsx,pptx,svg"
|
defaultExtensions = "docx,xlsx,pptx,svg"
|
||||||
scopePrefix = "https://www.googleapis.com/auth/"
|
scopePrefix = "https://www.googleapis.com/auth/"
|
||||||
defaultScope = "drive"
|
defaultScope = "drive"
|
||||||
|
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||||
|
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||||
|
defaultChunkSize = fs.SizeSuffix(8 * 1024 * 1024)
|
||||||
)
|
)
|
||||||
|
|
||||||
// Globals
|
// Globals
|
||||||
var (
|
var (
|
||||||
// Flags
|
|
||||||
driveAuthOwnerOnly = flags.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
|
|
||||||
driveUseTrash = flags.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
|
|
||||||
driveSkipGdocs = flags.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
|
|
||||||
driveSharedWithMe = flags.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
|
|
||||||
driveTrashedOnly = flags.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
|
|
||||||
driveExtensions = flags.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
|
|
||||||
driveUseCreatedDate = flags.BoolP("drive-use-created-date", "", false, "Use created date instead of modified date.")
|
|
||||||
driveListChunk = flags.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
|
|
||||||
driveImpersonate = flags.StringP("drive-impersonate", "", "", "Impersonate this user when using a service account.")
|
|
||||||
driveAlternateExport = flags.BoolP("drive-alternate-export", "", false, "Use alternate export URLs for google documents export.")
|
|
||||||
driveAcknowledgeAbuse = flags.BoolP("drive-acknowledge-abuse", "", false, "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.")
|
|
||||||
driveKeepRevisionForever = flags.BoolP("drive-keep-revision-forever", "", false, "Keep new head revision forever.")
|
|
||||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
|
||||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
|
||||||
chunkSize = fs.SizeSuffix(8 * 1024 * 1024)
|
|
||||||
driveUploadCutoff = chunkSize
|
|
||||||
// Description of how to auth for this app
|
// Description of how to auth for this app
|
||||||
driveConfig = &oauth2.Config{
|
driveConfig = &oauth2.Config{
|
||||||
Scopes: []string{scopePrefix + "drive"},
|
Scopes: []string{scopePrefix + "drive"},
|
||||||
@ -112,38 +99,43 @@ func init() {
|
|||||||
Name: "drive",
|
Name: "drive",
|
||||||
Description: "Google Drive",
|
Description: "Google Drive",
|
||||||
NewFs: NewFs,
|
NewFs: NewFs,
|
||||||
Config: func(name string) {
|
Config: func(name string, m configmap.Mapper) {
|
||||||
var err error
|
// Parse config into Options struct
|
||||||
|
opt := new(Options)
|
||||||
|
err := configstruct.Set(m, opt)
|
||||||
|
if err != nil {
|
||||||
|
fs.Errorf(nil, "Couldn't parse config into struct: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
// Fill in the scopes
|
// Fill in the scopes
|
||||||
scope := config.FileGet(name, "scope")
|
if opt.Scope == "" {
|
||||||
if scope == "" {
|
opt.Scope = defaultScope
|
||||||
scope = defaultScope
|
|
||||||
}
|
}
|
||||||
driveConfig.Scopes = nil
|
driveConfig.Scopes = nil
|
||||||
for _, scope := range strings.Split(scope, ",") {
|
for _, scope := range strings.Split(opt.Scope, ",") {
|
||||||
driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
|
driveConfig.Scopes = append(driveConfig.Scopes, scopePrefix+strings.TrimSpace(scope))
|
||||||
// Set the root_folder_id if using drive.appfolder
|
// Set the root_folder_id if using drive.appfolder
|
||||||
if scope == "drive.appfolder" {
|
if scope == "drive.appfolder" {
|
||||||
config.FileSet(name, "root_folder_id", "appDataFolder")
|
m.Set("root_folder_id", "appDataFolder")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if config.FileGet(name, "service_account_file") == "" {
|
if opt.ServiceAccountFile == "" {
|
||||||
err = oauthutil.Config("drive", name, driveConfig)
|
err = oauthutil.Config("drive", name, m, driveConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to configure token: %v", err)
|
log.Fatalf("Failed to configure token: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err = configTeamDrive(name)
|
err = configTeamDrive(opt, m, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("Failed to configure team drive: %v", err)
|
log.Fatalf("Failed to configure team drive: %v", err)
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
Options: []fs.Option{{
|
Options: []fs.Option{{
|
||||||
Name: config.ConfigClientID,
|
Name: config.ConfigClientID,
|
||||||
Help: "Google Application Client Id - leave blank normally.",
|
Help: "Google Application Client Id\nLeave blank normally.",
|
||||||
}, {
|
}, {
|
||||||
Name: config.ConfigClientSecret,
|
Name: config.ConfigClientSecret,
|
||||||
Help: "Google Application Client Secret - leave blank normally.",
|
Help: "Google Application Client Secret\nLeave blank normally.",
|
||||||
}, {
|
}, {
|
||||||
Name: "scope",
|
Name: "scope",
|
||||||
Help: "Scope that rclone should use when requesting access from drive.",
|
Help: "Scope that rclone should use when requesting access from drive.",
|
||||||
@ -165,14 +157,92 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "root_folder_id",
|
Name: "root_folder_id",
|
||||||
Help: "ID of the root folder - leave blank normally. Fill in to access \"Computers\" folders. (see docs).",
|
Help: "ID of the root folder\nLeave blank normally.\nFill in to access \"Computers\" folders. (see docs).",
|
||||||
}, {
|
}, {
|
||||||
Name: "service_account_file",
|
Name: "service_account_file",
|
||||||
Help: "Service Account Credentials JSON file path - leave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
Help: "Service Account Credentials JSON file path \nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||||
|
}, {
|
||||||
|
Name: "service_account_credentials",
|
||||||
|
Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
|
||||||
|
Hide: fs.OptionHideBoth,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "team_drive",
|
||||||
|
Help: "ID of the Team Drive",
|
||||||
|
Hide: fs.OptionHideBoth,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "auth_owner_only",
|
||||||
|
Default: false,
|
||||||
|
Help: "Only consider files owned by the authenticated user.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "use_trash",
|
||||||
|
Default: true,
|
||||||
|
Help: "Send files to the trash instead of deleting permanently.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "skip_gdocs",
|
||||||
|
Default: false,
|
||||||
|
Help: "Skip google documents in all listings.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "shared_with_me",
|
||||||
|
Default: false,
|
||||||
|
Help: "Only show files that are shared with me",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "trashed_only",
|
||||||
|
Default: false,
|
||||||
|
Help: "Only show files that are in the trash",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "formats",
|
||||||
|
Default: defaultExtensions,
|
||||||
|
Help: "Comma separated list of preferred formats for downloading Google docs.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "use_created_date",
|
||||||
|
Default: false,
|
||||||
|
Help: "Use created date instead of modified date.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "list_chunk",
|
||||||
|
Default: 1000,
|
||||||
|
Help: "Size of listing chunk 100-1000. 0 to disable.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "impersonate",
|
||||||
|
Default: "",
|
||||||
|
Help: "Impersonate this user when using a service account.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "alternate_export",
|
||||||
|
Default: false,
|
||||||
|
Help: "Use alternate export URLs for google documents export.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "upload_cutoff",
|
||||||
|
Default: defaultChunkSize,
|
||||||
|
Help: "Cutoff for switching to chunked upload",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "chunk_size",
|
||||||
|
Default: defaultChunkSize,
|
||||||
|
Help: "Upload chunk size. Must a power of 2 >= 256k.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "acknowledge_abuse",
|
||||||
|
Default: false,
|
||||||
|
Help: "Set to allow files which return cannotDownloadAbusiveFile to be downloaded.",
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "keep_revision_forever",
|
||||||
|
Default: false,
|
||||||
|
Help: "Keep new head revision forever.",
|
||||||
|
Advanced: true,
|
||||||
}},
|
}},
|
||||||
})
|
})
|
||||||
flags.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
|
|
||||||
flags.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must a power of 2 >= 256k.")
|
|
||||||
|
|
||||||
// Invert mimeTypeToExtension
|
// Invert mimeTypeToExtension
|
||||||
extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))
|
extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))
|
||||||
@ -181,10 +251,34 @@ func init() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Options defines the configuration for this backend
|
||||||
|
type Options struct {
|
||||||
|
Scope string `config:"scope"`
|
||||||
|
RootFolderID string `config:"root_folder_id"`
|
||||||
|
ServiceAccountFile string `config:"service_account_file"`
|
||||||
|
ServiceAccountCredentials string `config:"service_account_credentials"`
|
||||||
|
TeamDriveID string `config:"team_drive"`
|
||||||
|
AuthOwnerOnly bool `config:"auth_owner_only"`
|
||||||
|
UseTrash bool `config:"use_trash"`
|
||||||
|
SkipGdocs bool `config:"skip_gdocs"`
|
||||||
|
SharedWithMe bool `config:"shared_with_me"`
|
||||||
|
TrashedOnly bool `config:"trashed_only"`
|
||||||
|
Extensions string `config:"formats"`
|
||||||
|
UseCreatedDate bool `config:"use_created_date"`
|
||||||
|
ListChunk int64 `config:"list_chunk"`
|
||||||
|
Impersonate string `config:"impersonate"`
|
||||||
|
AlternateExport bool `config:"alternate_export"`
|
||||||
|
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
|
||||||
|
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||||
|
AcknowledgeAbuse bool `config:"acknowledge_abuse"`
|
||||||
|
KeepRevisionForever bool `config:"keep_revision_forever"`
|
||||||
|
}
|
||||||
|
|
||||||
// Fs represents a remote drive server
|
// Fs represents a remote drive server
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
name string // name of this remote
|
name string // name of this remote
|
||||||
root string // the path we are working on
|
root string // the path we are working on
|
||||||
|
opt Options // parsed options
|
||||||
features *fs.Features // optional features
|
features *fs.Features // optional features
|
||||||
svc *drive.Service // the connection to the drive server
|
svc *drive.Service // the connection to the drive server
|
||||||
client *http.Client // authorized client
|
client *http.Client // authorized client
|
||||||
@ -192,7 +286,6 @@ type Fs struct {
|
|||||||
dirCache *dircache.DirCache // Map of directory path to directory id
|
dirCache *dircache.DirCache // Map of directory path to directory id
|
||||||
pacer *pacer.Pacer // To pace the API calls
|
pacer *pacer.Pacer // To pace the API calls
|
||||||
extensions []string // preferred extensions to download docs
|
extensions []string // preferred extensions to download docs
|
||||||
teamDriveID string // team drive ID, may be ""
|
|
||||||
isTeamDrive bool // true if this is a team drive
|
isTeamDrive bool // true if this is a team drive
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -274,8 +367,8 @@ type listFn func(*drive.File) bool
|
|||||||
func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bool, includeAll bool, fn listFn) (found bool, err error) {
|
func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bool, includeAll bool, fn listFn) (found bool, err error) {
|
||||||
var query []string
|
var query []string
|
||||||
if !includeAll {
|
if !includeAll {
|
||||||
q := "trashed=" + strconv.FormatBool(*driveTrashedOnly)
|
q := "trashed=" + strconv.FormatBool(f.opt.TrashedOnly)
|
||||||
if *driveTrashedOnly {
|
if f.opt.TrashedOnly {
|
||||||
q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
|
q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
|
||||||
}
|
}
|
||||||
query = append(query, q)
|
query = append(query, q)
|
||||||
@ -283,10 +376,10 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
|||||||
// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
|
// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
|
||||||
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
|
// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
|
||||||
// If we need to list file inside those shared folders, we must search it without sharedWithMe
|
// If we need to list file inside those shared folders, we must search it without sharedWithMe
|
||||||
if *driveSharedWithMe && dirID == f.rootFolderID {
|
if f.opt.SharedWithMe && dirID == f.rootFolderID {
|
||||||
query = append(query, "sharedWithMe=true")
|
query = append(query, "sharedWithMe=true")
|
||||||
}
|
}
|
||||||
if dirID != "" && !(*driveSharedWithMe && dirID == f.rootFolderID) {
|
if dirID != "" && !(f.opt.SharedWithMe && dirID == f.rootFolderID) {
|
||||||
query = append(query, fmt.Sprintf("'%s' in parents", dirID))
|
query = append(query, fmt.Sprintf("'%s' in parents", dirID))
|
||||||
}
|
}
|
||||||
if title != "" {
|
if title != "" {
|
||||||
@ -308,11 +401,11 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
|||||||
list.Q(strings.Join(query, " and "))
|
list.Q(strings.Join(query, " and "))
|
||||||
// fmt.Printf("list Query = %q\n", query)
|
// fmt.Printf("list Query = %q\n", query)
|
||||||
}
|
}
|
||||||
if *driveListChunk > 0 {
|
if f.opt.ListChunk > 0 {
|
||||||
list.PageSize(*driveListChunk)
|
list.PageSize(f.opt.ListChunk)
|
||||||
}
|
}
|
||||||
if f.isTeamDrive {
|
if f.isTeamDrive {
|
||||||
list.TeamDriveId(f.teamDriveID)
|
list.TeamDriveId(f.opt.TeamDriveID)
|
||||||
list.SupportsTeamDrives(true)
|
list.SupportsTeamDrives(true)
|
||||||
list.IncludeTeamDriveItems(true)
|
list.IncludeTeamDriveItems(true)
|
||||||
list.Corpora("teamDrive")
|
list.Corpora("teamDrive")
|
||||||
@ -324,7 +417,7 @@ func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bo
|
|||||||
|
|
||||||
var fields = partialFields
|
var fields = partialFields
|
||||||
|
|
||||||
if *driveAuthOwnerOnly {
|
if f.opt.AuthOwnerOnly {
|
||||||
fields += ",owners"
|
fields += ",owners"
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -395,17 +488,16 @@ func (f *Fs) parseExtensions(extensions string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Figure out if the user wants to use a team drive
|
// Figure out if the user wants to use a team drive
|
||||||
func configTeamDrive(name string) error {
|
func configTeamDrive(opt *Options, m configmap.Mapper, name string) error {
|
||||||
teamDrive := config.FileGet(name, "team_drive")
|
if opt.TeamDriveID == "" {
|
||||||
if teamDrive == "" {
|
|
||||||
fmt.Printf("Configure this as a team drive?\n")
|
fmt.Printf("Configure this as a team drive?\n")
|
||||||
} else {
|
} else {
|
||||||
fmt.Printf("Change current team drive ID %q?\n", teamDrive)
|
fmt.Printf("Change current team drive ID %q?\n", opt.TeamDriveID)
|
||||||
}
|
}
|
||||||
if !config.ConfirmWithDefault(false) {
|
if !config.ConfirmWithDefault(false) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
client, err := createOAuthClient(name)
|
client, err := createOAuthClient(opt, name, m)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "config team drive failed to create oauth client")
|
return errors.Wrap(err, "config team drive failed to create oauth client")
|
||||||
}
|
}
|
||||||
@ -440,7 +532,8 @@ func configTeamDrive(name string) error {
|
|||||||
} else {
|
} else {
|
||||||
driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
|
driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
|
||||||
}
|
}
|
||||||
config.FileSet(name, "team_drive", driveID)
|
m.Set("team_drive", driveID)
|
||||||
|
opt.TeamDriveID = driveID
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -449,39 +542,37 @@ func newPacer() *pacer.Pacer {
|
|||||||
return pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer)
|
return pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
func getServiceAccountClient(opt *Options, credentialsData []byte) (*http.Client, error) {
|
||||||
conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
|
conf, err := google.JWTConfigFromJSON(credentialsData, driveConfig.Scopes...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "error processing credentials")
|
return nil, errors.Wrap(err, "error processing credentials")
|
||||||
}
|
}
|
||||||
if *driveImpersonate != "" {
|
if opt.Impersonate != "" {
|
||||||
conf.Subject = *driveImpersonate
|
conf.Subject = opt.Impersonate
|
||||||
}
|
}
|
||||||
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
|
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
|
||||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func createOAuthClient(name string) (*http.Client, error) {
|
func createOAuthClient(opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
|
||||||
var oAuthClient *http.Client
|
var oAuthClient *http.Client
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
// try loading service account credentials from env variable, then from a file
|
// try loading service account credentials from env variable, then from a file
|
||||||
serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
|
if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
|
||||||
serviceAccountPath := config.FileGet(name, "service_account_file")
|
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
|
||||||
if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
|
|
||||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||||
}
|
}
|
||||||
serviceAccountCreds = loadedCreds
|
opt.ServiceAccountCredentials = string(loadedCreds)
|
||||||
}
|
}
|
||||||
if len(serviceAccountCreds) > 0 {
|
if opt.ServiceAccountCredentials != "" {
|
||||||
oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
|
oAuthClient, err = getServiceAccountClient(opt, []byte(opt.ServiceAccountCredentials))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to create oauth client from service account")
|
return nil, errors.Wrap(err, "failed to create oauth client from service account")
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
oAuthClient, _, err = oauthutil.NewClient(name, driveConfig)
|
oAuthClient, _, err = oauthutil.NewClient(name, m, driveConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrap(err, "failed to create oauth client")
|
return nil, errors.Wrap(err, "failed to create oauth client")
|
||||||
}
|
}
|
||||||
@ -491,15 +582,21 @@ func createOAuthClient(name string) (*http.Client, error) {
	}
}

// NewFs contstructs an Fs from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
	if !isPowerOfTwo(int64(chunkSize)) {
		return nil, errors.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
	}
	if chunkSize < 256*1024 {
		return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if !isPowerOfTwo(int64(opt.ChunkSize)) {
		return nil, errors.Errorf("drive: chunk size %v isn't a power of two", opt.ChunkSize)
	}
	if opt.ChunkSize < 256*1024 {
		return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", opt.ChunkSize)
	}

	oAuthClient, err := createOAuthClient(name)
	oAuthClient, err := createOAuthClient(opt, name, m)
	if err != nil {
		return nil, errors.Wrap(err, "drive: failed when making oauth client")
	}
@ -512,10 +609,10 @@ func NewFs(name, path string) (fs.Fs, error) {
	f := &Fs{
		name:  name,
		root:  root,
		opt:   *opt,
		pacer: newPacer(),
	}
	f.teamDriveID = config.FileGet(name, "team_drive")
	f.isTeamDrive = f.teamDriveID != ""
	f.isTeamDrive = opt.TeamDriveID != ""
	f.features = (&fs.Features{
		DuplicateFiles: true,
		ReadMimeType:   true,
@ -532,20 +629,20 @@ func NewFs(name, path string) (fs.Fs, error) {

	// set root folder for a team drive or query the user root folder
	if f.isTeamDrive {
		f.rootFolderID = f.teamDriveID
		f.rootFolderID = f.opt.TeamDriveID
	} else {
		f.rootFolderID = "root"
	}

	// override root folder if set in the config
	if rootID := config.FileGet(name, "root_folder_id"); rootID != "" {
		f.rootFolderID = rootID
	if opt.RootFolderID != "" {
		f.rootFolderID = opt.RootFolderID
	}

	f.dirCache = dircache.New(root, f.rootFolderID, f)

	// Parse extensions
	err = f.parseExtensions(*driveExtensions)
	err = f.parseExtensions(opt.Extensions)
	if err != nil {
		return nil, err
	}
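Every backend in this commit repeats the same three-step pattern visible above: declare an Options struct tagged with config keys, call configstruct.Set to fill it from the configmap, then read fields instead of package globals. A toy sketch of the tag-driven filling, under the assumption that configstruct matches fields by their `config:` tag (toy types, not the real package):

package main

import (
	"fmt"
	"reflect"
)

// toyMap stands in for configmap.Mapper: it just wraps a map.
type toyMap map[string]string

func (m toyMap) Get(key string) (string, bool) { v, ok := m[key]; return v, ok }

// toySet fills string fields of opt from m using the `config:` tag,
// roughly what configstruct.Set does for the simple cases above.
func toySet(m toyMap, opt interface{}) {
	v := reflect.ValueOf(opt).Elem()
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		if key := t.Field(i).Tag.Get("config"); key != "" {
			if val, ok := m.Get(key); ok && v.Field(i).Kind() == reflect.String {
				v.Field(i).SetString(val)
			}
		}
	}
}

type Options struct {
	Remote string `config:"remote"`
}

func main() {
	opt := new(Options)
	toySet(toyMap{"remote": "gdrive:path"}, opt)
	fmt.Println(opt.Remote) // gdrive:path
}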
@ -729,7 +826,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
			when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
			d := fs.NewDir(remote, when).SetID(item.Id)
			entries = append(entries, d)
		case *driveAuthOwnerOnly && !isAuthOwned(item):
		case f.opt.AuthOwnerOnly && !isAuthOwned(item):
			// ignore object
		case item.Md5Checksum != "" || item.Size > 0:
			// If item has MD5 sum or a length it is a file stored on drive
@ -739,7 +836,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
				return true
			}
			entries = append(entries, o)
		case *driveSkipGdocs:
		case f.opt.SkipGdocs:
			fs.Debugf(remote, "Skipping google document type %q", item.MimeType)
		default:
			exportMimeTypes, isDocument := f.exportFormats()[item.MimeType]
@ -760,7 +857,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
			}
			obj := o.(*Object)
			obj.url = fmt.Sprintf("%sfiles/%s/export?mimeType=%s", f.svc.BasePath, item.Id, url.QueryEscape(exportMimeType))
			if *driveAlternateExport {
			if f.opt.AlternateExport {
				switch item.MimeType {
				case "application/vnd.google-apps.drawing":
					obj.url = fmt.Sprintf("https://docs.google.com/drawings/d/%s/export/%s", item.Id, extension)
@ -854,11 +951,11 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
	}

	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
	if size == 0 || size < int64(f.opt.UploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do()
			info, err = f.svc.Files.Create(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
			return shouldRetry(err)
		})
		if err != nil {
@ -972,7 +1069,7 @@ func (f *Fs) Rmdir(dir string) error {
	// trash the directory if it had trashed files
	// in or the user wants to trash, otherwise
	// delete it.
	err = f.rmdir(directoryID, trashedFiles || *driveUseTrash)
	err = f.rmdir(directoryID, trashedFiles || f.opt.UseTrash)
	if err != nil {
		return err
	}
@ -1015,7 +1112,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {

	var info *drive.File
	err = o.fs.pacer.Call(func() (bool, error) {
		info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do()
		info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).KeepRevisionForever(f.opt.KeepRevisionForever).Do()
		return shouldRetry(err)
	})
	if err != nil {
@ -1040,7 +1137,7 @@ func (f *Fs) Purge() error {
		return err
	}
	err = f.pacer.Call(func() (bool, error) {
		if *driveUseTrash {
		if f.opt.UseTrash {
			info := drive.File{
				Trashed: true,
			}
@ -1316,11 +1413,11 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), pollInter

		err = f.pacer.Call(func() (bool, error) {
			changesCall := f.svc.Changes.List(pageToken).Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
			if *driveListChunk > 0 {
			if f.opt.ListChunk > 0 {
				changesCall.PageSize(*driveListChunk)
				changesCall.PageSize(f.opt.ListChunk)
			}
			if f.isTeamDrive {
				changesCall.TeamDriveId(f.teamDriveID)
				changesCall.TeamDriveId(f.opt.TeamDriveID)
				changesCall.SupportsTeamDrives(true)
				changesCall.IncludeTeamDriveItems(true)
			}
@ -1444,7 +1541,7 @@ func (o *Object) setMetaData(info *drive.File) {
	o.url = fmt.Sprintf("%sfiles/%s?alt=media", o.fs.svc.BasePath, info.Id)
	o.md5sum = strings.ToLower(info.Md5Checksum)
	o.bytes = info.Size
	if *driveUseCreatedDate {
	if o.fs.opt.UseCreatedDate {
		o.modifiedDate = info.CreatedTime
	} else {
		o.modifiedDate = info.ModifiedTime
@ -1617,7 +1714,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	_, res, err := o.httpResponse("GET", options)
	if err != nil {
		if isGoogleError(err, "cannotDownloadAbusiveFile") {
			if *driveAcknowledgeAbuse {
			if o.fs.opt.AcknowledgeAbuse {
				// Retry acknowledging abuse
				if strings.ContainsRune(o.url, '?') {
					o.url += "&"
@ -1663,10 +1760,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	// Make the API request to upload metadata and file data.
	var err error
	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
	if size == 0 || size < int64(o.fs.opt.UploadCutoff) {
		// Don't retry, return a retry error instead
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(*driveKeepRevisionForever).Do()
			info, err = o.fs.svc.Files.Update(o.id, updateInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).KeepRevisionForever(o.fs.opt.KeepRevisionForever).Do()
			return shouldRetry(err)
		})
		if err != nil {
@ -1690,7 +1787,7 @@ func (o *Object) Remove() error {
	}
	var err error
	err = o.fs.pacer.Call(func() (bool, error) {
		if *driveUseTrash {
		if o.fs.opt.UseTrash {
			info := drive.File{
				Trashed: true,
			}
backend/drive/upload.go

@ -58,7 +58,7 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string,
	if f.isTeamDrive {
		params.Set("supportsTeamDrives", "true")
	}
	if *driveKeepRevisionForever {
	if f.opt.KeepRevisionForever {
		params.Set("keepRevisionForever", "true")
	}
	urls := "https://www.googleapis.com/upload/drive/v3/files"
@ -197,11 +197,11 @@ func (rx *resumableUpload) Upload() (*drive.File, error) {
	start := int64(0)
	var StatusCode int
	var err error
	buf := make([]byte, int(chunkSize))
	buf := make([]byte, int(rx.f.opt.ChunkSize))
	for start < rx.ContentLength {
		reqSize := rx.ContentLength - start
		if reqSize >= int64(chunkSize) {
		if reqSize >= int64(rx.f.opt.ChunkSize) {
			reqSize = int64(chunkSize)
			reqSize = int64(rx.f.opt.ChunkSize)
		}
		chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
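The resumable upload loop above clamps every request to the configured chunk size. A self-contained sketch of that arithmetic with a hypothetical 8 MB chunk size:

package main

import "fmt"

func main() {
	const chunkSize = int64(8 * 1024 * 1024)
	contentLength := int64(20*1024*1024 + 123)
	for start := int64(0); start < contentLength; {
		reqSize := contentLength - start
		if reqSize >= chunkSize {
			reqSize = chunkSize
		}
		fmt.Printf("upload bytes %d-%d\n", start, start+reqSize-1)
		start += reqSize
	}
	// prints two full 8 MB chunks, then a final 4 MB + 123 byte chunk
}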
backend/dropbox/dropbox.go

@ -37,7 +37,8 @@ import (
	"github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/hash"
@ -55,24 +56,6 @@ const (
	minSleep      = 10 * time.Millisecond
	maxSleep      = 2 * time.Second
	decayConstant = 2 // bigger for slower decay, exponential
)

var (
	// Description of how to auth for this app
	dropboxConfig = &oauth2.Config{
		Scopes: []string{},
		// Endpoint: oauth2.Endpoint{
		// 	AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
		// 	TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
		// },
		Endpoint:     dropbox.OAuthEndpoint(""),
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}
	// A regexp matching path names for files Dropbox ignores
	// See https://www.dropbox.com/en/help/145 - Ignored files
	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
	// Upload chunk size - setting too small makes uploads slow.
	// Chunks are buffered into memory for retries.
	//
@ -96,8 +79,26 @@ var (
	// Choose 48MB which is 91% of Maximum speed. rclone by
	// default does 4 transfers so this should use 4*48MB = 192MB
	// by default.
	uploadChunkSize    = fs.SizeSuffix(48 * 1024 * 1024)
	maxUploadChunkSize = fs.SizeSuffix(150 * 1024 * 1024)
	defaultChunkSize = 48 * 1024 * 1024
	maxChunkSize     = 150 * 1024 * 1024
)

var (
	// Description of how to auth for this app
	dropboxConfig = &oauth2.Config{
		Scopes: []string{},
		// Endpoint: oauth2.Endpoint{
		// 	AuthURL:  "https://www.dropbox.com/1/oauth2/authorize",
		// 	TokenURL: "https://api.dropboxapi.com/1/oauth2/token",
		// },
		Endpoint:     dropbox.OAuthEndpoint(""),
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}
	// A regexp matching path names for files Dropbox ignores
	// See https://www.dropbox.com/en/help/145 - Ignored files
	ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
)

// Register with Fs
@ -106,27 +107,37 @@ func init() {
		Name:        "dropbox",
		Description: "Dropbox",
		NewFs:       NewFs,
		Config: func(name string) {
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.ConfigNoOffline("dropbox", name, dropboxConfig)
			err := oauthutil.ConfigNoOffline("dropbox", name, m, dropboxConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Dropbox App Client Id - leave blank normally.",
			Help: "Dropbox App Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Dropbox App Client Secret - leave blank normally.",
			Help: "Dropbox App Client Secret\nLeave blank normally.",
		}, {
			Name:     "chunk_size",
			Help:     fmt.Sprintf("Upload chunk size. Max %v.", fs.SizeSuffix(maxChunkSize)),
			Default:  fs.SizeSuffix(defaultChunkSize),
			Advanced: true,
		}},
	})
	flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}

// Options defines the configuration for this backend
type Options struct {
	ChunkSize fs.SizeSuffix `config:"chunk_size"`
}

// Fs represents a remote dropbox server
type Fs struct {
	name     string         // name of this remote
	root     string         // the path we are working on
	opt      Options        // parsed options
	features *fs.Features   // optional features
	srv      files.Client   // the connection to the dropbox server
	sharing  sharing.Client // as above, but for generating sharing links
@ -185,15 +196,22 @@ func shouldRetry(err error) (bool, error) {
}

// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	if uploadChunkSize > maxUploadChunkSize {
		return nil, errors.Errorf("chunk size too big, must be < %v", maxUploadChunkSize)
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.ChunkSize > maxChunkSize {
		return nil, errors.Errorf("chunk size too big, must be < %v", maxChunkSize)
	}

	// Convert the old token if it exists. The old token was just
	// just a string, the new one is a JSON blob
	oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
	if oldToken != "" && oldToken[0] != '{' {
	oldToken, ok := m.Get(config.ConfigToken)
	oldToken = strings.TrimSpace(oldToken)
	if ok && oldToken != "" && oldToken[0] != '{' {
		fs.Infof(name, "Converting token to new format")
		newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
		err := config.SetValueAndSave(name, config.ConfigToken, newToken)
@ -202,13 +220,14 @@ func NewFs(name, root string) (fs.Fs, error) {
		}
	}

	oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig)
	oAuthClient, _, err := oauthutil.NewClient(name, m, dropboxConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure dropbox")
	}

	f := &Fs{
		name:  name,
		opt:   *opt,
		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
	}
	config := dropbox.Config{
@ -911,7 +930,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// unknown (i.e. -1) or smaller than uploadChunkSize, the method incurs an
// avoidable request to the Dropbox API that does not carry payload.
func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size int64) (entry *files.FileMetadata, err error) {
	chunkSize := int64(uploadChunkSize)
	chunkSize := int64(o.fs.opt.ChunkSize)
	chunks := 0
	if size != -1 {
		chunks = int(size/chunkSize) + 1
@ -1026,7 +1045,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	size := src.Size()
	var err error
	var entry *files.FileMetadata
	if size > int64(uploadChunkSize) || size == -1 {
	if size > int64(o.fs.opt.ChunkSize) || size == -1 {
		entry, err = o.uploadChunked(in, commitInfo, size)
	} else {
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
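The token conversion in NewFs above wraps a bare legacy token string in the JSON shape the oauth2 library expects; a standalone sketch of the transformation (the token value is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	oldToken := "AAAAAAAAAAAAAAAAAAAA" // hypothetical pre-JSON token
	newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
	// The zero expiry marks the token as never expiring, matching the old behaviour.
	fmt.Println(json.Valid([]byte(newToken))) // true
}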
backend/ftp/ftp.go

@ -4,16 +4,15 @@ package ftp
import (
	"io"
	"net/textproto"
	"net/url"
	"os"
	"path"
	"strings"
	"sync"
	"time"

	"github.com/jlaffaye/ftp"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
@ -30,33 +29,40 @@ func init() {
		{
			Name:     "host",
			Help:     "FTP host to connect to",
			Optional: false,
			Required: true,
			Examples: []fs.OptionExample{{
				Value: "ftp.example.com",
				Help:  "Connect to ftp.example.com",
			}},
		}, {
			Name:     "user",
			Help:     "FTP username, leave blank for current username, " + os.Getenv("USER"),
			Optional: true,
		}, {
			Name:     "port",
			Help:     "FTP port, leave blank to use default (21) ",
			Help:     "FTP port, leave blank to use default (21)",
			Optional: true,
		}, {
			Name:       "pass",
			Help:       "FTP password",
			IsPassword: true,
			Optional:   false,
			Required:   true,
		},
		},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Host string `config:"host"`
	User string `config:"user"`
	Pass string `config:"pass"`
	Port string `config:"port"`
}

// Fs represents a remote FTP server
type Fs struct {
	name     string       // name of this remote
	root     string       // the path we are working on if any
	opt      Options      // parsed options
	features *fs.Features // optional features
	url      string
	user     string
@ -161,51 +167,33 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
}

// NewFs contstructs an Fs from the path, container:path
func NewFs(name, root string) (ff fs.Fs, err error) {
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
	// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
	// FIXME Convert the old scheme used for the first beta - remove after release
	if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
		fs.Infof(name, "Converting old configuration")
		u, err := url.Parse(ftpURL)
		if err != nil {
			return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
		}
		parts := strings.Split(u.Host, ":")
		config.FileSet(name, "host", parts[0])
		if len(parts) > 1 {
			config.FileSet(name, "port", parts[1])
		}
		config.FileSet(name, "host", u.Host)
		config.FileSet(name, "user", config.FileGet(name, "username"))
		config.FileSet(name, "pass", config.FileGet(name, "password"))
		config.FileDeleteKey(name, "username")
		config.FileDeleteKey(name, "password")
		config.FileDeleteKey(name, "url")
		config.SaveConfig()
		if u.Path != "" && u.Path != "/" {
			fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
		}
	}
	host := config.FileGet(name, "host")
	user := config.FileGet(name, "user")
	pass := config.FileGet(name, "pass")
	port := config.FileGet(name, "port")
	pass, err = obscure.Reveal(pass)
	// Parse config into Options struct
	opt := new(Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	pass, err := obscure.Reveal(opt.Pass)
	if err != nil {
		return nil, errors.Wrap(err, "NewFS decrypt password")
	}
	user := opt.User
	if user == "" {
		user = os.Getenv("USER")
	}
	port := opt.Port
	if port == "" {
		port = "21"
	}

	dialAddr := host + ":" + port
	dialAddr := opt.Host + ":" + port
	u := "ftp://" + path.Join(dialAddr+"/", root)
	f := &Fs{
		name: name,
		root: root,
		opt:  *opt,
		url:  u,
		user: user,
		pass: pass,
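With all FTP settings flowing through Options, a remote can now be constructed programmatically, mirroring what the http tests below do with configmap.Simple. A sketch, assuming obscure.MustObscure is available as the writing counterpart of the obscure.Reveal call above (the host and credentials are made up):

package ftp_test

import (
	"log"

	"github.com/ncw/rclone/backend/ftp"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/obscure"
)

func Example() {
	m := configmap.Simple{
		"host": "ftp.example.com", // hypothetical server
		"user": "alice",
		"pass": obscure.MustObscure("hunter2"), // stored obscured; NewFs reveals it
		"port": "2121",
	}
	f, err := ftp.NewFs("myftp", "dir/subdir", m)
	if err != nil {
		log.Fatal(err)
	}
	_ = f
}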
backend/googlecloudstorage/googlecloudstorage.go

@ -29,7 +29,8 @@ import (

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fshttp"
@ -55,8 +56,6 @@ const (
)

var (
	gcsLocation     = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-noetheast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
	gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
	// Description of how to auth for this app
	storageConfig = &oauth2.Config{
		Scopes: []string{storage.DevstorageFullControlScope},
@ -71,29 +70,36 @@ var (
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "google cloud storage",
		Prefix:      "gcs",
		Description: "Google Cloud Storage (this is not Google Drive)",
		NewFs:       NewFs,
		Config: func(name string) {
		Config: func(name string, m configmap.Mapper) {
			if config.FileGet(name, "service_account_file") != "" {
			saFile, _ := m.Get("service_account_file")
			saCreds, _ := m.Get("service_account_credentials")
			if saFile != "" || saCreds != "" {
				return
			}
			err := oauthutil.Config("google cloud storage", name, storageConfig)
			err := oauthutil.Config("google cloud storage", name, m, storageConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client Id - leave blank normally.",
			Help: "Google Application Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret - leave blank normally.",
			Help: "Google Application Client Secret\nLeave blank normally.",
		}, {
			Name: "project_number",
			Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
			Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
			Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
		}, {
			Name: "service_account_credentials",
			Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want use SA instead of interactive login.",
			Hide: fs.OptionHideBoth,
		}, {
			Name: "object_acl",
			Help: "Access Control List for new objects.",
@ -207,22 +213,29 @@ func init() {
	})
}

// Options defines the configuration for this backend
type Options struct {
	ProjectNumber             string `config:"project_number"`
	ServiceAccountFile        string `config:"service_account_file"`
	ServiceAccountCredentials string `config:"service_account_credentials"`
	ObjectACL                 string `config:"object_acl"`
	BucketACL                 string `config:"bucket_acl"`
	Location                  string `config:"location"`
	StorageClass              string `config:"storage_class"`
}

// Fs represents a remote storage server
type Fs struct {
	name          string           // name of this remote
	root          string           // the path we are working on if any
	features      *fs.Features     // optional features
	svc           *storage.Service // the connection to the storage server
	client        *http.Client     // authorized client
	bucket        string           // the bucket we are working on
	bucketOKMu    sync.Mutex       // mutex to protect bucket OK
	bucketOK      bool             // true if we have created the bucket
	projectNumber string           // used for finding buckets
	objectACL     string           // used when creating new objects
	bucketACL     string           // used when creating new buckets
	location      string           // location of new buckets
	storageClass  string           // storage class of new buckets
	pacer         *pacer.Pacer     // To pace the API calls
	opt        Options          // parsed options
	features   *fs.Features     // optional features
	svc        *storage.Service // the connection to the storage server
	client     *http.Client     // authorized client
	bucket     string           // the bucket we are working on
	bucketOKMu sync.Mutex       // mutex to protect bucket OK
	bucketOK   bool             // true if we have created the bucket
	pacer      *pacer.Pacer     // To pace the API calls
}

// Object describes a storage object
@ -315,27 +328,37 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
}

// NewFs contstructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	var oAuthClient *http.Client
	var err error

	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.ObjectACL == "" {
		opt.ObjectACL = "private"
	}
	if opt.BucketACL == "" {
		opt.BucketACL = "private"
	}

	// try loading service account credentials from env variable, then from a file
	serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
	serviceAccountPath := config.FileGet(name, "service_account_file")
	if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
	if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" {
		loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(opt.ServiceAccountFile))
		if err != nil {
			return nil, errors.Wrap(err, "error opening service account credentials file")
		}
		serviceAccountCreds = loadedCreds
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if len(serviceAccountCreds) > 0 {
	if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
		oAuthClient, err = getServiceAccountClient([]byte(opt.ServiceAccountCredentials))
		if err != nil {
			return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
		oAuthClient, _, err = oauthutil.NewClient(name, m, storageConfig)
		if err != nil {
			return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
		}
@ -347,33 +370,17 @@ func NewFs(name, root string) (fs.Fs, error) {
	}

	f := &Fs{
		name:          name,
		bucket:        bucket,
		root:          directory,
		projectNumber: config.FileGet(name, "project_number"),
		objectACL:     config.FileGet(name, "object_acl"),
		bucketACL:     config.FileGet(name, "bucket_acl"),
		location:      config.FileGet(name, "location"),
		storageClass:  config.FileGet(name, "storage_class"),
		pacer:         pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
		opt:    *opt,
		pacer:  pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
	}
	f.features = (&fs.Features{
		ReadMimeType:  true,
		WriteMimeType: true,
		BucketBased:   true,
	}).Fill(f)
	if f.objectACL == "" {
		f.objectACL = "private"
	}
	if f.bucketACL == "" {
		f.bucketACL = "private"
	}
	if *gcsLocation != "" {
		f.location = *gcsLocation
	}
	if *gcsStorageClass != "" {
		f.storageClass = *gcsStorageClass
	}

	// Create a new authorized Drive client.
	f.client = oAuthClient
@ -550,10 +557,10 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
	if dir != "" {
		return nil, fs.ErrorListBucketRequired
	}
	if f.projectNumber == "" {
	if f.opt.ProjectNumber == "" {
		return nil, errors.New("can't list buckets without project number")
	}
	listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
	listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
	for {
		var buckets *storage.Buckets
		err = f.pacer.Call(func() (bool, error) {
@ -672,17 +679,17 @@ func (f *Fs) Mkdir(dir string) (err error) {
		return errors.Wrap(err, "failed to get bucket")
	}

	if f.projectNumber == "" {
	if f.opt.ProjectNumber == "" {
		return errors.New("can't make bucket without project number")
	}

	bucket := storage.Bucket{
		Name:         f.bucket,
		Location:     f.location,
		Location:     f.opt.Location,
		StorageClass: f.storageClass,
		StorageClass: f.opt.StorageClass,
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
		_, err = f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket).PredefinedAcl(f.opt.BucketACL).Do()
		return shouldRetry(err)
	})
	if err == nil {
@ -948,7 +955,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	}
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
		newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.opt.ObjectACL).Do()
		return shouldRetry(err)
	})
	if err != nil {
backend/http/http.go

@ -14,7 +14,8 @@ import (
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/rest"
@ -35,7 +36,7 @@ func init() {
		Options: []fs.Option{{
			Name:     "url",
			Help:     "URL of http host to connect to",
			Optional: false,
			Required: true,
			Examples: []fs.OptionExample{{
				Value: "https://example.com",
				Help:  "Connect to example.com",
@ -45,11 +46,17 @@ func init() {
	fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
	Endpoint string `config:"url"`
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
	name        string
	root        string
	features    *fs.Features // optional features
	opt         Options      // options for this backend
	endpoint    *url.URL
	endpointURL string // endpoint as a string
	httpClient  *http.Client
@ -78,14 +85,20 @@ func statusError(res *http.Response, err error) error {

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	endpoint := config.FileGet(name, "url")
	if !strings.HasSuffix(endpoint, "/") {
		endpoint += "/"
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	if !strings.HasSuffix(opt.Endpoint, "/") {
		opt.Endpoint += "/"
	}

	// Parse the endpoint and stick the root onto it
	base, err := url.Parse(endpoint)
	base, err := url.Parse(opt.Endpoint)
	if err != nil {
		return nil, err
	}
@ -130,6 +143,7 @@ func NewFs(name, root string) (fs.Fs, error) {
	f := &Fs{
		name:        name,
		root:        root,
		opt:         *opt,
		httpClient:  client,
		endpoint:    u,
		endpointURL: u.String(),

backend/http/http_test.go

@ -16,6 +16,7 @@ import (

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fstest"
	"github.com/ncw/rclone/lib/rest"
	"github.com/stretchr/testify/assert"
@ -29,7 +30,7 @@ var (
)

// prepareServer the test server and return a function to tidy it up afterwards
func prepareServer(t *testing.T) func() {
func prepareServer(t *testing.T) (configmap.Simple, func()) {
	// file server for test/files
	fileServer := http.FileServer(http.Dir(filesPath))

@ -41,19 +42,24 @@ func prepareServer(t *testing.T) func() {
	// fs.Config.LogLevel = fs.LogLevelDebug
	// fs.Config.DumpHeaders = true
	// fs.Config.DumpBodies = true
	config.FileSet(remoteName, "type", "http")
	config.FileSet(remoteName, "url", ts.URL)
	// config.FileSet(remoteName, "type", "http")
	// config.FileSet(remoteName, "url", ts.URL)

	m := configmap.Simple{
		"type": "http",
		"url":  ts.URL,
	}

	// return a function to tidy up
	return ts.Close
	return m, ts.Close
}

// prepare the test server and return a function to tidy it up afterwards
func prepare(t *testing.T) (fs.Fs, func()) {
	tidy := prepareServer(t)
	m, tidy := prepareServer(t)

	// Instantiate it
	f, err := NewFs(remoteName, "")
	f, err := NewFs(remoteName, "", m)
	require.NoError(t, err)

	return f, tidy
@ -177,20 +183,20 @@ func TestMimeType(t *testing.T) {
}

func TestIsAFileRoot(t *testing.T) {
	tidy := prepareServer(t)
	m, tidy := prepareServer(t)
	defer tidy()

	f, err := NewFs(remoteName, "one%.txt")
	f, err := NewFs(remoteName, "one%.txt", m)
	assert.Equal(t, err, fs.ErrorIsFile)

	testListRoot(t, f)
}

func TestIsAFileSubDir(t *testing.T) {
	tidy := prepareServer(t)
	m, tidy := prepareServer(t)
	defer tidy()

	f, err := NewFs(remoteName, "three/underthree.txt")
	f, err := NewFs(remoteName, "three/underthree.txt", m)
	assert.Equal(t, err, fs.ErrorIsFile)

	entries, err := f.List("")
backend/hubic/hubic.go

@ -16,6 +16,8 @@ import (
	"github.com/ncw/rclone/backend/swift"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/lib/oauthutil"
@ -52,18 +54,18 @@ func init() {
		Name:        "hubic",
		Description: "Hubic",
		NewFs:       NewFs,
		Config: func(name string) {
		Config: func(name string, m configmap.Mapper) {
			err := oauthutil.Config("hubic", name, oauthConfig)
			err := oauthutil.Config("hubic", name, m, oauthConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Hubic Client Id - leave blank normally.",
			Help: "Hubic Client Id\nLeave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Hubic Client Secret - leave blank normally.",
			Help: "Hubic Client Secret\nLeave blank normally.",
		}},
	})
}
@ -145,8 +147,8 @@ func (f *Fs) getCredentials() (err error) {
}

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string) (fs.Fs, error) {
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	client, _, err := oauthutil.NewClient(name, oauthConfig)
	client, _, err := oauthutil.NewClient(name, m, oauthConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to configure Hubic")
	}
@ -167,8 +169,15 @@ func NewFs(name, root string) (fs.Fs, error) {
		return nil, errors.Wrap(err, "error authenticating swift connection")
	}

	// Parse config into swift.Options struct
	opt := new(swift.Options)
	err = configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}

	// Make inner swift Fs from the connection
	swiftFs, err := swift.NewFsWithConnection(name, root, c, true)
	swiftFs, err := swift.NewFsWithConnection(opt, name, root, c, true)
	if err != nil && err != fs.ErrorIsFile {
		return nil, err
	}
backend/local/local.go

@ -16,21 +16,14 @@ import (
	"unicode/utf8"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
	"google.golang.org/appengine/log"
)

var (
	followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
	skipSymlinks   = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
	noUTFNorm      = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
	noCheckUpdated = flags.BoolP("local-no-check-updated", "", false, "Don't check to see if the files change during upload")
)

// Constants
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset

@ -41,29 +34,68 @@ func init() {
		Description: "Local Disk",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "nounc",
			Help:     "Disable UNC (long path names) conversion on Windows",
			Optional: true,
			Examples: []fs.OptionExample{{
				Value: "true",
				Help:  "Disables long file names",
			}},
		}, {
			Name:     "copy_links",
			Help:     "Follow symlinks and copy the pointed to item.",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "L",
			Advanced: true,
		}, {
			Name:     "skip_links",
			Help:     "Don't warn about skipped symlinks.",
			Default:  false,
			NoPrefix: true,
			Advanced: true,
		}, {
			Name:     "no_unicode_normalization",
			Help:     "Don't apply unicode normalization to paths and filenames",
			Default:  false,
			Advanced: true,
		}, {
			Name:     "no_check_updated",
			Help:     "Don't check to see if the files change during upload",
			Default:  false,
			Advanced: true,
		}, {
			Name:     "one_file_system",
			Help:     "Don't cross filesystem boundaries (unix/macOS only).",
			Default:  false,
			NoPrefix: true,
			ShortOpt: "x",
			Advanced: true,
		}},
	}
	fs.Register(fsi)
}

// Options defines the configuration for this backend
type Options struct {
	FollowSymlinks bool `config:"copy_links"`
	SkipSymlinks   bool `config:"skip_links"`
	NoUTFNorm      bool `config:"no_unicode_normalization"`
	NoCheckUpdated bool `config:"no_check_updated"`
	NoUNC          bool `config:"nounc"`
	OneFileSystem  bool `config:"one_file_system"`
}

// Fs represents a local filesystem rooted at root
type Fs struct {
	name        string              // the name of the remote
	root        string              // The root directory (OS path)
	opt         Options             // parsed config options
	features    *fs.Features        // optional features
	dev         uint64              // device number of root node
	precisionOk sync.Once           // Whether we need to read the precision
	precision   time.Duration       // precision of local filesystem
	wmu         sync.Mutex          // used for locking access to 'warned'.
	warned      map[string]struct{} // whether we have warned about this string
	nounc       bool                // Skip UNC conversion on Windows
	// do os.Lstat or os.Stat
	lstat    func(name string) (os.FileInfo, error)
	dirNames *mapper // directory name mapping
@ -84,18 +116,22 @@ type Object struct {
|
|||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
|
|
||||||
// NewFs constructs an Fs from the path
|
// NewFs constructs an Fs from the path
|
||||||
func NewFs(name, root string) (fs.Fs, error) {
|
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||||
var err error
|
// Parse config into Options struct
|
||||||
|
opt := new(Options)
|
||||||
|
err := configstruct.Set(m, opt)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if *noUTFNorm {
|
if opt.NoUTFNorm {
|
||||||
log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||||
}
|
}
|
||||||
|
|
||||||
nounc := config.FileGet(name, "nounc")
|
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
|
opt: *opt,
|
||||||
warned: make(map[string]struct{}),
|
warned: make(map[string]struct{}),
|
||||||
nounc: nounc == "true",
|
|
||||||
dev: devUnset,
|
dev: devUnset,
|
||||||
lstat: os.Lstat,
|
lstat: os.Lstat,
|
||||||
dirNames: newMapper(),
|
dirNames: newMapper(),
|
||||||
@ -105,14 +141,14 @@ func NewFs(name, root string) (fs.Fs, error) {
|
|||||||
CaseInsensitive: f.caseInsensitive(),
|
CaseInsensitive: f.caseInsensitive(),
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
}).Fill(f)
|
}).Fill(f)
|
||||||
if *followSymlinks {
|
if opt.FollowSymlinks {
|
||||||
f.lstat = os.Stat
|
f.lstat = os.Stat
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check to see if this points to a file
|
// Check to see if this points to a file
|
||||||
fi, err := f.lstat(f.root)
|
fi, err := f.lstat(f.root)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
f.dev = readDevice(fi)
|
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||||
}
|
}
|
||||||
if err == nil && fi.Mode().IsRegular() {
|
if err == nil && fi.Mode().IsRegular() {
|
||||||
// It is a file, so use the parent as the root
|
// It is a file, so use the parent as the root
|
||||||
@ -243,7 +279,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
newRemote := path.Join(remote, name)
|
newRemote := path.Join(remote, name)
|
||||||
newPath := filepath.Join(fsDirPath, name)
|
newPath := filepath.Join(fsDirPath, name)
|
||||||
// Follow symlinks if required
|
// Follow symlinks if required
|
||||||
if *followSymlinks && (mode&os.ModeSymlink) != 0 {
|
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||||
fi, err = os.Stat(newPath)
|
fi, err = os.Stat(newPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -253,7 +289,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
|||||||
if fi.IsDir() {
|
if fi.IsDir() {
|
||||||
// Ignore directories which are symlinks. These are junction points under windows which
|
// Ignore directories which are symlinks. These are junction points under windows which
|
||||||
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
|
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
|
||||||
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
|
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) {
|
||||||
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
|
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
|
||||||
entries = append(entries, d)
|
entries = append(entries, d)
|
||||||
}
|
}
|
||||||
@ -357,7 +393,7 @@ func (f *Fs) Mkdir(dir string) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
f.dev = readDevice(fi)
|
f.dev = readDevice(fi, f.opt.OneFileSystem)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -643,7 +679,7 @@ func (o *Object) Storable() bool {
|
|||||||
}
|
}
|
||||||
mode := o.mode
|
mode := o.mode
|
||||||
if mode&os.ModeSymlink != 0 {
|
if mode&os.ModeSymlink != 0 {
|
||||||
if !*skipSymlinks {
|
if !o.fs.opt.SkipSymlinks {
|
||||||
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
|
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
@ -668,7 +704,7 @@ type localOpenFile struct {
|
|||||||
|
|
||||||
// Read bytes from the object - see io.Reader
|
// Read bytes from the object - see io.Reader
|
||||||
func (file *localOpenFile) Read(p []byte) (n int, err error) {
|
func (file *localOpenFile) Read(p []byte) (n int, err error) {
|
||||||
if !*noCheckUpdated {
|
if !file.o.fs.opt.NoCheckUpdated {
|
||||||
// Check if file has the same size and modTime
|
// Check if file has the same size and modTime
|
||||||
fi, err := file.fd.Stat()
|
fi, err := file.fd.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -878,7 +914,7 @@ func (f *Fs) cleanPath(s string) string {
|
|||||||
s = s2
|
s = s2
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !f.nounc {
|
if !f.opt.NoUNC {
|
||||||
// Convert to UNC
|
// Convert to UNC
|
||||||
s = uncPath(s)
|
s = uncPath(s)
|
||||||
}
|
}
|
||||||
|
@ -45,7 +45,7 @@ func TestUpdatingCheck(t *testing.T) {
 	fi, err := fd.Stat()
 	require.NoError(t, err)
-	o := &Object{size: fi.Size(), modTime: fi.ModTime()}
+	o := &Object{size: fi.Size(), modTime: fi.ModTime(), fs: &Fs{}}
 	wrappedFd := readers.NewLimitedReadCloser(fd, -1)
 	hash, err := hash.NewMultiHasherTypes(hash.Supported)
 	require.NoError(t, err)

@ -65,11 +65,7 @@ func TestUpdatingCheck(t *testing.T) {
 	require.Errorf(t, err, "can't copy - source file is being updated")

 	// turn the checking off and try again
-	*noCheckUpdated = true
-	defer func() {
-		*noCheckUpdated = false
-	}()
+	in.o.fs.opt.NoCheckUpdated = true

 	r.WriteFile(filePath, "content updated", time.Now())
 	_, err = in.Read(buf)
@ -8,6 +8,6 @@ import "os"

 // readDevice turns a valid os.FileInfo into a device number,
 // returning devUnset if it fails.
-func readDevice(fi os.FileInfo) uint64 {
+func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
 	return devUnset
 }
@ -9,17 +9,12 @@ import (
 	"syscall"

 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config/flags"
-)
-
-var (
-	oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
 )

 // readDevice turns a valid os.FileInfo into a device number,
 // returning devUnset if it fails.
-func readDevice(fi os.FileInfo) uint64 {
-	if !*oneFileSystem {
+func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 {
+	if !oneFileSystem {
 		return devUnset
 	}
 	statT, ok := fi.Sys().(*syscall.Stat_t)
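With the package-level flag gone, readDevice takes the setting as an argument, so the unix and non-unix variants share one signature and no global state. A caller-side sketch inside the local package (rootDevice is a made-up helper name; readDevice and devUnset are the real identifiers from above):

// rootDevice reports the device number of path, or devUnset if it can't
// be determined. The result is only meaningful when the user asked rclone
// not to cross filesystem boundaries.
func rootDevice(path string, oneFileSystem bool) uint64 {
	fi, err := os.Lstat(path)
	if err != nil {
		return devUnset
	}
	return readDevice(fi, oneFileSystem)
}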
@ -24,8 +24,8 @@ import (
 	"time"

 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"

@ -44,7 +44,6 @@ const (
 )

 var (
-	megaDebug   = flags.BoolP("mega-debug", "", false, "If set then output more debug from mega.")
 	megaCacheMu sync.Mutex                // mutex for the below
 	megaCache   = map[string]*mega.Mega{} // cache logged in Mega's by user
 )

@ -58,20 +57,33 @@ func init() {
 		Options: []fs.Option{{
 			Name:     "user",
 			Help:     "User name",
-			Optional: true,
+			Required: true,
 		}, {
 			Name:       "pass",
 			Help:       "Password.",
-			Optional:   true,
+			Required:   true,
 			IsPassword: true,
+		}, {
+			Name:     "debug",
+			Help:     "If set then output more debug from mega.",
+			Default:  false,
+			Advanced: true,
 		}},
 	})
 }

+// Options defines the configuration for this backend
+type Options struct {
+	User  string `config:"user"`
+	Pass  string `config:"pass"`
+	Debug bool   `config:"debug"`
+}
+
 // Fs represents a remote mega
 type Fs struct {
 	name     string       // name of this remote
 	root     string       // the path we are working on
+	opt      Options      // parsed config options
 	features *fs.Features // optional features
 	srv      *mega.Mega   // the connection to the server
 	pacer    *pacer.Pacer // pacer for API calls

@ -145,12 +157,16 @@ func (f *Fs) readMetaDataForPath(remote string) (info *mega.Node, err error) {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
-	user := config.FileGet(name, "user")
-	pass := config.FileGet(name, "pass")
-	if pass != "" {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+	if opt.Pass != "" {
 		var err error
-		pass, err = obscure.Reveal(pass)
+		opt.Pass, err = obscure.Reveal(opt.Pass)
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't decrypt password")
 		}

@ -163,30 +179,31 @@ func NewFs(name, root string) (fs.Fs, error) {
 	// them up between different remotes.
 	megaCacheMu.Lock()
 	defer megaCacheMu.Unlock()
-	srv := megaCache[user]
+	srv := megaCache[opt.User]
 	if srv == nil {
 		srv = mega.New().SetClient(fshttp.NewClient(fs.Config))
 		srv.SetRetries(fs.Config.LowLevelRetries) // let mega do the low level retries
 		srv.SetLogger(func(format string, v ...interface{}) {
 			fs.Infof("*go-mega*", format, v...)
 		})
-		if *megaDebug {
+		if opt.Debug {
 			srv.SetDebugger(func(format string, v ...interface{}) {
 				fs.Debugf("*go-mega*", format, v...)
 			})
 		}

-		err := srv.Login(user, pass)
+		err := srv.Login(opt.User, opt.Pass)
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't login")
 		}
-		megaCache[user] = srv
+		megaCache[opt.User] = srv
 	}

 	root = parsePath(root)
 	f := &Fs{
 		name:  name,
 		root:  root,
+		opt:   *opt,
 		srv:   srv,
 		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}

@ -196,7 +213,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	}).Fill(f)

 	// Find the root node and check if it is a file or not
-	_, err := f.findRoot(false)
+	_, err = f.findRoot(false)
 	switch err {
 	case nil:
 		// root node found and is a directory
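The per-user connection cache survives the refactor unchanged apart from being keyed off opt.User. A condensed sketch of the pattern (getSrv is a made-up helper name; mega.New and Login are used exactly as in the diff above):

var (
	cacheMu sync.Mutex
	cache   = map[string]*mega.Mega{}
)

// getSrv returns a logged-in connection, reusing one per user so that
// several remotes pointing at the same account share a session.
func getSrv(user, pass string) (*mega.Mega, error) {
	cacheMu.Lock()
	defer cacheMu.Unlock()
	if srv, ok := cache[user]; ok {
		return srv, nil
	}
	srv := mega.New()
	if err := srv.Login(user, pass); err != nil {
		return nil, err
	}
	cache[user] = srv
	return srv, nil
}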
@ -18,7 +18,8 @@ import (
 	"github.com/ncw/rclone/backend/onedrive/api"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/hash"

@ -73,9 +74,7 @@ var (
 		RedirectURL: oauthutil.RedirectLocalhostURL,
 	}
 	oauthBusinessResource = oauth2.SetAuthURLParam("resource", discoveryServiceURL)
-	chunkSize             = fs.SizeSuffix(10 * 1024 * 1024)
 	sharedURL             = "https://api.onedrive.com/v1.0/drives" // root URL for remote shared resources
 )

@ -84,7 +83,7 @@ func init() {
 		Name:        "onedrive",
 		Description: "Microsoft OneDrive",
 		NewFs:       NewFs,
-		Config: func(name string) {
+		Config: func(name string, m configmap.Mapper) {
 			// choose account type
 			fmt.Printf("Choose OneDrive account type?\n")
 			fmt.Printf(" * Say b for a OneDrive business account\n")

@ -93,12 +92,12 @@ func init() {
 			if isPersonal {
 				// for personal accounts we don't save a field about the account
-				err := oauthutil.Config("onedrive", name, oauthPersonalConfig)
+				err := oauthutil.Config("onedrive", name, m, oauthPersonalConfig)
 				if err != nil {
 					log.Fatalf("Failed to configure token: %v", err)
 				}
 			} else {
-				err := oauthutil.ConfigErrorCheck("onedrive", name, func(req *http.Request) oauthutil.AuthError {
+				err := oauthutil.ConfigErrorCheck("onedrive", name, m, func(req *http.Request) oauthutil.AuthError {
 					var resp oauthutil.AuthError

 					resp.Name = req.URL.Query().Get("error")

@ -113,7 +112,7 @@ func init() {
 			}

 			// Are we running headless?
-			if config.FileGet(name, config.ConfigAutomatic) != "" {
+			if automatic, _ := m.Get(config.ConfigAutomatic); automatic != "" {
 				// Yes, okay we are done
 				return
 			}

@ -127,7 +126,7 @@ func init() {
 				Services []serviceResource `json:"value"`
 			}

-			oAuthClient, _, err := oauthutil.NewClient(name, oauthBusinessConfig)
+			oAuthClient, _, err := oauthutil.NewClient(name, m, oauthBusinessConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure OneDrive: %v", err)
 				return

@ -172,13 +171,13 @@ func init() {
 				foundService = config.Choose("Choose resource URL", resourcesID, resourcesURL, false)
 			}

-			config.FileSet(name, configResourceURL, foundService)
+			m.Set(configResourceURL, foundService)
 			oauthBusinessResource = oauth2.SetAuthURLParam("resource", foundService)

 			// get the token from the initial config
 			// we need to update the token with a resource
 			// specific token we will query now
-			token, err := oauthutil.GetToken(name)
+			token, err := oauthutil.GetToken(name, m)
 			if err != nil {
 				fs.Errorf(nil, "Error while getting token: %s", err)
 				return

@ -221,7 +220,7 @@ func init() {
 			token.RefreshToken = jsonToken.RefreshToken

 			// finally save them in the config
-			err = oauthutil.PutToken(name, token, true)
+			err = oauthutil.PutToken(name, m, token, true)
 			if err != nil {
 				fs.Errorf(nil, "Error while setting token: %s", err)
 			}

@ -229,20 +228,30 @@ func init() {
 		},
 		Options: []fs.Option{{
 			Name: config.ConfigClientID,
-			Help: "Microsoft App Client Id - leave blank normally.",
+			Help: "Microsoft App Client Id\nLeave blank normally.",
 		}, {
 			Name: config.ConfigClientSecret,
-			Help: "Microsoft App Client Secret - leave blank normally.",
+			Help: "Microsoft App Client Secret\nLeave blank normally.",
+		}, {
+			Name:     "chunk_size",
+			Help:     "Chunk size to upload files with - must be multiple of 320k.",
+			Default:  fs.SizeSuffix(10 * 1024 * 1024),
+			Advanced: true,
 		}},
 	})
-
-	flags.VarP(&chunkSize, "onedrive-chunk-size", "", "Above this size files will be chunked - must be multiple of 320k.")
 }

+// Options defines the configuration for this backend
+type Options struct {
+	ChunkSize   fs.SizeSuffix `config:"chunk_size"`
+	ResourceURL string        `config:"resource_url"`
+}
+
 // Fs represents a remote one drive
 type Fs struct {
 	name     string             // name of this remote
 	root     string             // the path we are working on
+	opt      Options            // parsed options
 	features *fs.Features       // optional features
 	srv      *rest.Client       // the connection to the one drive server
 	dirCache *dircache.DirCache // Map of directory path to directory id

@ -345,27 +354,35 @@ func errorHandler(resp *http.Response) error {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
-	// get the resource URL from the config file
-	resourceURL := config.FileGet(name, configResourceURL, "")
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+	if opt.ChunkSize%(320*1024) != 0 {
+		return nil, errors.Errorf("chunk size %d is not a multiple of 320k", opt.ChunkSize)
+	}
 	// if we have a resource URL it's a business account otherwise a personal one
+	isBusiness := opt.ResourceURL != ""
 	var rootURL string
 	var oauthConfig *oauth2.Config
-	if resourceURL == "" {
+	if !isBusiness {
 		// personal account setup
 		oauthConfig = oauthPersonalConfig
 		rootURL = rootURLPersonal
 	} else {
 		// business account setup
 		oauthConfig = oauthBusinessConfig
-		rootURL = resourceURL + "_api/v2.0/drives/me"
-		sharedURL = resourceURL + "_api/v2.0/drives"
+		rootURL = opt.ResourceURL + "_api/v2.0/drives/me"
+		sharedURL = opt.ResourceURL + "_api/v2.0/drives"

 		// update the URL in the AuthOptions
-		oauthBusinessResource = oauth2.SetAuthURLParam("resource", resourceURL)
+		oauthBusinessResource = oauth2.SetAuthURLParam("resource", opt.ResourceURL)
 	}
 	root = parsePath(root)
-	oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
+	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
 		log.Fatalf("Failed to configure OneDrive: %v", err)
 	}

@ -373,9 +390,10 @@ func NewFs(name, root string) (fs.Fs, error) {
 	f := &Fs{
 		name:       name,
 		root:       root,
+		opt:        *opt,
 		srv:        rest.NewClient(oAuthClient).SetRoot(rootURL),
 		pacer:      pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
-		isBusiness: resourceURL != "",
+		isBusiness: isBusiness,
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,

@ -1225,10 +1243,6 @@ func (o *Object) cancelUploadSession(url string) (err error) {

 // uploadMultipart uploads a file using multipart upload
 func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (info *api.Item, err error) {
-	if chunkSize%(320*1024) != 0 {
-		return nil, errors.Errorf("chunk size %d is not a multiple of 320k", chunkSize)
-	}
-
 	// Create upload session
 	fs.Debugf(o, "Starting multipart upload")
 	session, err := o.createUploadSession(modTime)

@ -1252,7 +1266,7 @@ func (o *Object) uploadMultipart(in io.Reader, size int64, modTime time.Time) (i
 	remaining := size
 	position := int64(0)
 	for remaining > 0 {
-		n := int64(chunkSize)
+		n := int64(o.fs.opt.ChunkSize)
 		if remaining < n {
 			n = remaining
 		}
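Moving chunk_size from a pflag into []fs.Option is what makes it settable from all three sources the commit message promises. Under the new scheme the same value could plausibly be supplied any of these ways (--onedrive-chunk-size matches the old flag name removed above; the RCLONE_ONEDRIVE_CHUNK_SIZE environment variable name and the remote name "myonedrive" are assumptions about the generated naming, not taken from this diff):

# in the config file
[myonedrive]
type = onedrive
chunk_size = 10M

# on the command line
rclone copy --onedrive-chunk-size 10M src myonedrive:dst

# via the environment
RCLONE_ONEDRIVE_CHUNK_SIZE=10M rclone copy src myonedrive:dst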
@ -12,7 +12,8 @@ import (
 	"time"

 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/fshttp"

@ -37,23 +38,30 @@ func init() {
 		Description: "OpenDrive",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:     "username",
 			Help:     "Username",
+			Required: true,
 		}, {
 			Name:       "password",
 			Help:       "Password.",
 			IsPassword: true,
+			Required:   true,
 		}},
 	})
 }

+// Options defines the configuration for this backend
+type Options struct {
+	UserName string `config:"username"`
+	Password string `config:"password"`
+}
+
 // Fs represents a remote server
 type Fs struct {
 	name     string          // name of this remote
 	root     string          // the path we are working on
+	opt      Options         // parsed options
 	features *fs.Features    // optional features
-	username string          // account name
-	password string          // auth key
 	srv      *rest.Client    // the connection to the server
 	pacer    *pacer.Pacer    // To pace and retry the API calls
 	session  UserSessionInfo // contains the session data

@ -110,27 +118,31 @@ func (f *Fs) DirCacheFlush() {
 }

 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
 	root = parsePath(root)
-	username := config.FileGet(name, "username")
-	if username == "" {
+	if opt.UserName == "" {
 		return nil, errors.New("username not found")
 	}
-	password, err := obscure.Reveal(config.FileGet(name, "password"))
+	opt.Password, err = obscure.Reveal(opt.Password)
 	if err != nil {
-		return nil, errors.New("password coudl not revealed")
+		return nil, errors.New("password could not be revealed")
 	}
-	if password == "" {
+	if opt.Password == "" {
 		return nil, errors.New("password not found")
 	}

 	f := &Fs{
 		name:  name,
-		username: username,
-		password: password,
 		root:  root,
+		opt:   *opt,
 		srv:   rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
 		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}

 	f.dirCache = dircache.New(root, "0", f)

@ -141,7 +153,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	// get sessionID
 	var resp *http.Response
 	err = f.pacer.Call(func() (bool, error) {
-		account := Account{Username: username, Password: password}
+		account := Account{Username: opt.UserName, Password: opt.Password}

 		opts := rest.Opts{
 			Method: "POST",
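Passwords live in the config obscured, which is why NewFs runs opt.Password through obscure.Reveal before logging in. A standalone round-trip sketch using the same package ("s3cret" is an example password):

package main

import (
	"fmt"

	"github.com/ncw/rclone/fs/config/obscure"
)

func main() {
	// What "rclone config" would store for the password...
	obscured, err := obscure.Obscure("s3cret")
	if err != nil {
		panic(err)
	}
	fmt.Println("stored:", obscured)
	// ...and what NewFs recovers from it before use.
	plain, err := obscure.Reveal(obscured)
	if err != nil {
		panic(err)
	}
	fmt.Println("revealed:", plain) // s3cret
}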
@ -23,6 +23,8 @@ import (
 	"github.com/ncw/rclone/backend/pcloud/api"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/hash"

@ -65,26 +67,31 @@ func init() {
 		Name:        "pcloud",
 		Description: "Pcloud",
 		NewFs:       NewFs,
-		Config: func(name string) {
-			err := oauthutil.Config("pcloud", name, oauthConfig)
+		Config: func(name string, m configmap.Mapper) {
+			err := oauthutil.Config("pcloud", name, m, oauthConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
 		},
 		Options: []fs.Option{{
 			Name: config.ConfigClientID,
-			Help: "Pcloud App Client Id - leave blank normally.",
+			Help: "Pcloud App Client Id\nLeave blank normally.",
 		}, {
 			Name: config.ConfigClientSecret,
-			Help: "Pcloud App Client Secret - leave blank normally.",
+			Help: "Pcloud App Client Secret\nLeave blank normally.",
 		}},
 	})
 }

+// Options defines the configuration for this backend
+type Options struct {
+}
+
 // Fs represents a remote pcloud
 type Fs struct {
 	name     string             // name of this remote
 	root     string             // the path we are working on
+	opt      Options            // parsed options
 	features *fs.Features       // optional features
 	srv      *rest.Client       // the connection to the server
 	dirCache *dircache.DirCache // Map of directory path to directory id

@ -229,9 +236,15 @@ func errorHandler(resp *http.Response) error {
 }

 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
 	root = parsePath(root)
-	oAuthClient, ts, err := oauthutil.NewClient(name, oauthConfig)
+	oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
 	if err != nil {
 		log.Fatalf("Failed to configure Pcloud: %v", err)
 	}

@ -239,6 +252,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 	f := &Fs{
 		name:  name,
 		root:  root,
+		opt:   *opt,
 		srv:   rest.NewClient(oAuthClient).SetRoot(rootURL),
 		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
 	}
@ -17,7 +17,8 @@ import (
 	"time"

 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/ncw/rclone/fs/walk"

@ -34,49 +35,43 @@ func init() {
 		Description: "QingCloud Object Storage",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:    "env_auth",
 			Help:    "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key is blank.",
+			Default: false,
-			Examples: []fs.OptionExample{
-				{
-					Value: "false",
-					Help:  "Enter QingStor credentials in the next step",
-				}, {
-					Value: "true",
-					Help:  "Get QingStor credentials from the environment (env vars or IAM)",
-				},
-			},
+			Examples: []fs.OptionExample{{
+				Value: "false",
+				Help:  "Enter QingStor credentials in the next step",
+			}, {
+				Value: "true",
+				Help:  "Get QingStor credentials from the environment (env vars or IAM)",
+			}},
 		}, {
 			Name: "access_key_id",
-			Help: "QingStor Access Key ID - leave blank for anonymous access or runtime credentials.",
+			Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "secret_access_key",
-			Help: "QingStor Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
+			Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
 		}, {
 			Name: "endpoint",
 			Help: "Enter an endpoint URL to connect to the QingStor API.\nLeave blank to use the default value \"https://qingstor.com:443\"",
 		}, {
 			Name: "zone",
-			Help: "Choose or enter a zone to connect to. Default is \"pek3a\".",
+			Help: "Zone to connect to.\nDefault is \"pek3a\".",
-			Examples: []fs.OptionExample{
-				{
-					Value: "pek3a",
-					Help:  "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
-				},
-				{
-					Value: "sh1a",
-					Help:  "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
-				},
-				{
-					Value: "gd2a",
-					Help:  "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
-				},
-			},
+			Examples: []fs.OptionExample{{
+				Value: "pek3a",
+				Help:  "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
+			}, {
+				Value: "sh1a",
+				Help:  "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
+			}, {
+				Value: "gd2a",
+				Help:  "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
+			}},
 		}, {
 			Name:     "connection_retries",
-			Help:     "Number of connection retries.\nLeave blank will use the default value \"3\".",
+			Help:     "Number of connection retries.",
+			Default:  3,
+			Advanced: true,
 		}},
 	})
 }

@ -95,17 +90,28 @@ func timestampToTime(tp int64) time.Time {
 	return tm.UTC()
 }

+// Options defines the configuration for this backend
+type Options struct {
+	EnvAuth           bool   `config:"env_auth"`
+	AccessKeyID       string `config:"access_key_id"`
+	SecretAccessKey   string `config:"secret_access_key"`
+	Endpoint          string `config:"endpoint"`
+	Zone              string `config:"zone"`
+	ConnectionRetries int    `config:"connection_retries"`
+}
+
 // Fs represents a remote qingstor server
 type Fs struct {
 	name          string       // The name of the remote
+	root          string       // The root is a subdir, is a special object
+	opt           Options      // parsed options
+	features      *fs.Features // optional features
+	svc           *qs.Service  // The connection to the qingstor server
 	zone          string       // The zone we are working on
 	bucket        string       // The bucket we are working on
 	bucketOKMu    sync.Mutex   // mutex to protect bucketOK and bucketDeleted
 	bucketOK      bool         // true if we have created the bucket
 	bucketDeleted bool         // true if we have deleted the bucket
-	root          string       // The root is a subdir, is a special object
-	features      *fs.Features // optional features
-	svc           *qs.Service  // The connection to the qingstor server
 }

 // Object describes a qingstor object

@ -165,12 +171,12 @@ func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
 }

 // qsServiceConnection makes a connection to qingstor
-func qsServiceConnection(name string) (*qs.Service, error) {
-	accessKeyID := config.FileGet(name, "access_key_id")
-	secretAccessKey := config.FileGet(name, "secret_access_key")
+func qsServiceConnection(opt *Options) (*qs.Service, error) {
+	accessKeyID := opt.AccessKeyID
+	secretAccessKey := opt.SecretAccessKey

 	switch {
-	case config.FileGetBool(name, "env_auth", false):
+	case opt.EnvAuth:
 		// No need for empty checks if "env_auth" is true
 	case accessKeyID == "" && secretAccessKey == "":
 		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction

@ -184,7 +190,7 @@ func qsServiceConnection(name string) (*qs.Service, error) {
 	host := "qingstor.com"
 	port := 443

-	endpoint := config.FileGet(name, "endpoint", "")
+	endpoint := opt.Endpoint
 	if endpoint != "" {
 		_protocol, _host, _port, err := qsParseEndpoint(endpoint)

@ -204,48 +210,49 @@ func qsServiceConnection(name string) (*qs.Service, error) {
 	}

-	connectionRetries := 3
-	retries := config.FileGet(name, "connection_retries", "")
-	if retries != "" {
-		connectionRetries, _ = strconv.Atoi(retries)
-	}
-
 	cf, err := qsConfig.NewDefault()
+	if err != nil {
+		return nil, err
+	}
 	cf.AccessKeyID = accessKeyID
 	cf.SecretAccessKey = secretAccessKey
 	cf.Protocol = protocol
 	cf.Host = host
 	cf.Port = port
-	cf.ConnectionRetries = connectionRetries
+	cf.ConnectionRetries = opt.ConnectionRetries
 	cf.Connection = fshttp.NewClient(fs.Config)

-	svc, _ := qs.Init(cf)
-
-	return svc, err
+	return qs.Init(cf)
 }

 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
 	bucket, key, err := qsParsePath(root)
 	if err != nil {
 		return nil, err
 	}
-	svc, err := qsServiceConnection(name)
+	svc, err := qsServiceConnection(opt)
 	if err != nil {
 		return nil, err
 	}

-	zone := config.FileGet(name, "zone")
-	if zone == "" {
-		zone = "pek3a"
+	if opt.Zone == "" {
+		opt.Zone = "pek3a"
 	}

 	f := &Fs{
 		name:   name,
-		zone:   zone,
 		root:   key,
-		bucket: bucket,
+		opt:    *opt,
 		svc:    svc,
+		zone:   opt.Zone,
+		bucket: bucket,
 	}
 	f.features = (&fs.Features{
 		ReadMimeType: true,

@ -258,7 +265,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 		f.root += "/"
 	}
 	// Check to see if the object exists
-	bucketInit, err := svc.Bucket(bucket, zone)
+	bucketInit, err := svc.Bucket(bucket, opt.Zone)
 	if err != nil {
 		return nil, err
 	}
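A side effect worth noting: qsServiceConnection no longer reads the config file at all, so it can be exercised directly with a literal Options value. A test-style sketch inside the qingstor package (the credential strings are placeholders):

func TestConnectionSketch(t *testing.T) {
	opt := &Options{
		AccessKeyID:       "AKIDEXAMPLE",   // placeholder
		SecretAccessKey:   "SECRETEXAMPLE", // placeholder
		ConnectionRetries: 3,
	}
	// No remote name and no config file needed any more.
	svc, err := qsServiceConnection(opt)
	if err != nil {
		t.Fatal(err)
	}
	_ = svc
}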
183
backend/s3/s3.go
183
backend/s3/s3.go
@ -37,8 +37,8 @@ import (
|
|||||||
"github.com/aws/aws-sdk-go/service/s3"
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||||
"github.com/ncw/rclone/fs"
|
"github.com/ncw/rclone/fs"
|
||||||
"github.com/ncw/rclone/fs/config"
|
"github.com/ncw/rclone/fs/config/configmap"
|
||||||
"github.com/ncw/rclone/fs/config/flags"
|
"github.com/ncw/rclone/fs/config/configstruct"
|
||||||
"github.com/ncw/rclone/fs/fshttp"
|
"github.com/ncw/rclone/fs/fshttp"
|
||||||
"github.com/ncw/rclone/fs/hash"
|
"github.com/ncw/rclone/fs/hash"
|
||||||
"github.com/ncw/rclone/fs/walk"
|
"github.com/ncw/rclone/fs/walk"
|
||||||
@ -82,8 +82,9 @@ func init() {
|
|||||||
Help: "Any other S3 compatible provider",
|
Help: "Any other S3 compatible provider",
|
||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "env_auth",
|
Name: "env_auth",
|
||||||
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.",
|
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
|
||||||
|
Default: false,
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "false",
|
Value: "false",
|
||||||
Help: "Enter AWS credentials in the next step",
|
Help: "Enter AWS credentials in the next step",
|
||||||
@ -93,10 +94,10 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "access_key_id",
|
Name: "access_key_id",
|
||||||
Help: "AWS Access Key ID - leave blank for anonymous access or runtime credentials.",
|
Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
|
||||||
}, {
|
}, {
|
||||||
Name: "secret_access_key",
|
Name: "secret_access_key",
|
||||||
Help: "AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
|
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
|
||||||
}, {
|
}, {
|
||||||
Name: "region",
|
Name: "region",
|
||||||
Help: "Region to connect to.",
|
Help: "Region to connect to.",
|
||||||
@ -146,7 +147,7 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "region",
|
Name: "region",
|
||||||
Help: "Region to connect to. Leave blank if you are using an S3 clone and you don't have a region.",
|
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
|
||||||
Provider: "!AWS",
|
Provider: "!AWS",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "",
|
Value: "",
|
||||||
@ -293,7 +294,7 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "location_constraint",
|
Name: "location_constraint",
|
||||||
Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
|
Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
|
||||||
Provider: "AWS",
|
Provider: "AWS",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "",
|
Value: "",
|
||||||
@ -340,7 +341,7 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "location_constraint",
|
Name: "location_constraint",
|
||||||
Help: "Location constraint - must match endpoint when using IBM Cloud Public. For on-prem COS, do not make a selection from this list, hit enter",
|
Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
|
||||||
Provider: "IBMCOS",
|
Provider: "IBMCOS",
|
||||||
Examples: []fs.OptionExample{{
|
Examples: []fs.OptionExample{{
|
||||||
Value: "us-standard",
|
Value: "us-standard",
|
||||||
@ -441,7 +442,7 @@ func init() {
|
|||||||
}},
|
}},
|
||||||
}, {
|
}, {
|
||||||
Name: "location_constraint",
|
Name: "location_constraint",
|
||||||
Help: "Location constraint - must be set to match the Region. Leave blank if not sure. Used when creating buckets only.",
|
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
|
||||||
Provider: "!AWS,IBMCOS",
|
Provider: "!AWS,IBMCOS",
|
||||||
}, {
|
}, {
|
||||||
Name: "acl",
|
Name: "acl",
|
||||||
@ -518,10 +519,28 @@ func init() {
|
|||||||
Value: "ONEZONE_IA",
|
Value: "ONEZONE_IA",
|
||||||
Help: "One Zone Infrequent Access storage class",
|
Help: "One Zone Infrequent Access storage class",
|
||||||
}},
|
}},
|
||||||
},
|
}, {
|
||||||
},
|
Name: "chunk_size",
|
||||||
|
Help: "Chunk size to use for uploading",
|
||||||
|
Default: fs.SizeSuffix(s3manager.MinUploadPartSize),
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "disable_checksum",
|
||||||
|
Help: "Don't store MD5 checksum with object metadata",
|
||||||
|
Default: false,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "session_token",
|
||||||
|
Help: "An AWS session token",
|
||||||
|
Hide: fs.OptionHideBoth,
|
||||||
|
Advanced: true,
|
||||||
|
}, {
|
||||||
|
Name: "upload_concurrency",
|
||||||
|
Help: "Concurrency for multipart uploads.",
|
||||||
|
Default: 2,
|
||||||
|
Advanced: true,
|
||||||
|
}},
|
||||||
})
|
})
|
||||||
flags.VarP(&s3ChunkSize, "s3-chunk-size", "", "Chunk size to use for uploading")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Constants
|
// Constants
|
||||||
@ -534,31 +553,36 @@ const (
|
|||||||
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
|
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
|
||||||
)
|
)
|
||||||
|
|
||||||
// Globals
|
// Options defines the configuration for this backend
|
||||||
var (
|
type Options struct {
|
||||||
// Flags
|
Provider string `config:"provider"`
|
||||||
s3ACL = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
|
EnvAuth bool `config:"env_auth"`
|
||||||
s3StorageClass = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA|ONEZONE_IA)")
|
AccessKeyID string `config:"access_key_id"`
|
||||||
s3ChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
|
SecretAccessKey string `config:"secret_access_key"`
|
||||||
s3DisableChecksum = flags.BoolP("s3-disable-checksum", "", false, "Don't store MD5 checksum with object metadata")
|
Region string `config:"region"`
|
||||||
s3UploadConcurrency = flags.IntP("s3-upload-concurrency", "", 2, "Concurrency for multipart uploads")
|
Endpoint string `config:"endpoint"`
|
||||||
)
|
LocationConstraint string `config:"location_constraint"`
|
||||||
|
ACL string `config:"acl"`
|
||||||
|
ServerSideEncryption string `config:"server_side_encryption"`
|
||||||
|
StorageClass string `config:"storage_class"`
|
||||||
|
ChunkSize fs.SizeSuffix `config:"chunk_size"`
|
||||||
|
DisableChecksum bool `config:"disable_checksum"`
|
||||||
|
SessionToken string `config:"session_token"`
|
||||||
|
UploadConcurrency int `config:"upload_concurrency"`
|
||||||
|
}
|
||||||
|
|
||||||
// Fs represents a remote s3 server
|
// Fs represents a remote s3 server
|
||||||
type Fs struct {
|
type Fs struct {
|
||||||
name string // the name of the remote
|
name string // the name of the remote
|
||||||
root string // root of the bucket - ignore all objects above this
|
root string // root of the bucket - ignore all objects above this
|
||||||
features *fs.Features // optional features
|
opt Options // parsed options
|
||||||
c *s3.S3 // the connection to the s3 server
|
features *fs.Features // optional features
|
||||||
ses *session.Session // the s3 session
|
c *s3.S3 // the connection to the s3 server
|
||||||
bucket string // the bucket we are working on
|
ses *session.Session // the s3 session
|
||||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
bucket string // the bucket we are working on
|
||||||
bucketOK bool // true if we have created the bucket
|
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||||
bucketDeleted bool // true if we have deleted the bucket
|
bucketOK bool // true if we have created the bucket
|
||||||
acl string // ACL for new buckets / objects
|
bucketDeleted bool // true if we have deleted the bucket
|
||||||
locationConstraint string // location constraint of new buckets
|
|
||||||
sse string // the type of server-side encryption
|
|
||||||
storageClass string // storage class
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Object describes a s3 object
|
// Object describes a s3 object
|
||||||
@ -620,12 +644,12 @@ func s3ParsePath(path string) (bucket, directory string, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// s3Connection makes a connection to s3
|
// s3Connection makes a connection to s3
|
||||||
func s3Connection(name string) (*s3.S3, *session.Session, error) {
|
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
|
||||||
// Make the auth
|
// Make the auth
|
||||||
v := credentials.Value{
|
v := credentials.Value{
|
||||||
AccessKeyID: config.FileGet(name, "access_key_id"),
|
AccessKeyID: opt.AccessKeyID,
|
||||||
SecretAccessKey: config.FileGet(name, "secret_access_key"),
|
SecretAccessKey: opt.SecretAccessKey,
|
||||||
SessionToken: config.FileGet(name, "session_token"),
|
SessionToken: opt.SessionToken,
|
||||||
}
|
}
|
||||||
|
|
||||||
lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
|
lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
|
||||||
@@ -660,7 +684,7 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 	cred := credentials.NewChainCredentials(providers)
 
 	switch {
-	case config.FileGetBool(name, "env_auth", false):
+	case opt.EnvAuth:
 		// No need for empty checks if "env_auth" is true
 	case v.AccessKeyID == "" && v.SecretAccessKey == "":
 		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction

@@ -671,26 +695,24 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 		return nil, nil, errors.New("secret_access_key not found")
 	}
 
-	endpoint := config.FileGet(name, "endpoint")
-	region := config.FileGet(name, "region")
-	if region == "" && endpoint == "" {
-		endpoint = "https://s3.amazonaws.com/"
+	if opt.Region == "" && opt.Endpoint == "" {
+		opt.Endpoint = "https://s3.amazonaws.com/"
 	}
-	if region == "" {
-		region = "us-east-1"
+	if opt.Region == "" {
+		opt.Region = "us-east-1"
 	}
 	awsConfig := aws.NewConfig().
-		WithRegion(region).
+		WithRegion(opt.Region).
 		WithMaxRetries(maxRetries).
 		WithCredentials(cred).
-		WithEndpoint(endpoint).
+		WithEndpoint(opt.Endpoint).
 		WithHTTPClient(fshttp.NewClient(fs.Config)).
 		WithS3ForcePathStyle(true)
 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
 	ses := session.New()
 	c := s3.New(ses, awsConfig)
-	if region == "other-v2-signature" {
-		fs.Debugf(name, "Using v2 auth")
+	if opt.Region == "other-v2-signature" {
+		fs.Debugf(nil, "Using v2 auth")
 		signer := func(req *request.Request) {
 			// Ignore AnonymousCredentials object
 			if req.Config.Credentials == credentials.AnonymousCredentials {
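
Note that opt.EnvAuth is already a bool here: the old code converted strings at every call site with config.FileGetBool, while configstruct now does the conversion once when the Options struct is filled. A sketch of the typed conversion (it appears configstruct also honours types with a flag-style Set method, which is how the fs.SizeSuffix fields elsewhere in this commit get parsed; treat that detail as an assumption):

    type Options struct {
        EnvAuth   bool          `config:"env_auth"`
        ChunkSize fs.SizeSuffix `config:"chunk_size"`
    }

    m := configmap.Simple{"env_auth": "true", "chunk_size": "10M"}
    opt := new(Options)
    err := configstruct.Set(m, opt)
    // err == nil, opt.EnvAuth == true, opt.ChunkSize == 10 MiB
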
@@ -706,40 +728,37 @@ func s3Connection(name string) (*s3.S3, *session.Session, error) {
 }
 
 // NewFs constructs an Fs from the path, bucket:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+	if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
+		return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
+	}
 	bucket, directory, err := s3ParsePath(root)
 	if err != nil {
 		return nil, err
 	}
-	c, ses, err := s3Connection(name)
+	c, ses, err := s3Connection(opt)
 	if err != nil {
 		return nil, err
 	}
 	f := &Fs{
 		name:   name,
-		c:      c,
-		bucket: bucket,
-		ses:    ses,
-		acl:    config.FileGet(name, "acl"),
-		root:   directory,
-		locationConstraint: config.FileGet(name, "location_constraint"),
-		sse:                config.FileGet(name, "server_side_encryption"),
-		storageClass:       config.FileGet(name, "storage_class"),
+		root:   directory,
+		opt:    *opt,
+		c:      c,
+		bucket: bucket,
+		ses:    ses,
 	}
 	f.features = (&fs.Features{
 		ReadMimeType:  true,
 		WriteMimeType: true,
 		BucketBased:   true,
 	}).Fill(f)
-	if *s3ACL != "" {
-		f.acl = *s3ACL
-	}
-	if *s3StorageClass != "" {
-		f.storageClass = *s3StorageClass
-	}
-	if s3ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
-		return nil, errors.Errorf("s3 chunk size must be >= %v", fs.SizeSuffix(s3manager.MinUploadPartSize))
-	}
 	if f.root != "" {
 		f.root += "/"
 		// Check to see if the object exists
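
With the Mapper in the NewFs signature a backend can now be built programmatically, without a config file. A hypothetical direct call into this package (values illustrative; when invoked via fs.NewFs the option defaults are layered into the map for you, so chunk_size is supplied here explicitly to satisfy the validation above):

    m := configmap.Simple{
        "access_key_id":     "AKIAEXAMPLE",
        "secret_access_key": "SECRET",
        "region":            "us-east-1",
        "chunk_size":        "5M", // normally injected from the Option Default
    }
    f, err := NewFs("mys3", "mybucket/path", m)
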
@@ -1064,11 +1083,11 @@ func (f *Fs) Mkdir(dir string) error {
 	}
 	req := s3.CreateBucketInput{
 		Bucket: &f.bucket,
-		ACL:    &f.acl,
+		ACL:    &f.opt.ACL,
 	}
-	if f.locationConstraint != "" {
+	if f.opt.LocationConstraint != "" {
 		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
-			LocationConstraint: &f.locationConstraint,
+			LocationConstraint: &f.opt.LocationConstraint,
 		}
 	}
 	_, err := f.c.CreateBucket(&req)

@@ -1297,7 +1316,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
 	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
 	req := s3.CopyObjectInput{
 		Bucket:      &o.fs.bucket,
-		ACL:         &o.fs.acl,
+		ACL:         &o.fs.opt.ACL,
 		Key:         &key,
 		ContentType: &mimeType,
 		CopySource:  aws.String(pathEscape(sourceKey)),

@@ -1353,10 +1372,10 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	size := src.Size()
 
 	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
-		u.Concurrency = *s3UploadConcurrency
+		u.Concurrency = o.fs.opt.UploadConcurrency
 		u.LeavePartsOnError = false
 		u.S3 = o.fs.c
-		u.PartSize = int64(s3ChunkSize)
+		u.PartSize = int64(o.fs.opt.ChunkSize)
 
 		if size == -1 {
 			// Make parts as small as possible while still being able to upload to the

@@ -1376,7 +1395,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
 	}
 
-	if !*s3DisableChecksum && size > uploader.PartSize {
+	if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
 		hash, err := src.Hash(hash.MD5)
 
 		if err == nil && matchMd5.MatchString(hash) {

@@ -1394,18 +1413,18 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	key := o.fs.root + o.remote
 	req := s3manager.UploadInput{
 		Bucket:      &o.fs.bucket,
-		ACL:         &o.fs.acl,
+		ACL:         &o.fs.opt.ACL,
 		Key:         &key,
 		Body:        in,
 		ContentType: &mimeType,
 		Metadata:    metadata,
 		//ContentLength: &size,
 	}
-	if o.fs.sse != "" {
-		req.ServerSideEncryption = &o.fs.sse
+	if o.fs.opt.ServerSideEncryption != "" {
+		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
 	}
-	if o.fs.storageClass != "" {
-		req.StorageClass = &o.fs.storageClass
+	if o.fs.opt.StorageClass != "" {
+		req.StorageClass = &o.fs.opt.StorageClass
 	}
 	_, err = uploader.Upload(&req)
 	if err != nil {
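
ChunkSize stays an fs.SizeSuffix until the AWS SDK boundary, where it is converted to int64 bytes. SizeSuffix values parse and print with binary suffixes; a small sketch of the behaviour the option relies on:

    var cs fs.SizeSuffix
    _ = cs.Set("10M")      // 10 MiB; SizeSuffix implements the flag Value interface
    fmt.Println(int64(cs)) // 10485760, what s3manager receives as PartSize
    fmt.Println(cs)        // prints back as "10M"
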

@@ -20,7 +20,8 @@ import (
 
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"

@@ -38,10 +39,6 @@ const (
 
 var (
 	currentUser = readCurrentUser()
-
-	// Flags
-	sftpAskPassword = flags.BoolP("sftp-ask-password", "", false, "Allow asking for SFTP password when needed.")
-	sshPathOverride = flags.StringP("ssh-path-override", "", "", "Override path used by SSH connection.")
 )
 
 func init() {

@@ -52,32 +49,28 @@ func init() {
 		Options: []fs.Option{{
 			Name:     "host",
 			Help:     "SSH host to connect to",
-			Optional: false,
+			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "example.com",
 				Help:  "Connect to example.com",
 			}},
 		}, {
 			Name:     "user",
 			Help:     "SSH username, leave blank for current username, " + currentUser,
-			Optional: true,
 		}, {
 			Name:     "port",
 			Help:     "SSH port, leave blank to use default (22)",
-			Optional: true,
 		}, {
 			Name:       "pass",
 			Help:       "SSH password, leave blank to use ssh-agent.",
-			Optional:   true,
 			IsPassword: true,
 		}, {
 			Name:     "key_file",
 			Help:     "Path to unencrypted PEM-encoded private key file, leave blank to use ssh-agent.",
-			Optional: true,
 		}, {
 			Name:     "use_insecure_cipher",
 			Help:     "Enable the use of the aes128-cbc cipher. This cipher is insecure and may allow plaintext data to be recovered by an attacker.",
-			Optional: true,
+			Default:  false,
 			Examples: []fs.OptionExample{
 				{
 					Value: "false",

@@ -88,30 +81,56 @@ func init() {
 				},
 			},
 		}, {
 			Name:     "disable_hashcheck",
-			Help:     "Disable the execution of SSH commands to determine if remote file hashing is available. Leave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
-			Optional: true,
+			Default:  false,
+			Help:     "Disable the execution of SSH commands to determine if remote file hashing is available.\nLeave blank or set to false to enable hashing (recommended), set to true to disable hashing.",
+		}, {
+			Name:     "ask_password",
+			Default:  false,
+			Help:     "Allow asking for SFTP password when needed.",
+			Advanced: true,
+		}, {
+			Name:     "path_override",
+			Default:  "",
+			Help:     "Override path used by SSH connection.",
+			Advanced: true,
+		}, {
+			Name:     "set_modtime",
+			Default:  true,
+			Help:     "Set the modified time on the remote if set.",
+			Advanced: true,
 		}},
 	}
 	fs.Register(fsi)
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+	Host              string `config:"host"`
+	User              string `config:"user"`
+	Port              string `config:"port"`
+	Pass              string `config:"pass"`
+	KeyFile           string `config:"key_file"`
+	UseInsecureCipher bool   `config:"use_insecure_cipher"`
+	DisableHashCheck  bool   `config:"disable_hashcheck"`
+	AskPassword       bool   `config:"ask_password"`
+	PathOverride      string `config:"path_override"`
+	SetModTime        bool   `config:"set_modtime"`
+}
+
 // Fs stores the interface to the remote SFTP files
 type Fs struct {
 	name         string
 	root         string
-	features          *fs.Features // optional features
-	config            *ssh.ClientConfig
-	host              string
-	port              string
-	url               string
-	mkdirLock         *stringLock
-	cachedHashes      *hash.Set
-	hashcheckDisabled bool
-	setModtime        bool
-	poolMu            sync.Mutex
-	pool              []*conn
-	connLimit         *rate.Limiter // for limiting number of connections per second
+	opt          Options      // parsed options
+	features     *fs.Features // optional features
+	config       *ssh.ClientConfig
+	url          string
+	mkdirLock    *stringLock
+	cachedHashes *hash.Set
+	poolMu       sync.Mutex
+	pool         []*conn
+	connLimit    *rate.Limiter // for limiting number of connections per second
 }
 
 // Object is a remote SFTP file that has been stat'd (so it exists, but is not necessarily open for reading)
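
The two package-level pflag variables above become ordinary backend options marked Advanced, and each option Name does triple duty: config-file key, `config:` struct tag, and (prefixed and kebab-cased) command line flag via AddBackendFlags later in this commit. Sketch of the correspondence for one option (the wizard presumably only asks about Advanced options when the user requests advanced config):

    // declared once in the backend registration ...
    var opt = fs.Option{
        Name:     "ask_password",
        Default:  false,
        Help:     "Allow asking for SFTP password when needed.",
        Advanced: true,
    }

    // ... read back as a typed field at NewFs time ...
    type Options struct {
        AskPassword bool `config:"ask_password"`
    }

    // ... and exposed on the command line as --sftp-ask-password.
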
@@ -197,7 +216,7 @@ func (f *Fs) sftpConnection() (c *conn, err error) {
 	c = &conn{
 		err: make(chan error, 1),
 	}
-	c.sshClient, err = Dial("tcp", f.host+":"+f.port, f.config)
+	c.sshClient, err = Dial("tcp", f.opt.Host+":"+f.opt.Port, f.config)
 	if err != nil {
 		return nil, errors.Wrap(err, "couldn't connect SSH")
 	}

@@ -270,35 +289,33 @@ func (f *Fs) putSftpConnection(pc **conn, err error) {
 
 // NewFs creates a new Fs object from the name and root. It connects to
 // the host specified in the config file.
-func NewFs(name, root string) (fs.Fs, error) {
-	user := config.FileGet(name, "user")
-	host := config.FileGet(name, "host")
-	port := config.FileGet(name, "port")
-	pass := config.FileGet(name, "pass")
-	keyFile := config.FileGet(name, "key_file")
-	insecureCipher := config.FileGetBool(name, "use_insecure_cipher")
-	hashcheckDisabled := config.FileGetBool(name, "disable_hashcheck")
-	setModtime := config.FileGetBool(name, "set_modtime", true)
-	if user == "" {
-		user = currentUser
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
 	}
-	if port == "" {
-		port = "22"
+	if opt.User == "" {
+		opt.User = currentUser
+	}
+	if opt.Port == "" {
+		opt.Port = "22"
 	}
 	sshConfig := &ssh.ClientConfig{
-		User:            user,
+		User:            opt.User,
 		Auth:            []ssh.AuthMethod{},
 		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
 		Timeout:         fs.Config.ConnectTimeout,
 	}
 
-	if insecureCipher {
+	if opt.UseInsecureCipher {
 		sshConfig.Config.SetDefaults()
 		sshConfig.Config.Ciphers = append(sshConfig.Config.Ciphers, "aes128-cbc")
 	}
 
 	// Add ssh agent-auth if no password or file specified
-	if pass == "" && keyFile == "" {
+	if opt.Pass == "" && opt.KeyFile == "" {
 		sshAgentClient, _, err := sshagent.New()
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't connect to ssh-agent")

@@ -311,8 +328,8 @@ func NewFs(name, root string) (fs.Fs, error) {
 	}
 
 	// Load key file if specified
-	if keyFile != "" {
-		key, err := ioutil.ReadFile(keyFile)
+	if opt.KeyFile != "" {
+		key, err := ioutil.ReadFile(opt.KeyFile)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to read private key file")
 		}

@@ -324,8 +341,8 @@ func NewFs(name, root string) (fs.Fs, error) {
 	}
 
 	// Auth from password if specified
-	if pass != "" {
-		clearpass, err := obscure.Reveal(pass)
+	if opt.Pass != "" {
+		clearpass, err := obscure.Reveal(opt.Pass)
 		if err != nil {
 			return nil, err
 		}
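
Options flagged IsPassword (like pass above) are stored obscured rather than in clear text, so the backend reveals them at connection time. Round trip sketch using the obscure package:

    import "github.com/ncw/rclone/fs/config/obscure"

    obscured, err := obscure.Obscure("s3cret") // what gets written to the config file
    if err != nil {
        return err
    }
    clear, err := obscure.Reveal(obscured) // what the backend actually dials with
    // clear == "s3cret"
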
@ -333,23 +350,20 @@ func NewFs(name, root string) (fs.Fs, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Ask for password if none was defined and we're allowed to
|
// Ask for password if none was defined and we're allowed to
|
||||||
if pass == "" && *sftpAskPassword {
|
if opt.Pass == "" && opt.AskPassword {
|
||||||
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
|
_, _ = fmt.Fprint(os.Stderr, "Enter SFTP password: ")
|
||||||
clearpass := config.ReadPassword()
|
clearpass := config.ReadPassword()
|
||||||
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
|
sshConfig.Auth = append(sshConfig.Auth, ssh.Password(clearpass))
|
||||||
}
|
}
|
||||||
|
|
||||||
f := &Fs{
|
f := &Fs{
|
||||||
name: name,
|
name: name,
|
||||||
root: root,
|
root: root,
|
||||||
config: sshConfig,
|
opt: *opt,
|
||||||
host: host,
|
config: sshConfig,
|
||||||
port: port,
|
url: "sftp://" + opt.User + "@" + opt.Host + ":" + opt.Port + "/" + root,
|
||||||
url: "sftp://" + user + "@" + host + ":" + port + "/" + root,
|
mkdirLock: newStringLock(),
|
||||||
hashcheckDisabled: hashcheckDisabled,
|
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
|
||||||
setModtime: setModtime,
|
|
||||||
mkdirLock: newStringLock(),
|
|
||||||
connLimit: rate.NewLimiter(rate.Limit(connectionsPerSecond), 1),
|
|
||||||
}
|
}
|
||||||
f.features = (&fs.Features{
|
f.features = (&fs.Features{
|
||||||
CanHaveEmptyDirectories: true,
|
CanHaveEmptyDirectories: true,
|
||||||
@ -663,7 +677,7 @@ func (f *Fs) Hashes() hash.Set {
|
|||||||
return *f.cachedHashes
|
return *f.cachedHashes
|
||||||
}
|
}
|
||||||
|
|
||||||
if f.hashcheckDisabled {
|
if f.opt.DisableHashCheck {
|
||||||
return hash.Set(hash.None)
|
return hash.Set(hash.None)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -758,8 +772,8 @@ func (o *Object) Hash(r hash.Type) (string, error) {
|
|||||||
session.Stdout = &stdout
|
session.Stdout = &stdout
|
||||||
session.Stderr = &stderr
|
session.Stderr = &stderr
|
||||||
escapedPath := shellEscape(o.path())
|
escapedPath := shellEscape(o.path())
|
||||||
if *sshPathOverride != "" {
|
if o.fs.opt.PathOverride != "" {
|
||||||
escapedPath = shellEscape(path.Join(*sshPathOverride, o.remote))
|
escapedPath = shellEscape(path.Join(o.fs.opt.PathOverride, o.remote))
|
||||||
}
|
}
|
||||||
err = session.Run(hashCmd + " " + escapedPath)
|
err = session.Run(hashCmd + " " + escapedPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -852,7 +866,7 @@ func (o *Object) SetModTime(modTime time.Time) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrap(err, "SetModTime")
|
return errors.Wrap(err, "SetModTime")
|
||||||
}
|
}
|
||||||
if o.fs.setModtime {
|
if o.fs.opt.SetModTime {
|
||||||
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
|
err = c.sftpClient.Chtimes(o.path(), modTime, modTime)
|
||||||
o.fs.putSftpConnection(&c, err)
|
o.fs.putSftpConnection(&c, err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||

@@ -14,8 +14,8 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"

@@ -31,11 +31,6 @@ const (
 	listChunks = 1000 // chunk size to read directory listings
 )
 
-// Globals
-var (
-	chunkSize = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
-)
-
 // Register with Fs
 func init() {
 	fs.Register(&fs.RegInfo{

@@ -43,8 +38,9 @@ func init() {
 		Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
 		NewFs:       NewFs,
 		Options: []fs.Option{{
 			Name:    "env_auth",
 			Help:    "Get swift credentials from environment variables in standard OpenStack form.",
+			Default: false,
 			Examples: []fs.OptionExample{
 				{
 					Value: "false",

@@ -107,11 +103,13 @@ func init() {
 			Name: "auth_token",
 			Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
 		}, {
 			Name:    "auth_version",
 			Help:    "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
+			Default: 0,
 		}, {
 			Name:    "endpoint_type",
 			Help:    "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
+			Default: "public",
 			Examples: []fs.OptionExample{{
 				Help:  "Public (default, choose this if not sure)",
 				Value: "public",

@@ -122,10 +120,32 @@ func init() {
 				Help:  "Admin",
 				Value: "admin",
 			}},
-		},
-		},
+		}, {
+			Name:     "chunk_size",
+			Help:     "Above this size files will be chunked into a _segments container.",
+			Default:  fs.SizeSuffix(5 * 1024 * 1024 * 1024),
+			Advanced: true,
+		}},
 	})
-	flags.VarP(&chunkSize, "swift-chunk-size", "", "Above this size files will be chunked into a _segments container.")
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+	EnvAuth      bool          `config:"env_auth"`
+	User         string        `config:"user"`
+	Key          string        `config:"key"`
+	Auth         string        `config:"auth"`
+	UserID       string        `config:"user_id"`
+	Domain       string        `config:"domain"`
+	Tenant       string        `config:"tenant"`
+	TenantID     string        `config:"tenant_id"`
+	TenantDomain string        `config:"tenant_domain"`
+	Region       string        `config:"region"`
+	StorageURL   string        `config:"storage_url"`
+	AuthToken    string        `config:"auth_token"`
+	AuthVersion  int           `config:"auth_version"`
+	EndpointType string        `config:"endpoint_type"`
+	ChunkSize    fs.SizeSuffix `config:"chunk_size"`
+}
+
 // Fs represents a remote swift server
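
The former global chunkSize flag var becomes a typed Advanced option; the Default is a real fs.SizeSuffix rather than a string, so the same value can feed the flag system, the config wizard and the Options struct. Sketch of the round trip (fs.ConfigMap would normally inject the default; here it is set by hand):

    m := configmap.Simple{"chunk_size": "5G"}
    opt := new(Options)
    if err := configstruct.Set(m, opt); err == nil {
        // opt.ChunkSize == fs.SizeSuffix(5 * 1024 * 1024 * 1024)
    }
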
@@ -133,6 +153,7 @@ type Fs struct {
 	name              string            // name of this remote
 	root              string            // the path we are working on if any
 	features          *fs.Features      // optional features
+	opt               Options           // options for this backend
 	c                 *swift.Connection // the connection to the swift server
 	container         string            // the container we are working on
 	containerOKMu     sync.Mutex        // mutex to protect container OK

@@ -195,27 +216,27 @@ func parsePath(path string) (container, directory string, err error) {
 }
 
 // swiftConnection makes a connection to swift
-func swiftConnection(name string) (*swift.Connection, error) {
+func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
 	c := &swift.Connection{
 		// Keep these in the same order as the Config for ease of checking
-		UserName:       config.FileGet(name, "user"),
-		ApiKey:         config.FileGet(name, "key"),
-		AuthUrl:        config.FileGet(name, "auth"),
-		UserId:         config.FileGet(name, "user_id"),
-		Domain:         config.FileGet(name, "domain"),
-		Tenant:         config.FileGet(name, "tenant"),
-		TenantId:       config.FileGet(name, "tenant_id"),
-		TenantDomain:   config.FileGet(name, "tenant_domain"),
-		Region:         config.FileGet(name, "region"),
-		StorageUrl:     config.FileGet(name, "storage_url"),
-		AuthToken:      config.FileGet(name, "auth_token"),
-		AuthVersion:    config.FileGetInt(name, "auth_version", 0),
-		EndpointType:   swift.EndpointType(config.FileGet(name, "endpoint_type", "public")),
+		UserName:       opt.User,
+		ApiKey:         opt.Key,
+		AuthUrl:        opt.Auth,
+		UserId:         opt.UserID,
+		Domain:         opt.Domain,
+		Tenant:         opt.Tenant,
+		TenantId:       opt.TenantID,
+		TenantDomain:   opt.TenantDomain,
+		Region:         opt.Region,
+		StorageUrl:     opt.StorageURL,
+		AuthToken:      opt.AuthToken,
+		AuthVersion:    opt.AuthVersion,
+		EndpointType:   swift.EndpointType(opt.EndpointType),
 		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
 		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
 		Transport:      fshttp.NewTransport(fs.Config),
 	}
-	if config.FileGetBool(name, "env_auth", false) {
+	if opt.EnvAuth {
 		err := c.ApplyEnvironment()
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to read environment variables")

@@ -251,13 +272,14 @@ func swiftConnection(name string) (*swift.Connection, error) {
 //
 // if noCheckContainer is set then the Fs won't check the container
 // exists before creating it.
-func NewFsWithConnection(name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
+func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
 	container, directory, err := parsePath(root)
 	if err != nil {
 		return nil, err
 	}
 	f := &Fs{
 		name:              name,
+		opt:               *opt,
 		c:                 c,
 		container:         container,
 		segmentsContainer: container + "_segments",

@@ -288,12 +310,19 @@ func NewFsWithConnection(name, root string, c *swift.Connection, noCheckContaine
 }
 
 // NewFs contstructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
-	c, err := swiftConnection(name)
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
 	if err != nil {
 		return nil, err
 	}
-	return NewFsWithConnection(name, root, c, false)
+	c, err := swiftConnection(opt, name)
+	if err != nil {
+		return nil, err
+	}
+	return NewFsWithConnection(opt, name, root, c, false)
 }
 
 // Return an Object from a path

@@ -871,7 +900,7 @@ func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64,
 			fs.Debugf(o, "Uploading segments into %q seems done (%v)", o.fs.segmentsContainer, err)
 			break
 		}
-		n := int64(chunkSize)
+		n := int64(o.fs.opt.ChunkSize)
 		if size != -1 {
 			n = min(left, n)
 		headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it

@@ -921,7 +950,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	contentType := fs.MimeType(src)
 	headers := m.ObjectHeaders()
 	uniquePrefix := ""
-	if size > int64(chunkSize) || size == -1 {
+	if size > int64(o.fs.opt.ChunkSize) || size == -1 {
 		uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
 		if err != nil {
 			return err
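
Because swiftConnection is now a pure function of its Options argument, it can be exercised without any global config state, which helps testing and callers that assemble their own connection for NewFsWithConnection. Illustrative call (field values hypothetical):

    opt := &Options{
        User:         "demo",
        Key:          "apikey",
        Auth:         "https://auth.example.com/v1.0",
        EndpointType: "public",
    }
    c, err := swiftConnection(opt, "myswift")
    // c.UserName == "demo", c.EndpointType == swift.EndpointType("public")
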

@@ -32,6 +32,8 @@ import (
 	"github.com/ncw/rclone/backend/webdav/odrvcookie"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fserrors"
 	"github.com/ncw/rclone/fs/fshttp"

@@ -56,15 +58,14 @@ func init() {
 		Options: []fs.Option{{
 			Name:     "url",
 			Help:     "URL of http host to connect to",
-			Optional: false,
+			Required: true,
 			Examples: []fs.OptionExample{{
 				Value: "https://example.com",
 				Help:  "Connect to example.com",
 			}},
 		}, {
 			Name:     "vendor",
 			Help:     "Name of the Webdav site/service/software you are using",
-			Optional: false,
 			Examples: []fs.OptionExample{{
 				Value: "nextcloud",
 				Help:  "Nextcloud",

@@ -79,34 +80,37 @@ func init() {
 				Help:  "Other site/service or software",
 			}},
 		}, {
 			Name:     "user",
 			Help:     "User name",
-			Optional: true,
 		}, {
 			Name:       "pass",
 			Help:       "Password.",
-			Optional:   true,
 			IsPassword: true,
 		}, {
 			Name:     "bearer_token",
 			Help:     "Bearer token instead of user/pass (eg a Macaroon)",
-			Optional: true,
 		}},
 	})
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+	URL    string `config:"url"`
+	Vendor string `config:"vendor"`
+	User   string `config:"user"`
+	Pass   string `config:"pass"`
+}
+
 // Fs represents a remote webdav
 type Fs struct {
 	name        string        // name of this remote
 	root        string        // the path we are working on
+	opt         Options       // parsed options
 	features    *fs.Features  // optional features
 	endpoint    *url.URL      // URL of the host
 	endpointURL string        // endpoint as a string
 	srv         *rest.Client  // the connection to the one drive server
 	pacer       *pacer.Pacer  // pacer for API calls
-	user        string        // username
-	pass        string        // password
-	vendor      string        // name of the vendor
 	precision   time.Duration // mod time precision
 	canStream   bool          // set if can stream
 	useOCMtime  bool          // set if can use X-OC-Mtime

@@ -264,10 +268,12 @@ func (o *Object) filePath() string {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
-	endpoint := config.FileGet(name, "url")
-	if !strings.HasSuffix(endpoint, "/") {
-		endpoint += "/"
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
 	}
 	rootIsDir := strings.HasSuffix(root, "/")
 	root = strings.Trim(root, "/")

@@ -275,17 +281,23 @@ func NewFs(name, root string) (fs.Fs, error) {
 	user := config.FileGet(name, "user")
 	pass := config.FileGet(name, "pass")
 	bearerToken := config.FileGet(name, "bearer_token")
-	if pass != "" {
+	if !strings.HasSuffix(opt.URL, "/") {
+		opt.URL += "/"
+	}
+	if opt.Pass != "" {
 		var err error
-		pass, err = obscure.Reveal(pass)
+		opt.Pass, err = obscure.Reveal(opt.Pass)
 		if err != nil {
 			return nil, errors.Wrap(err, "couldn't decrypt password")
 		}
 	}
-	vendor := config.FileGet(name, "vendor")
+	if opt.Vendor == "" {
+		opt.Vendor = "other"
+	}
+	root = strings.Trim(root, "/")
 
 	// Parse the endpoint
-	u, err := url.Parse(endpoint)
+	u, err := url.Parse(opt.URL)
 	if err != nil {
 		return nil, err
 	}

@@ -293,24 +305,23 @@ func NewFs(name, root string) (fs.Fs, error) {
 	f := &Fs{
 		name:        name,
 		root:        root,
+		opt:         *opt,
 		endpoint:    u,
 		endpointURL: u.String(),
 		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetRoot(u.String()),
 		pacer:       pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
-		user:        user,
-		pass:        pass,
 		precision:   fs.ModTimeNotSupported,
 	}
 	f.features = (&fs.Features{
 		CanHaveEmptyDirectories: true,
 	}).Fill(f)
 	if user != "" || pass != "" {
-		f.srv.SetUserPass(user, pass)
+		f.srv.SetUserPass(opt.User, opt.Pass)
 	} else if bearerToken != "" {
 		f.srv.SetHeader("Authorization", "BEARER "+bearerToken)
 	}
 	f.srv.SetErrorHandler(errorHandler)
-	err = f.setQuirks(vendor)
+	err = f.setQuirks(opt.Vendor)
 	if err != nil {
 		return nil, err
 	}

@@ -339,10 +350,6 @@ func NewFs(name, root string) (fs.Fs, error) {
 
 // setQuirks adjusts the Fs for the vendor passed in
 func (f *Fs) setQuirks(vendor string) error {
-	if vendor == "" {
-		vendor = "other"
-	}
-	f.vendor = vendor
 	switch vendor {
 	case "owncloud":
 		f.canStream = true

@@ -355,7 +362,7 @@ func (f *Fs) setQuirks(vendor string) error {
 		// To mount sharepoint, two Cookies are required
 		// They have to be set instead of BasicAuth
 		f.srv.RemoveHeader("Authorization") // We don't need this Header if using cookies
-		spCk := odrvcookie.New(f.user, f.pass, f.endpointURL)
+		spCk := odrvcookie.New(f.opt.User, f.opt.Pass, f.endpointURL)
 		spCookies, err := spCk.Cookies()
 		if err != nil {
 			return err
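
One loose end visible above: bearer_token is still read with config.FileGet, and user/pass keep their old reads for the emptiness checks even though opt.User and opt.Pass are what actually get used. If bearer_token were folded into the same pattern it would just be one more tagged field; hypothetical, not part of this commit:

    type Options struct {
        URL         string `config:"url"`
        Vendor      string `config:"vendor"`
        User        string `config:"user"`
        Pass        string `config:"pass"`
        BearerToken string `config:"bearer_token"` // hypothetical addition
    }
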

@@ -16,6 +16,8 @@ import (
 	yandex "github.com/ncw/rclone/backend/yandex/api"
 	"github.com/ncw/rclone/fs"
 	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/config/obscure"
 	"github.com/ncw/rclone/fs/fshttp"
 	"github.com/ncw/rclone/fs/hash"

@@ -51,29 +53,35 @@ func init() {
 		Name:        "yandex",
 		Description: "Yandex Disk",
 		NewFs:       NewFs,
-		Config: func(name string) {
-			err := oauthutil.Config("yandex", name, oauthConfig)
+		Config: func(name string, m configmap.Mapper) {
+			err := oauthutil.Config("yandex", name, m, oauthConfig)
 			if err != nil {
 				log.Fatalf("Failed to configure token: %v", err)
 			}
 		},
 		Options: []fs.Option{{
 			Name: config.ConfigClientID,
-			Help: "Yandex Client Id - leave blank normally.",
+			Help: "Yandex Client Id\nLeave blank normally.",
 		}, {
 			Name: config.ConfigClientSecret,
-			Help: "Yandex Client Secret - leave blank normally.",
+			Help: "Yandex Client Secret\nLeave blank normally.",
 		}},
 	})
 }
 
+// Options defines the configuration for this backend
+type Options struct {
+	Token string `config:"token"`
+}
+
 // Fs represents a remote yandex
 type Fs struct {
 	name     string
-	root     string //root path
+	root     string         // root path
+	opt      Options        // parsed options
 	features *fs.Features   // optional features
 	yd       *yandex.Client // client for rest api
-	diskRoot string //root path with "disk:/" container name
+	diskRoot string // root path with "disk:/" container name
 }
 
 // Object describes a swift object

@@ -109,11 +117,9 @@ func (f *Fs) Features() *fs.Features {
 }
 
 // read access token from ConfigFile string
-func getAccessToken(name string) (*oauth2.Token, error) {
-	// Read the token from the config file
-	tokenConfig := config.FileGet(name, "token")
+func getAccessToken(opt *Options) (*oauth2.Token, error) {
 	//Get access token from config string
-	decoder := json.NewDecoder(strings.NewReader(tokenConfig))
+	decoder := json.NewDecoder(strings.NewReader(opt.Token))
 	var result *oauth2.Token
 	err := decoder.Decode(&result)
 	if err != nil {

@@ -123,9 +129,16 @@ func getAccessToken(name string) (*oauth2.Token, error) {
 }
 
 // NewFs constructs an Fs from the path, container:path
-func NewFs(name, root string) (fs.Fs, error) {
+func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+	// Parse config into Options struct
+	opt := new(Options)
+	err := configstruct.Set(m, opt)
+	if err != nil {
+		return nil, err
+	}
+
 	//read access token from config
-	token, err := getAccessToken(name)
+	token, err := getAccessToken(opt)
 	if err != nil {
 		return nil, err
 	}

@@ -135,6 +148,7 @@ func NewFs(name, root string) (fs.Fs, error) {
 
 	f := &Fs{
 		name: name,
+		opt:  *opt,
 		yd:   yandexDisk,
 	}
 	f.features = (&fs.Features{
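
Two conventions show up here: multi-line Help strings (the text after the first \n is dropped from the command line flag help by AddBackendFlags, below), and the Config callback now receiving the Mapper so interactive setup such as the oauth flow can read and write config through the same layered view. A minimal registration sketch (assuming the Mapper exposes the Get/Set pair used throughout this commit; the backend name is hypothetical):

    fs.Register(&fs.RegInfo{
        Name:        "example", // hypothetical backend
        Description: "Example backend",
        NewFs:       NewFs,
        Config: func(name string, m configmap.Mapper) {
            if _, ok := m.Get("token"); !ok {
                m.Set("token", `{"access_token":"..."}`) // e.g. result of an oauth dance
            }
        },
        Options: []fs.Option{{
            Name: "token",
            Help: "OAuth token as JSON\nNormally filled in by the config process.",
        }},
    })
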

@@ -8,8 +8,6 @@ import (
 
 	"github.com/ncw/rclone/backend/cache"
 	"github.com/ncw/rclone/cmd"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 )

@@ -27,17 +25,6 @@ Print cache stats for a remote in JSON format
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 1, command, args)
-
-		_, configName, _, err := fs.ParseRemote(args[0])
-		if err != nil {
-			fs.Errorf("cachestats", "%s", err.Error())
-			return
-		}
-
-		if !config.FileGetBool(configName, "read_only", false) {
-			config.FileSet(configName, "read_only", "true")
-			defer config.FileDeleteKey(configName, "read_only")
-		}
 
 		fsrc := cmd.NewFsSrc(args)
 		cmd.Run(false, false, command, func() error {
 			var fsCache *cache.Fs

cmd/cmd.go

@@ -16,6 +16,7 @@ import (
 	"runtime"
 	"runtime/pprof"
 	"strconv"
+	"strings"
 	"time"
 
 	"github.com/pkg/errors"

@@ -151,12 +152,12 @@ func ShowVersion() {
 // It returns a string with the file name if points to a file
 // otherwise "".
 func NewFsFile(remote string) (fs.Fs, string) {
-	fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
+	_, _, fsPath, err := fs.ParseRemote(remote)
 	if err != nil {
 		fs.CountError(err)
 		log.Fatalf("Failed to create file system for %q: %v", remote, err)
 	}
-	f, err := fsInfo.NewFs(configName, fsPath)
+	f, err := fs.NewFs(remote)
 	switch err {
 	case fs.ErrorIsFile:
 		return f, path.Base(fsPath)

@@ -496,3 +497,51 @@ func resolveExitCode(err error) {
 		os.Exit(exitCodeUsageError)
 	}
 }
+
+// AddBackendFlags creates flags for all the backend options
+func AddBackendFlags() {
+	for _, fsInfo := range fs.Registry {
+		done := map[string]struct{}{}
+		for i := range fsInfo.Options {
+			opt := &fsInfo.Options[i]
+			// Skip if done already (eg with Provider options)
+			if _, doneAlready := done[opt.Name]; doneAlready {
+				continue
+			}
+			done[opt.Name] = struct{}{}
+			// Make a flag from each option
+			name := strings.Replace(opt.Name, "_", "-", -1) // convert snake_case to kebab-case
+			if !opt.NoPrefix {
+				name = fsInfo.Prefix + "-" + name
+			}
+			found := pflag.CommandLine.Lookup(name) != nil
+			if !found {
+				// Take first line of help only
+				help := strings.TrimSpace(opt.Help)
+				if nl := strings.IndexRune(help, '\n'); nl >= 0 {
+					help = help[:nl]
+				}
+				help = strings.TrimSpace(help)
+				flag := pflag.CommandLine.VarPF(opt, name, string(opt.ShortOpt), help)
+				if _, isBool := opt.Value.(bool); isBool {
+					flag.NoOptDefVal = "true"
+				}
+				// Hide on the command line if requested
+				if opt.Hide&fs.OptionHideCommandLine != 0 {
+					flag.Hidden = true
+				}
+			} else {
+				fs.Errorf(nil, "Not adding duplicate flag --%s", name)
+			}
+			//flag.Hidden = true
+		}
+	}
+}
+
+// Main runs rclone interpreting flags and commands out of os.Args
+func Main() {
+	AddBackendFlags()
+	if err := Root.Execute(); err != nil {
+		log.Fatalf("Fatal error: %v", err)
+	}
+}
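
AddBackendFlags is what makes every backend option a global flag: the Option itself is handed to pflag as the Value (pflag.CommandLine.VarPF requires it to implement the pflag.Value interface, which the value() accessor at the end of this diff is part of), the name is prefixed and kebab-cased, and bool options get NoOptDefVal so they work without an argument. Worked example of the naming (values illustrative):

    name := strings.Replace("chunk_size", "_", "-", -1) // "chunk-size"
    name = "s3" + "-" + name                            // "s3-chunk-size", i.e. --s3-chunk-size

    // A bool option such as sftp's ask_password can then be passed bare:
    //   rclone --sftp-ask-password lsd mysftp:
    // which pflag treats as --sftp-ask-password=true via NoOptDefVal.
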

@@ -40,14 +40,14 @@ use it like this
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(2, 11, command, args)
 		cmd.Run(false, false, command, func() error {
-			fsInfo, configName, _, err := fs.ParseRemote(args[0])
+			fsInfo, _, _, config, err := fs.ConfigFs(args[0])
 			if err != nil {
 				return err
 			}
 			if fsInfo.Name != "crypt" {
 				return errors.New("The remote needs to be of type \"crypt\"")
 			}
-			cipher, err := crypt.NewCipher(configName)
+			cipher, err := crypt.NewCipher(config)
 			if err != nil {
 				return err
 			}

@@ -102,14 +102,14 @@ can be processed line by line as each item is written one to a line.
 		fsrc := cmd.NewFsSrc(args)
 		var cipher crypt.Cipher
 		if showEncrypted {
-			fsInfo, configName, _, err := fs.ParseRemote(args[0])
+			fsInfo, _, _, config, err := fs.ConfigFs(args[0])
 			if err != nil {
 				log.Fatalf(err.Error())
 			}
 			if fsInfo.Name != "crypt" {
 				log.Fatalf("The remote needs to be of type \"crypt\"")
 			}
-			cipher, err = crypt.NewCipher(configName)
+			cipher, err = crypt.NewCipher(config)
 			if err != nil {
 				log.Fatalf(err.Error())
 			}
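
crypt.NewCipher now consumes the config Mapper rather than a remote name, and fs.ConfigFs resolves a remote path and hands back that ready-made Mapper alongside the registry entry. Usage as inferred from the call sites above (treat the exact return types as an assumption):

    fsInfo, configName, fsPath, config, err := fs.ConfigFs("secret:dir")
    if err != nil {
        log.Fatalf(err.Error())
    }
    _ = configName // remote name, e.g. "secret"
    _ = fsPath     // path part, e.g. "dir"
    if fsInfo.Name == "crypt" {
        cipher, err := crypt.NewCipher(config) // reads e.g. "password" through the Mapper
        _, _ = cipher, err
    }
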

@@ -17,6 +17,14 @@ var (
 	// implementation from the fs
 	ConfigFileGet = func(section, key string) (string, bool) { return "", false }
 
+	// Set a value into the config file
+	//
+	// This is a function pointer to decouple the config
+	// implementation from the fs
+	ConfigFileSet = func(section, key, value string) {
+		Errorf(nil, "No config handler to set %q = %q in section %q of the config file", key, value, section)
+	}
+
 	// CountError counts an error. If any errors have been
 	// counted then it will exit with a non zero error code.
 	//
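
ConfigFileSet follows the same decoupling trick as the existing ConfigFileGet: package fs cannot import fs/config without an import cycle, so it publishes function-pointer hooks with harmless defaults, and the config package swaps in the real implementations at init (next hunk). The pattern in isolation, with hypothetical package names:

    // package core (stands in for fs): declares the hook with an inert default.
    package core

    var LookupSetting = func(section, key string) (string, bool) {
        return "", false
    }

    // package provider (stands in for fs/config): installs the real lookup.
    package provider

    import "example.com/core"

    var fileData = map[string]string{} // the provider's backing store

    func init() {
        core.LookupSetting = func(section, key string) (string, bool) {
            v, ok := fileData[section+"."+key]
            return v, ok
        }
    }
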

@@ -81,8 +81,9 @@ var (
 )
 
 func init() {
-	// Set the function pointer up in fs
+	// Set the function pointers up in fs
 	fs.ConfigFileGet = FileGetFlag
+	fs.ConfigFileSet = FileSet
 }
 
 func getConfigData() *goconfig.ConfigFile {

@@ -705,7 +706,8 @@ func RemoteConfig(name string) {
 	fmt.Printf("Remote config\n")
 	f := MustFindByName(name)
 	if f.Config != nil {
-		f.Config(name)
+		m := fs.ConfigMap(f, name)
+		f.Config(name, m)
 	}
 }
 

@@ -745,7 +747,7 @@ func ChooseOption(o *fs.Option, name string) string {
 	fmt.Println(o.Help)
 	if o.IsPassword {
 		actions := []string{"yYes type in my own password", "gGenerate random password"}
-		if o.Optional {
+		if !o.Required {
 			actions = append(actions, "nNo leave this optional password blank")
 		}
 		var password string

@@ -1089,8 +1091,8 @@ func Authorize(args []string) {
 		log.Fatalf("Invalid number of arguments: %d", len(args))
 	}
 	newType := args[0]
-	fs := fs.MustFind(newType)
-	if fs.Config == nil {
+	f := fs.MustFind(newType)
+	if f.Config == nil {
 		log.Fatalf("Can't authorize fs %q", newType)
 	}
 	// Name used for temporary fs

@@ -1105,20 +1107,15 @@ func Authorize(args []string) {
 		getConfigData().SetValue(name, ConfigClientID, args[1])
 		getConfigData().SetValue(name, ConfigClientSecret, args[2])
 	}
-	fs.Config(name)
+	m := fs.ConfigMap(f, name)
+	f.Config(name, m)
 }
 
 // FileGetFlag gets the config key under section returning the
 // the value and true if found and or ("", false) otherwise
-//
-// It looks up defaults in the environment if they are present
 func FileGetFlag(section, key string) (string, bool) {
 	newValue, err := getConfigData().GetValue(section, key)
-	if err == nil {
-		return newValue, true
-	}
-	envKey := fs.ConfigToEnv(section, key)
-	return os.LookupEnv(envKey)
+	return newValue, err == nil
 }
 
 // FileGet gets the config key under section returning the

@@ -1134,46 +1131,14 @@ func FileGet(section, key string, defaultVal ...string) string {
 	return getConfigData().MustValue(section, key, defaultVal...)
 }
 
-// FileGetBool gets the config key under section returning the
-// default or false if not set.
-//
-// It looks up defaults in the environment if they are present
-func FileGetBool(section, key string, defaultVal ...bool) bool {
-	envKey := fs.ConfigToEnv(section, key)
-	newValue, found := os.LookupEnv(envKey)
-	if found {
-		newBool, err := strconv.ParseBool(newValue)
-		if err != nil {
-			fs.Errorf(nil, "Couldn't parse %q into bool - ignoring: %v", envKey, err)
-		} else {
-			defaultVal = []bool{newBool}
-		}
-	}
-	return getConfigData().MustBool(section, key, defaultVal...)
-}
-
-// FileGetInt gets the config key under section returning the
-// default or 0 if not set.
-//
-// It looks up defaults in the environment if they are present
-func FileGetInt(section, key string, defaultVal ...int) int {
-	envKey := fs.ConfigToEnv(section, key)
-	newValue, found := os.LookupEnv(envKey)
-	if found {
-		newInt, err := strconv.Atoi(newValue)
-		if err != nil {
-			fs.Errorf(nil, "Couldn't parse %q into int - ignoring: %v", envKey, err)
-		} else {
-			defaultVal = []int{newInt}
-		}
-	}
-	return getConfigData().MustInt(section, key, defaultVal...)
-}
-
 // FileSet sets the key in section to value. It doesn't save
 // the config file.
 func FileSet(section, key, value string) {
-	getConfigData().SetValue(section, key, value)
+	if value != "" {
+		getConfigData().SetValue(section, key, value)
+	} else {
+		FileDeleteKey(section, key)
+	}
 }
 
 // FileDeleteKey deletes the config key in the config file.
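
FileGetBool and FileGetInt disappear because the environment lookup and string-to-type conversion now live in the configmap/configstruct layer instead of being repeated per accessor. FileSet also gains a small behaviour change: setting an empty value deletes the key outright, so resetting an option no longer leaves blank entries in the config file:

    config.FileSet("myremote", "region", "eu-west-1") // writes region = eu-west-1
    config.FileSet("myremote", "region", "")          // removes the key entirely
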

fs/fs.go

@@ -2,6 +2,7 @@
 package fs
 
 import (
+	"fmt"
 	"io"
 	"io/ioutil"
 	"log"

@@ -13,6 +14,8 @@ import (
 	"strings"
 	"time"
 
+	"github.com/ncw/rclone/fs/config/configmap"
+	"github.com/ncw/rclone/fs/config/configstruct"
 	"github.com/ncw/rclone/fs/fspath"
 	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"

@@ -68,24 +71,87 @@ type RegInfo struct {
 	Name string
 	// Description of this fs - defaults to Name
 	Description string
+	// Prefix for command line flags for this fs - defaults to Name if not set
+	Prefix string
 	// Create a new file system. If root refers to an existing
 	// object, then it should return a Fs which which points to
 	// the parent of that object and ErrorIsFile.
-	NewFs func(name string, root string) (Fs, error) `json:"-"`
+	NewFs func(name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
 	// Function to call to help with config
-	Config func(string) `json:"-"`
+	Config func(name string, config configmap.Mapper) `json:"-"`
 	// Options for the Fs configuration
-	Options []Option
+	Options Options
 }
 
+// Options is a slice of configuration Option for a backend
+type Options []Option
+
+// Set the default values for the options
+func (os Options) setValues() {
+	for i := range os {
+		o := &os[i]
+		if o.Default == nil {
+			o.Default = ""
+		}
+	}
+}
+
+// OptionVisibility controls whether the options are visible in the
+// configurator or the command line.
+type OptionVisibility byte
+
+// Constants Option.Hide
+const (
+	OptionHideCommandLine OptionVisibility = 1 << iota
+	OptionHideConfigurator
+	OptionHideBoth = OptionHideCommandLine | OptionHideConfigurator
+)
|
|
||||||
// Option is describes an option for the config wizard
|
// Option is describes an option for the config wizard
|
||||||
|
//
|
||||||
|
// This also describes command line options and environment variables
|
||||||
type Option struct {
|
type Option struct {
|
||||||
Name string
|
Name string // name of the option in snake_case
|
||||||
Help string
|
Help string // Help, the first line only is used for the command line help
|
||||||
Provider string
|
Provider string // Set to filter on provider
|
||||||
Optional bool
|
Default interface{} // default value, nil => ""
|
||||||
IsPassword bool
|
Value interface{} // value to be set by flags
|
||||||
Examples OptionExamples `json:",omitempty"`
|
Examples OptionExamples `json:",omitempty"` // config examples
|
||||||
|
ShortOpt string // the short option for this if required
|
||||||
|
Hide OptionVisibility // set this to hide the config from the configurator or the command line
|
||||||
|
Required bool // this option is required
|
||||||
|
IsPassword bool // set if the option is a password
|
||||||
|
NoPrefix bool // set if the option for this should not use the backend prefix
|
||||||
|
Advanced bool // set if this is an advanced config option
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gets the current current value which is the default if not set
|
||||||
|
func (o *Option) value() interface{} {
|
||||||
|
val := o.Value
|
||||||
|
if val == nil {
|
||||||
|
val = o.Default
|
||||||
|
}
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
|
||||||
|
// String turns Option into a string
|
||||||
|
func (o *Option) String() string {
|
||||||
|
return fmt.Sprint(o.value())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set a Option from a string
|
||||||
|
func (o *Option) Set(s string) (err error) {
|
||||||
|
newValue, err := configstruct.StringToInterface(o.value(), s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
o.Value = newValue
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type of the value
|
||||||
|
func (o *Option) Type() string {
|
||||||
|
return reflect.TypeOf(o.value()).Name()
|
||||||
}
|
}
|
||||||
|
|
||||||
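With Default, Required, Hide and friends on Option, and the String/Set/Type trio making it usable as a flag value, a backend can now declare an option once and have the command line, environment and config file all derive from it. A speculative registration sketch (the backend and option names are invented for illustration):

// Hypothetical backend registration using the new fields.
func init() {
    fs.Register(&fs.RegInfo{
        Name:        "example",
        Description: "Example backend",
        NewFs:       NewFs, // func(name, root string, config configmap.Mapper) (fs.Fs, error)
        Options: fs.Options{{
            Name:     "chunk_size",
            Help:     "Chunk size for uploads.",
            Default:  fs.SizeSuffix(8 << 20), // typed default rather than a string
            Advanced: true,
        }},
    })
}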
// OptionExamples is a slice of examples
@ -114,6 +180,10 @@ type OptionExample struct {
//
// Fs modules should use this in an init() function
func Register(info *RegInfo) {
info.Options.setValues()
if info.Prefix == "" {
info.Prefix = info.Name
}
Registry = append(Registry, info)
}

@ -792,7 +862,8 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err e
var fsName string
var ok bool
if configName != "" {
fsName, ok = ConfigFileGet(configName, "type")
m := ConfigMap(nil, configName)
fsName, ok = m.Get("type")
if !ok {
return nil, "", "", ErrorNotFoundInConfigFile
}
@ -804,6 +875,119 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err e
return fsInfo, configName, fsPath, err
}

// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
type configEnvVars string

// Get a config item from the environment variables if possible
func (configName configEnvVars) Get(key string) (value string, ok bool) {
return os.LookupEnv(ConfigToEnv(string(configName), key))
}

// A configmap.Getter to read from the environment RCLONE_option_name
type optionEnvVars string

// Get a config item from the option environment variables if possible
func (prefix optionEnvVars) Get(key string) (value string, ok bool) {
return os.LookupEnv(OptionToEnv(string(prefix) + "-" + key))
}

// A configmap.Getter to read either the default value or the set
// value from the RegInfo.Options
type regInfoValues struct {
fsInfo *RegInfo
useDefault bool
}

// override the values in configMap with the either the flag values or
// the default values
func (r *regInfoValues) Get(key string) (value string, ok bool) {
for i := range r.fsInfo.Options {
o := &r.fsInfo.Options[i]
if o.Name == key {
if r.useDefault || o.Value != nil {
return o.String(), true
}
break
}
}
return "", false
}

// A configmap.Setter to read from the config file
type setConfigFile string

// Set a config item into the config file
func (section setConfigFile) Set(key, value string) {
Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
ConfigFileSet(string(section), key, value)
}

// A configmap.Getter to read from the config file
type getConfigFile string

// Get a config item from the config file
func (section getConfigFile) Get(key string) (value string, ok bool) {
value, ok = ConfigFileGet(string(section), key)
// Ignore empty lines in the config file
if value == "" {
ok = false
}
return value, ok
}

// ConfigMap creates a configmap.Map from the *RegInfo and the
// configName passed in.
//
// If fsInfo is nil then the returned configmap.Map should only be
// used for reading non backend specific parameters, such as "type".
func ConfigMap(fsInfo *RegInfo, configName string) (config *configmap.Map) {
// Create the config
config = configmap.New()

// Read the config, more specific to least specific

// flag values
if fsInfo != nil {
config.AddGetter(&regInfoValues{fsInfo, false})
}

// remote specific environment vars
config.AddGetter(configEnvVars(configName))

// backend specific environment vars
if fsInfo != nil {
config.AddGetter(optionEnvVars(fsInfo.Prefix))
}

// config file
config.AddGetter(getConfigFile(configName))

// default values
if fsInfo != nil {
config.AddGetter(&regInfoValues{fsInfo, true})
}

// Set Config
config.AddSetter(setConfigFile(configName))
return config
}

// ConfigFs makes the config for calling NewFs with.
//
// It parses the path which is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
func ConfigFs(path string) (fsInfo *RegInfo, configName, fsPath string, config *configmap.Map, err error) {
// Parse the remote path
fsInfo, configName, fsPath, err = ParseRemote(path)
if err != nil {
return
}
config = ConfigMap(fsInfo, configName)
return
}

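The getter order in ConfigMap above is the whole precedence story: flag values first, then RCLONE_CONFIG_remote_key variables, then RCLONE_backend_key variables, then the config file, then the registered defaults. A rough illustration of a lookup against that stack (the remote and option names are invented, continuing the sketch above):

// Hypothetical lookup, assuming an "example" backend with a
// "chunk_size" option registered as sketched earlier.
fsInfo, err := fs.Find("example")
if err != nil {
    log.Fatal(err)
}
_ = os.Setenv("RCLONE_CONFIG_MYREMOTE_CHUNK_SIZE", "16M") // remote specific env var

m := fs.ConfigMap(fsInfo, "myremote")
value, ok := m.Get("chunk_size")
fmt.Println(value, ok) // "16M" true - the env var shadows the config file and the default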
// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
@ -814,11 +998,11 @@ func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err e
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(path string) (Fs, error) {
fsInfo, configName, fsPath, err := ParseRemote(path)
fsInfo, configName, fsPath, config, err := ConfigFs(path)
if err != nil {
return nil, err
}
return fsInfo.NewFs(configName, fsPath)
return fsInfo.NewFs(configName, fsPath, config)
}

// TemporaryLocalFs creates a local FS in the OS's temporary directory.
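Since ParseRemote now resolves "type" through a configmap as well, NewFs can open a remote that exists only in the environment. A speculative example (the remote name is invented; no config file entry is assumed):

// Hypothetical on-the-fly remote defined purely via environment
// variables - no config file entry required.
_ = os.Setenv("RCLONE_CONFIG_ENVREMOTE_TYPE", "alias")
_ = os.Setenv("RCLONE_CONFIG_ENVREMOTE_REMOTE", "/tmp")

f, err := fs.NewFs("envremote:")
if err != nil {
    log.Fatal(err)
}
fmt.Println(f.Name(), f.Root())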
@ -4,6 +4,7 @@ import (
"strings"
"testing"

"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
)

@ -52,3 +53,20 @@ func TestFeaturesDisableList(t *testing.T) {
assert.False(t, ft.CaseInsensitive)
assert.False(t, ft.DuplicateFiles)
}

// Check it satisfies the interface
var _ pflag.Value = (*Option)(nil)

func TestOption(t *testing.T) {
d := &Option{
Name: "potato",
Value: SizeSuffix(17 << 20),
}
assert.Equal(t, "17M", d.String())
assert.Equal(t, "SizeSuffix", d.Type())
err := d.Set("18M")
assert.NoError(t, err)
assert.Equal(t, SizeSuffix(18<<20), d.Value)
err = d.Set("sdfsdf")
assert.Error(t, err)
}
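The pflag.Value assertion in the test above is what the String/Set/Type methods earn: an *Option can be handed straight to pflag. A sketch of how a flag could be derived from a registered option (the name mangling shown and the surrounding variables are illustrative assumptions):

// Hypothetical binding of a backend option as a command line flag,
// assuming opt is a *fs.Option and flags is a *pflag.FlagSet.
name := strings.Replace(prefix+"-"+opt.Name, "_", "-", -1) // e.g. "example-chunk-size"
flags.VarP(opt, name, opt.ShortOpt, opt.Help)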
@ -15,6 +15,7 @@ import (

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/fshttp"
"github.com/pkg/errors"
"github.com/skratchdot/open-golang/open"
@ -85,9 +86,9 @@ type oldToken struct {

// GetToken returns the token saved in the config file under
// section name.
func GetToken(name string) (*oauth2.Token, error) {
func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) {
tokenString := config.FileGet(name, config.ConfigToken)
tokenString, ok := m.Get(config.ConfigToken)
if tokenString == "" {
if !ok || tokenString == "" {
return nil, errors.New("empty token found - please run rclone config again")
}
token := new(oauth2.Token)
@ -110,7 +111,7 @@ func GetToken(name string) (*oauth2.Token, error) {
token.RefreshToken = oldtoken.RefreshToken
token.Expiry = oldtoken.Expiry
// Save new format in config file
err = PutToken(name, token, false)
err = PutToken(name, m, token, false)
if err != nil {
return nil, err
}
@ -120,14 +121,14 @@ func GetToken(name string) (*oauth2.Token, error) {
// PutToken stores the token in the config file
//
// This saves the config file if it changes
func PutToken(name string, token *oauth2.Token, newSection bool) error {
func PutToken(name string, m configmap.Mapper, token *oauth2.Token, newSection bool) error {
tokenBytes, err := json.Marshal(token)
if err != nil {
return err
}
tokenString := string(tokenBytes)
old := config.FileGet(name, config.ConfigToken)
old, ok := m.Get(config.ConfigToken)
if tokenString != old {
if !ok || tokenString != old {
err = config.SetValueAndSave(name, config.ConfigToken, tokenString)
if newSection && err != nil {
fs.Debugf(name, "Added new token to config, still needs to be saved")
@ -144,6 +145,7 @@ func PutToken(name string, token *oauth2.Token, newSection bool) error {
type TokenSource struct {
mu sync.Mutex
name string
m configmap.Mapper
tokenSource oauth2.TokenSource
token *oauth2.Token
config *oauth2.Config
@ -176,7 +178,7 @@ func (ts *TokenSource) Token() (*oauth2.Token, error) {
if ts.expiryTimer != nil {
ts.expiryTimer.Reset(ts.timeToExpiry())
}
err = PutToken(ts.name, token, false)
err = PutToken(ts.name, ts.m, token, false)
if err != nil {
return nil, err
}
@ -229,27 +231,27 @@ func Context(client *http.Client) context.Context {
// config file if they are not blank.
// If any value is overridden, true is returned.
// the origConfig is copied
func overrideCredentials(name string, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
newConfig = new(oauth2.Config)
*newConfig = *origConfig
changed = false
ClientID := config.FileGet(name, config.ConfigClientID)
ClientID, ok := m.Get(config.ConfigClientID)
if ClientID != "" {
if ok && ClientID != "" {
newConfig.ClientID = ClientID
changed = true
}
ClientSecret := config.FileGet(name, config.ConfigClientSecret)
ClientSecret, ok := m.Get(config.ConfigClientSecret)
if ClientSecret != "" {
if ok && ClientSecret != "" {
newConfig.ClientSecret = ClientSecret
changed = true
}
AuthURL := config.FileGet(name, config.ConfigAuthURL)
AuthURL, ok := m.Get(config.ConfigAuthURL)
if AuthURL != "" {
if ok && AuthURL != "" {
newConfig.Endpoint.AuthURL = AuthURL
changed = true
}
TokenURL := config.FileGet(name, config.ConfigTokenURL)
TokenURL, ok := m.Get(config.ConfigTokenURL)
if TokenURL != "" {
if ok && TokenURL != "" {
newConfig.Endpoint.TokenURL = TokenURL
changed = true
}
@ -260,9 +262,9 @@ func overrideCredentials(name string, origConfig *oauth2.Config) (newConfig *oau
// configures a Client with it. It returns the client and a
// TokenSource which Invalidate may need to be called on. It uses the
// httpClient passed in as the base client.
func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
func NewClientWithBaseClient(name string, m configmap.Mapper, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
config, _ = overrideCredentials(name, config)
config, _ = overrideCredentials(name, m, config)
token, err := GetToken(name)
token, err := GetToken(name, m)
if err != nil {
return nil, nil, err
}
@ -274,6 +276,7 @@ func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *htt
// tokens in the config file
ts := &TokenSource{
name: name,
m: m,
token: token,
config: config,
ctx: ctx,
@ -284,36 +287,37 @@ func NewClientWithBaseClient(name string, config *oauth2.Config, baseClient *htt

// NewClient gets a token from the config file and configures a Client
// with it. It returns the client and a TokenSource which Invalidate may need to be called on
func NewClient(name string, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
func NewClient(name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
return NewClientWithBaseClient(name, oauthConfig, fshttp.NewClient(fs.Config))
return NewClientWithBaseClient(name, m, oauthConfig, fshttp.NewClient(fs.Config))
}

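For OAuth backends the net effect is that the Mapper received by NewFs is forwarded into oauthutil, so client_id, client_secret, auth_url and token_url overrides come through the merged flag/env/file chain rather than config.FileGet alone. A rough call-site sketch (the backend details are invented; oauthConfig is assumed to be declared elsewhere in the backend):

// Hypothetical fragment of an OAuth backend's NewFs, assuming an
// oauthConfig (*oauth2.Config) declared elsewhere in the backend.
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
    oAuthClient, ts, err := oauthutil.NewClient(name, m, oauthConfig)
    if err != nil {
        return nil, errors.Wrap(err, "failed to configure OAuth client")
    }
    _ = ts // keep the TokenSource if Invalidate may be needed later
    // ... construct and return the Fs using oAuthClient ...
}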
// Config does the initial creation of the token
//
// It may run an internal webserver to receive the results
func Config(id, name string, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
func Config(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
return doConfig(id, name, nil, config, true, opts)
return doConfig(id, name, m, nil, config, true, opts)
}

// ConfigNoOffline does the same as Config but does not pass the
// "access_type=offline" parameter.
func ConfigNoOffline(id, name string, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
func ConfigNoOffline(id, name string, m configmap.Mapper, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
return doConfig(id, name, nil, config, false, opts)
return doConfig(id, name, m, nil, config, false, opts)
}

// ConfigErrorCheck does the same as Config, but allows the backend to pass a error handling function
// This function gets called with the request made to rclone as a parameter if no code was found
func ConfigErrorCheck(id, name string, errorHandler func(*http.Request) AuthError, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
func ConfigErrorCheck(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, config *oauth2.Config, opts ...oauth2.AuthCodeOption) error {
return doConfig(id, name, errorHandler, config, true, opts)
return doConfig(id, name, m, errorHandler, config, true, opts)
}

func doConfig(id, name string, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
func doConfig(id, name string, m configmap.Mapper, errorHandler func(*http.Request) AuthError, oauthConfig *oauth2.Config, offline bool, opts []oauth2.AuthCodeOption) error {
oauthConfig, changed := overrideCredentials(name, oauthConfig)
oauthConfig, changed := overrideCredentials(name, m, oauthConfig)
automatic := config.FileGet(name, config.ConfigAutomatic) != ""
auto, ok := m.Get(config.ConfigAutomatic)
automatic := ok && auto != ""

// See if already have a token
tokenString := config.FileGet(name, "token")
tokenString, ok := m.Get("token")
if tokenString != "" {
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm() {
return nil
@ -354,7 +358,7 @@ func doConfig(id, name string, errorHandler func(*http.Request) AuthError, oauth
if err != nil {
return err
}
return PutToken(name, token, false)
return PutToken(name, m, token, false)
}
case TitleBarRedirectURL:
useWebServer = automatic
@ -436,7 +440,7 @@ func doConfig(id, name string, errorHandler func(*http.Request) AuthError, oauth
}
fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste", result)
}
return PutToken(name, token, true)
return PutToken(name, m, token, true)
}

// Local web server for collecting auth
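These wizard helpers are what a backend plugs into the new RegInfo.Config hook, passing the Mapper through so the wizard and the backend read the same merged values. A speculative registration fragment (the backend name and oauthConfig are placeholders):

// Hypothetical Config hook for an OAuth backend.
func init() {
    fs.Register(&fs.RegInfo{
        Name: "example",
        Config: func(name string, m configmap.Mapper) {
            err := oauthutil.Config("example", name, m, oauthConfig)
            if err != nil {
                log.Fatalf("Failed to configure token: %v", err)
            }
        },
        // NewFs, Options etc. as before
    })
}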
@ -4,8 +4,6 @@
package main

import (
"log"

"github.com/ncw/rclone/cmd"

_ "github.com/ncw/rclone/backend/all" // import all backends
@ -13,7 +11,5 @@ import (
)

func main() {
if err := cmd.Root.Execute(); err != nil {
cmd.Main()
log.Fatalf("Fatal error: %v", err)
}
}