Mirror of https://github.com/rclone/rclone.git

backend: fix misspellings

commit a0d4c04687
parent f3874707ee
@@ -30,7 +30,7 @@ type Options struct {
 Remote string `config:"remote"`
 }

-// NewFs contstructs an Fs from the path.
+// NewFs constructs an Fs from the path.
 //
 // The returned Fs is the actual Fs, referenced by remote in the config
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
@@ -307,7 +307,7 @@ func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline
 return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log})
 }

-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Parse config into Options struct
 opt := new(Options)
@@ -17,12 +17,12 @@ type Error struct {
 Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
 }

-// Error statisfies the error interface
+// Error satisfies the error interface
 func (e *Error) Error() string {
 return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
 }

-// Fatal statisfies the Fatal interface
+// Fatal satisfies the Fatal interface
 //
 // It indicates which errors should be treated as fatal
 func (e *Error) Fatal() bool {
@@ -100,7 +100,7 @@ func RemoveVersion(remote string) (t Timestamp, newRemote string) {
 return Timestamp(newT), base[:versionStart] + ext
 }

-// IsZero returns true if the timestamp is unitialised
+// IsZero returns true if the timestamp is uninitialized
 func (t Timestamp) IsZero() bool {
 return time.Time(t).IsZero()
 }
@@ -117,7 +117,7 @@ This value should be set no larger than 4.657GiB (== 5GB).`,
 When uploading large files, chunk the file into this size. Note that
 these chunks are buffered in memory and there might a maximum of
 "--transfers" chunks in progress at once. 5,000,000 Bytes is the
-minimim size.`,
+minimum size.`,
 Default: defaultChunkSize,
 Advanced: true,
 }, {
@@ -319,7 +319,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 return
 }

-// NewFs contstructs an Fs from the path, bucket:path
+// NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Parse config into Options struct
 opt := new(Options)
@@ -1459,7 +1459,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 // Content-Type b2/x-auto to automatically set the stored Content-Type
 // post upload. In the case where a file extension is absent or the
 // lookup fails, the Content-Type is set to application/octet-stream. The
-// Content-Type mappings can be purused here.
+// Content-Type mappings can be pursued here.
 //
 // X-Bz-Content-Sha1
 // required
@@ -45,7 +45,7 @@ type Error struct {
 RequestID string `json:"request_id"`
 }

-// Error returns a string for the error and statistifes the error interface
+// Error returns a string for the error and satisfies the error interface
 func (e *Error) Error() string {
 out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
 if e.Message != "" {
@@ -57,7 +57,7 @@ func (e *Error) Error() string {
 return out
 }

-// Check Error statisfies the error interface
+// Check Error satisfies the error interface
 var _ error = (*Error)(nil)

 // ItemFields are the fields needed for FileInfo
@@ -171,13 +171,13 @@ var retryErrorCodes = []int{
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
 func shouldRetry(resp *http.Response, err error) (bool, error) {
-authRety := false
+authRetry := false

 if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
-authRety = true
+authRetry = true
 fs.Debugf(nil, "Should retry: %v", err)
 }
-return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
+return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }

 // substitute reserved characters for box
@@ -530,10 +530,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 //
 // The new object may have been created if an error is returned
 func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
+existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
 switch err {
 case nil:
-return exisitingObj, exisitingObj.Update(in, src, options...)
+return existingObj, existingObj.Update(in, src, options...)
 case fs.ErrorObjectNotFound:
 // Not found so create it
 return f.PutUnchecked(in, src)
backend/cache/cache.go (vendored)
@@ -576,7 +576,7 @@ The slice indices are similar to Python slices: start[:end]

 start is the 0 based chunk number from the beginning of the file
 to fetch inclusive. end is 0 based chunk number from the beginning
-of the file to fetch exclisive.
+of the file to fetch exclusive.
 Both values can be negative, in which case they count from the back
 of the file. The value "-5:" represents the last 5 chunks of a file.

@@ -870,7 +870,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
 }
 }

-// ChangeNotify can subsribe multiple callers
+// ChangeNotify can subscribe multiple callers
 // this is coupled with the wrapped fs ChangeNotify (if it supports it)
 // and also notifies other caches (i.e VFS) to clear out whenever something changes
 func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
@@ -1549,7 +1549,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }

 if srcObj.isTempFile() {
-// we check if the feature is stil active
+// we check if the feature is still active
 if f.opt.TempWritePath == "" {
 fs.Errorf(srcObj, "can't copy - this is a local cached file but this feature is turned off this run")
 return nil, fs.ErrorCantCopy
@@ -1625,7 +1625,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {

 // if this is a temp object then we perform the changes locally
 if srcObj.isTempFile() {
-// we check if the feature is stil active
+// we check if the feature is still active
 if f.opt.TempWritePath == "" {
 fs.Errorf(srcObj, "can't move - this is a local cached file but this feature is turned off this run")
 return nil, fs.ErrorCantMove
@@ -748,7 +748,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) {
 return nil, fh.finishAndClose(ErrorEncryptedBadMagic)
 }
-// retreive the nonce
+// retrieve the nonce
 fh.nonce.fromBuf(readBuf[fileMagicSize:])
 fh.initialNonce = fh.nonce
 return fh, nil
@@ -122,7 +122,7 @@ func NewCipher(m configmap.Mapper) (Cipher, error) {
 return newCipherForConfig(opt)
 }

-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 // Parse config into Options struct
 opt := new(Options)
@@ -555,7 +555,7 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
 }

 // ComputeHash takes the nonce from o, and encrypts the contents of
-// src with it, and calcuates the hash given by HashType on the fly
+// src with it, and calculates the hash given by HashType on the fly
 //
 // Note that we break lots of encapsulation in this function.
 func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
@@ -482,7 +482,7 @@ func (f *Fs) Features() *fs.Features {
 return f.features
 }

-// shouldRetry determines whehter a given err rates being retried
+// shouldRetry determines whether a given err rates being retried
 func shouldRetry(err error) (bool, error) {
 if err == nil {
 return false, nil
@@ -863,7 +863,7 @@ func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
 return
 }

-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, path string, m configmap.Mapper) (fs.Fs, error) {
 // Parse config into Options struct
 opt := new(Options)
@@ -1483,7 +1483,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 in := make(chan listREntry, inputBuffer)
 out := make(chan error, fs.Config.Checkers)
 list := walk.NewListRHelper(callback)
-overfflow := []listREntry{}
+overflow := []listREntry{}

 cb := func(entry fs.DirEntry) error {
 mu.Lock()
@@ -1493,7 +1493,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 case in <- listREntry{d.ID(), d.Remote()}:
 wg.Add(1)
 default:
-overfflow = append(overfflow, listREntry{d.ID(), d.Remote()})
+overflow = append(overflow, listREntry{d.ID(), d.Remote()})
 }
 }
 return list.Add(entry)
@@ -1509,18 +1509,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 // wait until the all directories are processed
 wg.Wait()
 // if the input channel overflowed add the collected entries to the channel now
-for len(overfflow) > 0 {
+for len(overflow) > 0 {
 mu.Lock()
-l := len(overfflow)
-// only fill half of the channel to prevent entries beeing put into overfflow again
+l := len(overflow)
+// only fill half of the channel to prevent entries beeing put into overflow again
 if l > inputBuffer/2 {
 l = inputBuffer / 2
 }
 wg.Add(l)
-for _, d := range overfflow[:l] {
+for _, d := range overflow[:l] {
 in <- d
 }
-overfflow = overfflow[l:]
+overflow = overflow[l:]
 mu.Unlock()

 // wait again for the completion of all directories
@@ -1711,14 +1711,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
 return shouldRetry(err)
 })
 if err != nil {
-return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.Name, srcDir)
+return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Name, srcDir)
 }
 }
 // rmdir (into trash) the now empty source directory
 fs.Infof(srcDir, "removing empty directory")
 err = f.rmdir(srcDir.ID(), true)
 if err != nil {
-return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir)
+return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
 }
 }
 return nil
@@ -2137,7 +2137,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 // ChangeNotify calls the passed function with a path that has had changes.
 // If the implementation uses polling, it should adhere to the given interval.
 //
-// Automatically restarts itself in case of unexpected behaviour of the remote.
+// Automatically restarts itself in case of unexpected behavior of the remote.
 //
 // Close the returned channel to stop being notified.
 func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
@@ -185,7 +185,7 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
 // been 200 OK.
 //
 // So parse the response out of the body. We aren't expecting
-// any other 2xx codes, so we parse it unconditionaly on
+// any other 2xx codes, so we parse it unconditionally on
 // StatusCode
 if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
 return 598, err
@@ -213,7 +213,7 @@ func shouldRetry(err error) (bool, error) {
 }
 return true, err
 }
-// Keep old behaviour for backward compatibility
+// Keep old behavior for backward compatibility
 if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
 return true, err
 }
@@ -239,7 +239,7 @@ func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error)
 return
 }

-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Parse config into Options struct
 opt := new(Options)
@@ -166,7 +166,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
 f.poolMu.Unlock()
 }

-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
 // Parse config into Options struct
@@ -300,7 +300,7 @@ func (f *Fs) Features() *fs.Features {
 return f.features
 }

-// shouldRetry determines whehter a given err rates being retried
+// shouldRetry determines whether a given err rates being retried
 func shouldRetry(err error) (again bool, errOut error) {
 again = false
 if err != nil {
@@ -348,7 +348,7 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
 return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
 }

-// NewFs contstructs an Fs from the path, bucket:path
+// NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 var oAuthClient *http.Client

@@ -251,7 +251,7 @@ func parseName(base *url.URL, name string) (string, error) {
 }
 // calculate the name relative to the base
 name = u.Path[len(base.Path):]
-// musn't be empty
+// mustn't be empty
 if name == "" {
 return "", errNameIsEmpty
 }
@@ -103,7 +103,7 @@ func init() {
 var jsonToken api.TokenJSON
 resp, err := srv.CallJSON(&opts, nil, &jsonToken)
 if err != nil {
-// if 2fa is enabled the first request is expected to fail. we'lls do another request with the 2fa code as an additional http header
+// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header
 if resp != nil {
 if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
 fmt.Printf("This account has 2 factor authentication enabled you will receive a verification code via SMS.\n")
@@ -163,7 +163,7 @@ func init() {
 Advanced: true,
 }, {
 Name: "upload_resume_limit",
-Help: "Files bigger than this can be resumed if the upload failes.",
+Help: "Files bigger than this can be resumed if the upload fail's.",
 Default: fs.SizeSuffix(10 * 1024 * 1024),
 Advanced: true,
 }},
@@ -361,7 +361,7 @@ func grantTypeFilter(req *http.Request) {
 }
 _ = req.Body.Close()

-// make the refesh token upper case
+// make the refresh token upper case
 refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))

 // set the new ReadCloser (with a dummy Close())
@@ -769,7 +769,7 @@ func (f *Fs) Purge() error {
 return f.purgeCheck("", false)
 }

-// copyOrMoves copys or moves directories or files depending on the mthod parameter
+// copyOrMoves copies or moves directories or files depending on the method parameter
 func (f *Fs) copyOrMove(method, src, dest string) (info *api.JottaFile, err error) {
 opts := rest.Opts{
 Method: "POST",
@@ -1080,7 +1080,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 func readMD5(in io.Reader, size, threshold int64) (md5sum string, out io.Reader, cleanup func(), err error) {
 // we need a MD5
 md5Hasher := md5.New()
-// use the teeReader to write to the local file AND caclulate the MD5 while doing so
+// use the teeReader to write to the local file AND calculate the MD5 while doing so
 teeReader := io.TeeReader(in, md5Hasher)

 // nothing to clean up by default
@@ -2,7 +2,7 @@
 Translate file names for JottaCloud adapted from OneDrive


-The following characters are JottaClous reserved characters, and can't
+The following characters are JottaCloud reserved characters, and can't
 be used in JottaCloud folder and file names.

 jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
@@ -225,10 +225,10 @@ func (f *Fs) Features() *fs.Features {
 return f.features
 }

-// caseInsenstive returns whether the remote is case insensitive or not
+// caseInsensitive returns whether the remote is case insensitive or not
 func (f *Fs) caseInsensitive() bool {
 // FIXME not entirely accurate since you can have case
-// sensitive Fses on darwin and case insenstive Fses on linux.
+// sensitive Fses on darwin and case insensitive Fses on linux.
 // Should probably check but that would involve creating a
 // file in the remote to be most accurate which probably isn't
 // desirable.
@@ -288,7 +288,7 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
 }
 return nil, err
 }
-// Handle the odd case, that a symlink was specfied by name without the link suffix
+// Handle the odd case, that a symlink was specified by name without the link suffix
 if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink {
 return nil, fs.ErrorObjectNotFound
 }
@@ -958,7 +958,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

 if o.translatedLink {
 if err == nil {
-// Remove any current symlink or file, if one exsits
+// Remove any current symlink or file, if one exists
 if _, err := os.Lstat(o.path); err == nil {
 if removeErr := os.Remove(o.path); removeErr != nil {
 fs.Errorf(o, "Failed to remove previous file: %v", removeErr)
@@ -497,7 +497,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // Creates from the parameters passed in a half finished Object which
 // must have setMetaData called on it
 //
-// Returns the dirNode, obect, leaf and error
+// Returns the dirNode, object, leaf and error
 //
 // Used to create new objects
 func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, dirNode *mega.Node, leaf string, err error) {
@@ -523,10 +523,10 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
 func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-exisitingObj, err := f.newObjectWithInfo(src.Remote(), nil)
+existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
 switch err {
 case nil:
-return exisitingObj, exisitingObj.Update(in, src, options...)
+return existingObj, existingObj.Update(in, src, options...)
 case fs.ErrorObjectNotFound:
 // Not found so create it
 return f.PutUnchecked(in, src)
@@ -847,14 +847,14 @@ func (f *Fs) MergeDirs(dirs []fs.Directory) error {
 return shouldRetry(err)
 })
 if err != nil {
-return errors.Wrapf(err, "MergDirs move failed on %q in %v", info.GetName(), srcDir)
+return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.GetName(), srcDir)
 }
 }
 // rmdir (into trash) the now empty source directory
 fs.Infof(srcDir, "removing empty directory")
 err = f.deleteNode(srcDirNode)
 if err != nil {
-return errors.Wrapf(err, "MergDirs move failed to rmdir %q", srcDir)
+return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
 }
 }
 return nil
@@ -1129,7 +1129,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 return errors.Wrap(err, "failed to finish upload")
 }

-// If the upload succeded and the original object existed, then delete it
+// If the upload succeeded and the original object existed, then delete it
 if o.info != nil {
 err = o.fs.deleteNode(o.info)
 if err != nil {
@@ -25,7 +25,7 @@ type Error struct {
 } `json:"error"`
 }

-// Error returns a string for the error and statistifes the error interface
+// Error returns a string for the error and satisfies the error interface
 func (e *Error) Error() string {
 out := e.ErrorInfo.Code
 if e.ErrorInfo.InnerError.Code != "" {
@@ -35,7 +35,7 @@ func (e *Error) Error() string {
 return out
 }

-// Check Error statisfies the error interface
+// Check Error satisfies the error interface
 var _ error = (*Error)(nil)

 // Identity represents an identity of an actor. For example, and actor
@@ -295,9 +295,9 @@ func (i *Item) GetID() string {
 return i.ID
 }

-// GetDriveID returns a normalized ParentReferance of the item
+// GetDriveID returns a normalized ParentReference of the item
 func (i *Item) GetDriveID() string {
-return i.GetParentReferance().DriveID
+return i.GetParentReference().DriveID
 }

 // GetName returns a normalized Name of the item
@@ -398,8 +398,8 @@ func (i *Item) GetLastModifiedDateTime() Timestamp {
 return i.LastModifiedDateTime
 }

-// GetParentReferance returns a normalized ParentReferance of the item
-func (i *Item) GetParentReferance() *ItemReference {
+// GetParentReference returns a normalized ParentReference of the item
+func (i *Item) GetParentReference() *ItemReference {
 if i.IsRemote() && i.ParentReference == nil {
 return i.RemoteItem.ParentReference
 }
@@ -324,13 +324,13 @@ var retryErrorCodes = []int{
 // shouldRetry returns a boolean as to whether this resp and err
 // deserve to be retried. It returns the err as a convenience
 func shouldRetry(resp *http.Response, err error) (bool, error) {
-authRety := false
+authRetry := false

 if resp != nil && resp.StatusCode == 401 && len(resp.Header["Www-Authenticate"]) == 1 && strings.Index(resp.Header["Www-Authenticate"][0], "expired_token") >= 0 {
-authRety = true
+authRetry = true
 fs.Debugf(nil, "Should retry: %v", err)
 }
-return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
+return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
 }

 // readMetaDataForPathRelativeToID reads the metadata for a path relative to an item that is addressed by its normalized ID.
@@ -119,7 +119,7 @@ func (f *Fs) DirCacheFlush() {
 f.dirCache.ResetRoot()
 }

-// NewFs contstructs an Fs from the path, bucket:path
+// NewFs constructs an Fs from the path, bucket:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Parse config into Options struct
 opt := new(Options)
@@ -13,7 +13,7 @@ type Error struct {
 } `json:"error"`
 }

-// Error statisfies the error interface
+// Error satisfies the error interface
 func (e *Error) Error() string {
 return fmt.Sprintf("%s (Error %d)", e.Info.Message, e.Info.Code)
 }
@@ -41,7 +41,7 @@ type Error struct {
 ErrorString string `json:"error"`
 }

-// Error returns a string for the error and statistifes the error interface
+// Error returns a string for the error and satisfies the error interface
 func (e *Error) Error() string {
 return fmt.Sprintf("pcloud error: %s (%d)", e.ErrorString, e.Result)
 }
@@ -58,7 +58,7 @@ func (e *Error) Update(err error) error {
 return e
 }

-// Check Error statisfies the error interface
+// Check Error satisfies the error interface
 var _ error = (*Error)(nil)

 // Item describes a folder or a file as returned by Get Folder Items and others
@@ -385,7 +385,7 @@ func fileIDtoNumber(fileID string) string {
 if len(fileID) > 0 && fileID[0] == 'f' {
 return fileID[1:]
 }
-fs.Debugf(nil, "Invalid filee id %q", fileID)
+fs.Debugf(nil, "Invalid file id %q", fileID)
 return fileID
 }

@@ -449,7 +449,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }
 _, err = bucketInit.PutObject(key, &req)
 if err != nil {
-fs.Debugf(f, "Copied Faild, API Error: %v", err)
+fs.Debugf(f, "Copy Failed, API Error: %v", err)
 return nil, err
 }
 return f.NewObject(remote)
@@ -756,7 +756,7 @@ func (f *Fs) Mkdir(dir string) error {
 }
 switch *statistics.Status {
 case "deleted":
-fs.Debugf(f, "Wiat for qingstor sync bucket status, retries: %d", retries)
+fs.Debugf(f, "Wait for qingstor sync bucket status, retries: %d", retries)
 time.Sleep(time.Second * 1)
 retries++
 continue
@@ -875,7 +875,7 @@ func (o *Object) readMetaData() (err error) {
 fs.Debugf(o, "Read metadata of key: %s", key)
 resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
 if err != nil {
-fs.Debugf(o, "Read metadata faild, API Error: %v", err)
+fs.Debugf(o, "Read metadata failed, API Error: %v", err)
 if e, ok := err.(*qsErr.QingStorError); ok {
 if e.StatusCode == http.StatusNotFound {
 return fs.ErrorObjectNotFound
@@ -163,7 +163,7 @@ func (u *uploader) singlePartUpload(buf io.Reader, size int64) error {

 _, err := bucketInit.PutObject(u.cfg.key, &req)
 if err == nil {
-fs.Debugf(u, "Upload single objcet finished")
+fs.Debugf(u, "Upload single object finished")
 }
 return err
 }
@@ -237,10 +237,10 @@ func init() {
 Help: "EU Cross Region Amsterdam Private Endpoint",
 }, {
 Value: "s3.eu-gb.objectstorage.softlayer.net",
-Help: "Great Britan Endpoint",
+Help: "Great Britain Endpoint",
 }, {
 Value: "s3.eu-gb.objectstorage.service.networklayer.com",
-Help: "Great Britan Private Endpoint",
+Help: "Great Britain Private Endpoint",
 }, {
 Value: "s3.ap-geo.objectstorage.softlayer.net",
 Help: "APAC Cross Regional Endpoint",
@@ -450,7 +450,7 @@ func init() {
 Help: "US East Region Flex",
 }, {
 Value: "us-south-standard",
-Help: "US Sout hRegion Standard",
+Help: "US South Region Standard",
 }, {
 Value: "us-south-vault",
 Help: "US South Region Vault",
@@ -474,16 +474,16 @@ func init() {
 Help: "EU Cross Region Flex",
 }, {
 Value: "eu-gb-standard",
-Help: "Great Britan Standard",
+Help: "Great Britain Standard",
 }, {
 Value: "eu-gb-vault",
-Help: "Great Britan Vault",
+Help: "Great Britain Vault",
 }, {
 Value: "eu-gb-cold",
-Help: "Great Britan Cold",
+Help: "Great Britain Cold",
 }, {
 Value: "eu-gb-flex",
-Help: "Great Britan Flex",
+Help: "Great Britain Flex",
 }, {
 Value: "ap-standard",
 Help: "APAC Standard",
@@ -842,7 +842,7 @@ var retryErrorCodes = []int{
 func (f *Fs) shouldRetry(err error) (bool, error) {
 // If this is an awserr object, try and extract more useful information to determine if we should retry
 if awsError, ok := err.(awserr.Error); ok {
-// Simple case, check the original embedded error in case it's generically retriable
+// Simple case, check the original embedded error in case it's generically retryable
 if fserrors.ShouldRetry(awsError.OrigErr()) {
 return true, err
 }
@@ -430,7 +430,7 @@ func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, n
 return f, nil
 }

-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 // Parse config into Options struct
 opt := new(Options)
@@ -177,8 +177,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 // At least one value will be written to the channel,
 // specifying the initial value and updated values might
 // follow. A 0 Duration should pause the polling.
-// The ChangeNotify implemantion must empty the channel
-// regulary. When the channel gets closed, the implemantion
+// The ChangeNotify implementation must empty the channel
+// regularly. When the channel gets closed, the implementation
 // should stop polling and release resources.
 func (f *Fs) ChangeNotify(fn func(string, fs.EntryType), ch <-chan time.Duration) {
 var remoteChans []chan time.Duration
@@ -124,7 +124,7 @@ type PropValue struct {
 Value string `xml:",chardata"`
 }

-// Error is used to desribe webdav errors
+// Error is used to describe webdav errors
 //
 // <d:error xmlns:d="DAV:" xmlns:s="http://sabredav.org/ns">
 // <s:exception>Sabre\DAV\Exception\NotFound</s:exception>
@@ -137,7 +137,7 @@ type Error struct {
 StatusCode int
 }

-// Error returns a string for the error and statistifes the error interface
+// Error returns a string for the error and satisfies the error interface
 func (e *Error) Error() string {
 var out []string
 if e.Message != "" {
@@ -102,7 +102,7 @@ func (ca *CookieAuth) Cookies() (*CookieResponse, error) {
 func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error) {
 spRoot, err := url.Parse(ca.endpoint)
 if err != nil {
-return nil, errors.Wrap(err, "Error while contructing endpoint URL")
+return nil, errors.Wrap(err, "Error while constructing endpoint URL")
 }

 u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0")
@@ -121,7 +121,7 @@ func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (*CookieResponse, error
 Jar: jar,
 }

-// Send the previously aquired Token as a Post parameter
+// Send the previously acquired Token as a Post parameter
 if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Succ.Token)); err != nil {
 return nil, errors.Wrap(err, "Error while grabbing cookies from endpoint: %v")
 }
@@ -249,7 +249,7 @@ func errorHandler(resp *http.Response) error {
 return errResponse
 }

-// addShlash makes sure s is terminated with a / if non empty
+// addSlash makes sure s is terminated with a / if non empty
 func addSlash(s string) string {
 if s != "" && !strings.HasSuffix(s, "/") {
 s += "/"
@@ -56,7 +56,7 @@ type AsyncInfo struct {
 Templated bool `json:"templated"`
 }

-// AsyncStatus is returned when requesting the status of an async operations. Possble values in-progress, success, failure
+// AsyncStatus is returned when requesting the status of an async operations. Possible values in-progress, success, failure
 type AsyncStatus struct {
 Status string `json:"status"`
 }
@@ -634,7 +634,7 @@ func (f *Fs) Purge() error {
 return f.purgeCheck("", false)
 }

-// copyOrMoves copys or moves directories or files depending on the mthod parameter
+// copyOrMoves copies or moves directories or files depending on the method parameter
 func (f *Fs) copyOrMove(method, src, dst string, overwrite bool) (err error) {
 opts := rest.Opts{
 Method: "POST",
@@ -1107,7 +1107,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 return err
 }

-//if file uploaded sucessfully then return metadata
+//if file uploaded successfully then return metadata
 o.modTime = modTime
 o.md5sum = "" // according to unit tests after put the md5 is empty.
 o.size = int64(in1.BytesRead()) // better solution o.readMetaData() ?