all: fix spelling across the project
* abcdefghijklmnopqrstuvwxyz
* accounting
* additional
* allowed
* almost
* already
* appropriately
* arise
* bandwidth
* behave
* bidirectional
* brackets
* cached
* characters
* cloud
* committing
* concatenating
* configured
* constructs
* current
* cutoff
* deferred
* different
* directory
* disposition
* dropbox
* either way
* error
* excess
* experiments
* explicitly
* externally
* files
* github
* gzipped
* hierarchies
* huffman
* hyphen
* implicitly
* independent
* insensitive
* integrity
* libraries
* literally
* metadata
* mimics
* missing
* modification
* multipart
* multiple
* nightmare
* nonexistent
* number
* obscure
* ourselves
* overridden
* potatoes
* preexisting
* priority
* received
* remote
* replacement
* represents
* reproducibility
* response
* satisfies
* sensitive
* separately
* separator
* specifying
* string
* successful
* synchronization
* syncing
* šenfeld
* take
* temporarily
* testcontents
* that
* the
* themselves
* throttling
* timeout
* transaction
* transferred
* unnecessary
* using
* webbrowser
* which
* with
* workspace

Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>
Parent: 0008cb4934
Commit: ce3b65e6dc
@@ -77,7 +77,7 @@ Make sure you
 * Add [documentation](#writing-documentation) for a new feature.
 * [Commit your changes](#committing-your-changes) using the [message guideline](#commit-messages).
 
-When you are done with that push your changes to Github:
+When you are done with that push your changes to GitHub:
 
     git push -u origin my-new-feature
 
@@ -88,7 +88,7 @@ Your changes will then get reviewed and you might get asked to fix some stuff. I
 
 You may sometimes be asked to [base your changes on the latest master](#basing-your-changes-on-the-latest-master) or [squash your commits](#squashing-your-commits).
 
-## Using Git and Github ##
+## Using Git and GitHub ##
 
 ### Committing your changes ###
 
@@ -239,7 +239,7 @@ type GetFileInfoRequest struct {
 	// If the original source of the file being uploaded has a last
 	// modified time concept, Backblaze recommends using
 	// src_last_modified_millis as the name, and a string holding the base
-	// 10 number number of milliseconds since midnight, January 1, 1970
+	// 10 number of milliseconds since midnight, January 1, 1970
 	// UTC. This fits in a 64 bit integer such as the type "long" in the
 	// programming language Java. It is intended to be compatible with
 	// Java's time long. For example, it can be passed directly into the
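The comment above documents Backblaze's `src_last_modified_millis` convention: a base-10 string holding milliseconds since the Unix epoch, small enough for a 64-bit integer. A minimal Go sketch of producing such a value (an illustration of the convention only, not code from this commit):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	modTime := time.Date(2022, 8, 1, 12, 0, 0, 0, time.UTC)
	// Milliseconds since midnight, January 1, 1970 UTC - fits in an
	// int64, matching Java's currentTimeMillis "long".
	millis := modTime.UnixMilli()
	fmt.Println("src_last_modified_millis:", strconv.FormatInt(millis, 10))
}
```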
@@ -14,7 +14,7 @@ const (
 	timeFormat = `"` + time.RFC3339 + `"`
 )
 
-// Time represents represents date and time information for the
+// Time represents date and time information for the
 // box API, by using RFC3339
 type Time time.Time
 
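The quoted `timeFormat` above is the usual Go trick for JSON time types: the layout string itself contains the surrounding quotes, so `Format` and `Parse` handle a complete JSON string token. A hedged sketch of the full pattern (simplified; the box backend's actual methods may differ):

```go
package api

import "time"

const timeFormat = `"` + time.RFC3339 + `"`

// Time wraps time.Time so it marshals as a quoted RFC3339 string.
type Time time.Time

// MarshalJSON renders the time with the quoted layout, so the output
// is already a valid JSON string token (the quotes are in the layout).
func (t Time) MarshalJSON() ([]byte, error) {
	return []byte(time.Time(t).Format(timeFormat)), nil
}

// UnmarshalJSON parses the quoted RFC3339 string back into a Time.
func (t *Time) UnmarshalJSON(data []byte) error {
	parsed, err := time.Parse(timeFormat, string(data))
	*t = Time(parsed)
	return err
}
```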
@@ -64,7 +64,7 @@ import (
 // length of 13 decimals it makes a 7-digit base-36 number.
 //
 // When transactions is set to the norename style, data chunks will
-// keep their temporary chunk names (with the transacion identifier
+// keep their temporary chunk names (with the transaction identifier
 // suffix). To distinguish them from temporary chunks, the txn field
 // of the metadata file is set to match the transaction identifier of
 // the data chunks.
@@ -1079,7 +1079,7 @@ func (o *Object) readMetadata(ctx context.Context) error {
 
 // readXactID returns the transaction ID stored in the passed metadata object
 func (o *Object) readXactID(ctx context.Context) (xactID string, err error) {
-	// if xactID has already been read and cahced return it now
+	// if xactID has already been read and cached return it now
 	if o.xIDCached {
 		return o.xactID, nil
 	}
@@ -1,4 +1,4 @@
-// Package combine implents a backend to combine multipe remotes in a directory tree
+// Package combine implents a backend to combine multiple remotes in a directory tree
 package combine
 
 /*
@@ -90,7 +90,7 @@ Generally -1 (default, equivalent to 5) is recommended.
 Levels 1 to 9 increase compression at the cost of speed. Going past 6
 generally offers very little return.
 
-Level -2 uses Huffmann encoding only. Only use if you know what you
+Level -2 uses Huffman encoding only. Only use if you know what you
 are doing.
 Level 0 turns off compression.`,
 			Default: sgzip.DefaultCompression,
@@ -130,7 +130,7 @@ type Fs struct {
 	features *fs.Features // optional features
 }
 
-// NewFs contstructs an Fs from the path, container:path
+// NewFs constructs an Fs from the path, container:path
 func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 	// Parse config into Options struct
 	opt := new(Options)
@@ -451,7 +451,7 @@ func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, mod
 		return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...)
 	}
 
-	// Need to include what we allready read
+	// Need to include what we already read
 	in = &ReadCloserWrapper{
 		Reader: io.MultiReader(bytes.NewReader(buf), in),
 		Closer: in,
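The hunk above stitches bytes that were already consumed from the stream back in front of the remainder using `io.MultiReader`. A self-contained sketch of that buffered-peek pattern (names here are illustrative; rclone's `ReadCloserWrapper` is analogous):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper recombines peeked bytes with the rest of the
// stream, while still closing the underlying reader.
type readCloserWrapper struct {
	io.Reader
	io.Closer
}

func main() {
	in := io.NopCloser(strings.NewReader("hello world"))
	buf := make([]byte, 5)
	n, _ := io.ReadFull(in, buf) // peek the first bytes

	// Need to include what we already read before the remainder.
	rc := readCloserWrapper{
		Reader: io.MultiReader(bytes.NewReader(buf[:n]), in),
		Closer: in,
	}
	defer rc.Close()

	rest, _ := io.ReadAll(rc)
	fmt.Println(string(rest)) // prints "hello world"
}
```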
@@ -731,7 +731,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	}
 
 	// If our new object is compressed we have to rename it with the correct size.
-	// Uncompressed objects don't store the size in the name so we they'll allready have the correct name.
+	// Uncompressed objects don't store the size in the name so we they'll already have the correct name.
 	if compressible {
 		wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object)
 		if err != nil {
@@ -742,7 +742,7 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
 	return newObj, nil
 }
 
-// Temporarely disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
+// Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects
 // will break stuff. Right no I can't think of a way to make this work.
 
 // PutUnchecked uploads the object
@@ -125,7 +125,7 @@ names, or for debugging purposes.`,
 
 This option could help with shortening the encrypted filename. The
 suitable option would depend on the way your remote count the filename
-length and if it's case sensitve.`,
+length and if it's case sensitive.`,
 			Default: "base32",
 			Examples: []fs.OptionExample{
 				{
@@ -3305,7 +3305,7 @@ drives found and a combined drive.
     upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
 
 Adding this to the rclone config file will cause those team drives to
-be accessible with the aliases shown. Any illegal charactes will be
+be accessible with the aliases shown. Any illegal characters will be
 substituted with "_" and duplicate names will have numbers suffixed.
 It will also add a remote called AllDrives which shows all the shared
 drives combined into one directory tree.
@@ -309,7 +309,7 @@ func (b *batcher) Shutdown() {
 	}
 	b.shutOnce.Do(func() {
 		atexit.Unregister(b.atexit)
-		fs.Infof(b.f, "Commiting uploads - please wait...")
+		fs.Infof(b.f, "Committing uploads - please wait...")
 		// show that batcher is shutting down
 		close(b.closed)
 		// quit the commitLoop by sending a quitRequest message
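The `shutOnce.Do` call above is the standard `sync.Once` idiom for making shutdown idempotent. A minimal sketch (a simplified batcher with illustrative fields, not the dropbox backend's real type):

```go
package main

import (
	"fmt"
	"sync"
)

type batcher struct {
	shutOnce sync.Once
	closed   chan struct{}
}

// Shutdown is safe to call multiple times: the body runs exactly once.
func (b *batcher) Shutdown() {
	b.shutOnce.Do(func() {
		fmt.Println("Committing uploads - please wait...")
		close(b.closed) // show that the batcher is shutting down
	})
}

func main() {
	b := &batcher{closed: make(chan struct{})}
	b.Shutdown()
	b.Shutdown() // second call is a no-op
}
```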
@@ -268,7 +268,7 @@ default based on the batch_mode in use.
 			Advanced: true,
 		}, {
 			Name: "batch_commit_timeout",
-			Help: `Max time to wait for a batch to finish comitting`,
+			Help: `Max time to wait for a batch to finish committing`,
 			Default: fs.Duration(10 * time.Minute),
 			Advanced: true,
 		}, {
@@ -1669,7 +1669,7 @@ func (o *Object) uploadChunked(ctx context.Context, in0 io.Reader, commitInfo *f
 		correctOffset := uErr.EndpointError.IncorrectOffset.CorrectOffset
 		delta := int64(correctOffset) - int64(cursor.Offset)
 		skip += delta
-		what := fmt.Sprintf("incorrect offset error receved: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
+		what := fmt.Sprintf("incorrect offset error received: sent %d, need %d, skip %d", cursor.Offset, correctOffset, skip)
 		if skip < 0 {
 			return false, fmt.Errorf("can't seek backwards to correct offset: %s", what)
 		} else if skip == chunkSize {
@@ -84,7 +84,7 @@ type CopyFileResponse struct {
 	URLs []FileCopy `json:"urls"`
 }
 
-// FileCopy is used in the the CopyFileResponse
+// FileCopy is used in the CopyFileResponse
 type FileCopy struct {
 	FromURL string `json:"from_url"`
 	ToURL string `json:"to_url"`
@@ -19,7 +19,7 @@ const (
 	timeFormatJSON = `"` + timeFormatParameters + `"`
 )
 
-// Time represents represents date and time information for the
+// Time represents date and time information for the
 // filefabric API
 type Time time.Time
 
@@ -95,7 +95,7 @@ type Status struct {
 	// Warning string `json:"warning"` // obsolete
 }
 
-// Status statisfies the error interface
+// Status satisfies the error interface
 func (e *Status) Error() string {
 	return fmt.Sprintf("%s (%s)", e.Message, e.Code)
 }
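The corrected comment describes a common Go idiom: giving an API status type an `Error() string` method so it satisfies the built-in `error` interface. A small self-contained sketch (trimmed to just the fields the method needs):

```go
package main

import "fmt"

type Status struct {
	Message string
	Code    string
}

// Error makes *Status usable anywhere an error is expected.
func (e *Status) Error() string {
	return fmt.Sprintf("%s (%s)", e.Message, e.Code)
}

func main() {
	var err error = &Status{Message: "not found", Code: "404"}
	fmt.Println(err) // not found (404)
}
```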
@@ -843,7 +843,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 	return f.purgeCheck(ctx, dir, false)
 }
 
-// Wait for the the background task to complete if necessary
+// Wait for the background task to complete if necessary
 func (f *Fs) waitForBackgroundTask(ctx context.Context, taskID api.String) (err error) {
 	if taskID == "" || taskID == "0" {
 		// No task to wait for
@@ -311,7 +311,7 @@ rclone does if you know the bucket exists already.
 			Help: `If set this will decompress gzip encoded objects.
 
 It is possible to upload objects to GCS with "Content-Encoding: gzip"
-set. Normally rclone will download these files files as compressed objects.
+set. Normally rclone will download these files as compressed objects.
 
 If this flag is set then rclone will decompress these files with
 "Content-Encoding: gzip" as they are received. This means that rclone
@@ -330,7 +330,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, transaction)
 	}
 
-	// Do not allow the root-prefix to be non-existent nor a directory,
+	// Do not allow the root-prefix to be nonexistent nor a directory,
 	// but it can be empty.
 	if f.opt.RootPrefix != "" {
 		item, err := f.fetchMetadataForPath(ctx, f.opt.RootPrefix, api.HiDriveObjectNoMetadataFields)
@@ -623,7 +623,7 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
 // should be retried after the parent-directories of the destination have been created.
 // If so, it will create the parent-directories.
 //
-// If any errors arrise while finding the source or
+// If any errors arise while finding the source or
 // creating the parent-directory those will be returned.
 // Otherwise returns the originalError.
 func (f *Fs) shouldRetryAndCreateParents(ctx context.Context, destinationPath string, sourcePath string, originalError error) (bool, error) {
@@ -961,7 +961,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	} else {
 		_, _, err = o.fs.uploadFileChunked(ctx, resolvedPath, in, modTime, int(o.fs.opt.UploadChunkSize), o.fs.opt.UploadConcurrency)
 	}
-	// Try to check if object was updated, eitherway.
+	// Try to check if object was updated, either way.
 	// Metadata should be updated even if the upload fails.
 	info, metaErr = o.fs.fetchMetadataForPath(ctx, resolvedPath, api.HiDriveObjectWithMetadataFields)
 	} else {
@@ -138,7 +138,7 @@ var testTable = []struct {
 	// pattern describes how to use data to construct the hash-input.
 	// For every entry n at even indices this repeats the data n times.
 	// For every entry m at odd indices this repeats a null-byte m times.
-	// The input-data is constructed by concatinating the results in order.
+	// The input-data is constructed by concatenating the results in order.
 	pattern []int64
 	out []byte
 	name string
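The comment block above specifies how the test table's `pattern` field expands into hash input. A hedged sketch of that expansion as a standalone helper (an assumption of how such a helper could look, not the backend's actual test code):

```go
package main

import (
	"bytes"
	"fmt"
)

// buildInput repeats data n times for even-index entries and appends m
// null-bytes for odd-index entries, concatenating the results in order.
func buildInput(data []byte, pattern []int64) []byte {
	var buf bytes.Buffer
	for i, n := range pattern {
		if i%2 == 0 {
			buf.Write(bytes.Repeat(data, int(n)))
		} else {
			buf.Write(make([]byte, n)) // n null-bytes
		}
	}
	return buf.Bytes()
}

func main() {
	fmt.Printf("%q\n", buildInput([]byte("ab"), []int64{2, 1, 1}))
	// "abab\x00ab"
}
```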
@@ -227,7 +227,7 @@ type Object struct {
 	rawData json.RawMessage
 }
 
-// IAFile reprensents a subset of object in MetadataResponse.Files
+// IAFile represents a subset of object in MetadataResponse.Files
 type IAFile struct {
 	Name string `json:"name"`
 	// Source string `json:"source"`
@@ -243,7 +243,7 @@ type IAFile struct {
 	rawData json.RawMessage
 }
 
-// MetadataResponse reprensents subset of the JSON object returned by (frontend)/metadata/
+// MetadataResponse represents subset of the JSON object returned by (frontend)/metadata/
 type MetadataResponse struct {
 	Files []IAFile `json:"files"`
 	ItemSize int64 `json:"item_size"`
@@ -1273,7 +1273,7 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
 	return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
 }
 
-// mimicks urllib.parse.quote() on Python; exclude / from url.PathEscape
+// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
 func quotePath(s string) string {
 	seg := strings.Split(s, "/")
 	newValues := []string{}
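A plausible completion of `quotePath` from the visible lines (reconstructed for illustration; the backend's exact code may differ): escape each segment with `url.PathEscape` but keep the `/` separators, mimicking Python's `urllib.parse.quote()`:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// quotePath escapes each path segment but leaves the / separators
// intact, like Python's urllib.parse.quote().
func quotePath(s string) string {
	seg := strings.Split(s, "/")
	newValues := []string{}
	for _, v := range seg {
		newValues = append(newValues, url.PathEscape(v))
	}
	return strings.Join(newValues, "/")
}

func main() {
	fmt.Println(quotePath("dir name/file#1.txt")) // dir%20name/file%231.txt
}
```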
@@ -1418,7 +1418,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 	}
 	info, err := f.copyOrMove(ctx, "cp", srcObj.filePath(), remote)
 
-	// if destination was a trashed file then after a successfull copy the copied file is still in trash (bug in api?)
+	// if destination was a trashed file then after a successful copy the copied file is still in trash (bug in api?)
 	if err == nil && bool(info.Deleted) && !f.opt.TrashedOnly && info.State == "COMPLETED" {
 		fs.Debugf(src, "Server-side copied to trashed destination, restoring")
 		info, err = f.createOrUpdate(ctx, remote, srcObj.modTime, srcObj.size, srcObj.md5)
@@ -668,7 +668,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
 //
 // https://app.koofr.net/content/links/39a6cc01-3b23-477a-8059-c0fb3b0f15de/files/get?path=%2F
 //
-// I am not sure about meaning of "path" parameter; in my expriments
+// I am not sure about meaning of "path" parameter; in my experiments
 // it is always "%2F", and omitting it or putting any other value
 // results in 404.
 //
@@ -192,7 +192,7 @@ func TestHashOnUpdate(t *testing.T) {
 	require.NoError(t, err)
 	assert.Equal(t, "9a0364b9e99bb480dd25e1f0284c8555", md5)
 
-	// Reupload it with diferent contents but same size and timestamp
+	// Reupload it with different contents but same size and timestamp
 	var b = bytes.NewBufferString("CONTENT")
 	src := object.NewStaticObjectInfo(filePath, when, int64(b.Len()), true, nil, f)
 	err = o.Update(ctx, b, src)
@@ -9,7 +9,7 @@ import (
 
 const haveSetBTime = false
 
-// setBTime changes the the birth time of the file passed in
+// setBTime changes the birth time of the file passed in
 func setBTime(name string, btime time.Time) error {
 	// Does nothing
 	return nil
@@ -11,7 +11,7 @@ import (
 
 const haveSetBTime = true
 
-// setBTime sets the the birth time of the file passed in
+// setBTime sets the birth time of the file passed in
 func setBTime(name string, btime time.Time) (err error) {
 	h, err := syscall.Open(name, os.O_RDWR, 0755)
 	if err != nil {
@@ -347,7 +347,7 @@ func (f *Fs) mkdir(ctx context.Context, rootNode *mega.Node, dir string) (node *
 		}
 	}
 	if err != nil {
-		return nil, fmt.Errorf("internal error: mkdir called with non-existent root node: %w", err)
+		return nil, fmt.Errorf("internal error: mkdir called with nonexistent root node: %w", err)
 	}
 	// i is number of directories to create (may be 0)
 	// node is directory to create them from
@@ -387,7 +387,7 @@ func (f *Fs) findRoot(ctx context.Context, create bool) (*mega.Node, error) {
 		return f._rootNode, nil
 	}
 
-	// Check for pre-existing root
+	// Check for preexisting root
 	absRoot := f.srv.FS.GetRoot()
 	node, err := f.findDir(absRoot, f.root)
 	//log.Printf("findRoot findDir %p %v", node, err)
@@ -118,7 +118,7 @@ type Fs struct {
 	filetype string // dir, file or symlink
 	dirscreated map[string]bool // if implicit dir has been created already
 	dirscreatedMutex sync.Mutex // mutex to protect dirscreated
-	statcache map[string][]File // cache successfull stat requests
+	statcache map[string][]File // cache successful stat requests
 	statcacheMutex sync.RWMutex // RWMutex to protect statcache
 }
 
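The `statcache`/`statcacheMutex` pair above is the classic RWMutex-guarded map cache. A minimal sketch of the pattern (with a simplified `File` type standing in for the backend's):

```go
package main

import "sync"

type File struct{ Name string }

type statCache struct {
	mu    sync.RWMutex
	cache map[string][]File // cache successful stat requests
}

// get takes the read lock so concurrent readers don't block each other.
func (c *statCache) get(path string) ([]File, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	files, ok := c.cache[path]
	return files, ok
}

// put takes the write lock before mutating the map.
func (c *statCache) put(path string, files []File) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.cache[path] = files
}

func main() {
	c := &statCache{cache: map[string][]File{}}
	c.put("/dir", []File{{Name: "a.txt"}})
	if files, ok := c.get("/dir"); ok {
		_ = files
	}
}
```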
@@ -424,7 +424,7 @@ func (f *Fs) getFileName(file *File) string {
 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	if f.filetype == "" {
 		// This happens in two scenarios.
-		// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
+		// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
 		// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
 		err := f.initFs(ctx, dir)
 		if err != nil {
@@ -488,7 +488,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
 	if f.filetype == "" {
 		// This happens in two scenarios.
-		// 1. NewFs is done on a non-existent object, then later rclone attempts to List/ListR this NewFs.
+		// 1. NewFs is done on a nonexistent object, then later rclone attempts to List/ListR this NewFs.
 		// 2. List/ListR is called from the context of test_all and not the regular rclone binary.
 		err := f.initFs(ctx, dir)
 		if err != nil {
@@ -70,7 +70,7 @@ type Drive struct {
 	Quota Quota `json:"quota"`
 }
 
-// Timestamp represents represents date and time information for the
+// Timestamp represents date and time information for the
 // OneDrive API, by using ISO 8601 and is always in UTC time.
 type Timestamp time.Time
 
@@ -13,7 +13,7 @@ const (
 	timeFormat = `"` + time.RFC1123Z + `"`
 )
 
-// Time represents represents date and time information for the
+// Time represents date and time information for the
 // pcloud API, by using RFC1123Z
 type Time time.Time
 
@@ -2009,7 +2009,7 @@ See [the time option docs](/docs/#time-option) for valid formats.
 			Help: `If set this will decompress gzip encoded objects.
 
 It is possible to upload objects to S3 with "Content-Encoding: gzip"
-set. Normally rclone will download these files files as compressed objects.
+set. Normally rclone will download these files as compressed objects.
 
 If this flag is set then rclone will decompress these files with
 "Content-Encoding: gzip" as they are received. This means that rclone
@@ -5199,7 +5199,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	var head s3.HeadObjectOutput
 	//structs.SetFrom(&head, &req)
 	setFrom_s3HeadObjectOutput_s3PutObjectInput(&head, &req)
-	head.ETag = &md5sumHex // doesn't matter quotes are misssing
+	head.ETag = &md5sumHex // doesn't matter quotes are missing
 	head.ContentLength = &size
 	// If we have done a single part PUT request then we can read these
 	if gotEtag != "" {
@@ -78,7 +78,7 @@ func (f *Fs) InternalTestMetadata(t *testing.T) {
 	}
 
 	t.Run("GzipEncoding", func(t *testing.T) {
-		// Test that the gziped file we uploaded can be
+		// Test that the gzipped file we uploaded can be
 		// downloaded with and without decompression
 		checkDownload := func(wantContents string, wantSize int64, wantHash string) {
 			gotContents := fstests.ReadObject(ctx, t, o, -1)
@@ -116,7 +116,7 @@ func (f *Fs) InternalTestNoHead(t *testing.T) {
 	defer func() {
 		assert.NoError(t, obj.Remove(ctx))
 	}()
-	// PutTestcontests checks the received object
+	// PutTestcontents checks the received object
 
 }
 
@@ -16,8 +16,8 @@ func TestInternalUrlEncode(t *testing.T) {
 		want string
 	}{
 		{"", ""},
-		{"abcdefghijklmopqrstuvwxyz", "abcdefghijklmopqrstuvwxyz"},
-		{"ABCDEFGHIJKLMOPQRSTUVWXYZ", "ABCDEFGHIJKLMOPQRSTUVWXYZ"},
+		{"abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"},
+		{"ABCDEFGHIJKLMNOPQRSTUVWXYZ", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"},
 		{"0123456789", "0123456789"},
 		{"abc/ABC/123", "abc/ABC/123"},
 		{"   ", "%20%20%20"},
@@ -80,7 +80,7 @@ type UploadInfo struct {
 	} `json:"data"`
 }
 
-// UploadResponse is the respnse to a successful upload
+// UploadResponse is the response to a successful upload
 type UploadResponse struct {
 	Files []struct {
 		Name string `json:"name"`
@@ -163,7 +163,7 @@ func (f *Fs) splitPathFull(pth string) (string, string) {
 	return "//" + fullPath[:i], fullPath[i+1:]
 }
 
-// splitPath is modified splitPath version that doesn't include the seperator
+// splitPath is modified splitPath version that doesn't include the separator
 // in the base path
 func (f *Fs) splitPath(pth string) (string, string) {
 	// chop of any leading or trailing '/'
@@ -479,7 +479,7 @@ func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size
 	} else if size == 0 {
 		return nil, fs.ErrorCantUploadEmptyFiles
 	}
-	// yes it does take take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(
+	// yes it does take 4 requests if we're uploading to root and 6+ if we're uploading to any subdir :(
 
 	// create upload request
 	opts := rest.Opts{
@@ -757,7 +757,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	if err != nil {
 		return fmt.Errorf("dirmove: source not found: %w", err)
 	}
-	// check if the destination allready exists
+	// check if the destination already exists
 	dstPath := f.dirPath(dstRemote)
 	_, err = f.readMetaDataForPath(ctx, dstPath, &api.MetadataRequestOptions{Limit: 1})
 	if err == nil {
@@ -782,7 +782,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 	needMove := srcBase != dstBase
 
 	// if we have to rename we'll have to use a temporary name since
-	// there could allready be a directory with the same name as the src directory
+	// there could already be a directory with the same name as the src directory
 	if needRename {
 		// rename to a temporary name
 		tmpName := "rcloneTemp" + random.String(8)
@@ -6,7 +6,7 @@ import (
 	"time"
 )
 
-// Time represents represents date and time information for Zoho
+// Time represents date and time information for Zoho
 // Zoho uses milliseconds since unix epoch (Java currentTimeMillis)
 type Time time.Time
 
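Zoho's millisecond-epoch convention noted above maps naturally onto `time.UnixMilli`. A hedged sketch of decoding such a value from JSON (simplified; not the Zoho backend's exact implementation):

```go
package api

import (
	"strconv"
	"time"
)

// Time stores Zoho times, which arrive as Java currentTimeMillis values.
type Time time.Time

// UnmarshalJSON converts a millisecond epoch number into a Time.
func (t *Time) UnmarshalJSON(data []byte) error {
	millis, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = Time(time.UnixMilli(millis))
	return nil
}
```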
@@ -150,8 +150,8 @@ func init() {
 			return workspace.ID, workspace.Attributes.Name
 		})
 	case "workspace_end":
-		worksspaceID := config.Result
-		m.Set(configRootID, worksspaceID)
+		workspaceID := config.Result
+		m.Set(configRootID, workspaceID)
 		return nil, nil
 	}
 	return nil, fmt.Errorf("unknown state %q", config.State)
@@ -1264,7 +1264,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return err
 	}
 
-	// upload was successfull, need to delete old object before rename
+	// upload was successful, need to delete old object before rename
 	if err = o.Remove(ctx); err != nil {
 		return fmt.Errorf("failed to remove old object: %w", err)
 	}
@@ -290,7 +290,7 @@ func (b *bisyncRun) applyDeltas(ctx context.Context, ds1, ds2 *deltaSet) (change
 	return
 }
 
-// exccessDeletes checks whether number of deletes is within allowed range
+// excessDeletes checks whether number of deletes is within allowed range
 func (ds *deltaSet) excessDeletes() bool {
 	maxDelete := ds.opt.MaxDelete
 	maxRatio := float64(maxDelete) / 100.0
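A hedged completion of `excessDeletes` from the visible lines (the `deltaSet` fields here are illustrative stand-ins for bisync's real type): compare the delete count against the `MaxDelete` percentage of the total:

```go
package main

import "fmt"

type deltaSet struct {
	deleted int
	total   int
	opt     struct{ MaxDelete int }
}

// excessDeletes checks whether the number of deletes stays within the
// allowed percentage of the total number of files.
func (ds *deltaSet) excessDeletes() bool {
	maxDelete := ds.opt.MaxDelete
	maxRatio := float64(maxDelete) / 100.0
	return float64(ds.deleted) > maxRatio*float64(ds.total)
}

func main() {
	ds := &deltaSet{deleted: 30, total: 100}
	ds.opt.MaxDelete = 25
	fmt.Println(ds.excessDeletes()) // true: 30% exceeds the 25% limit
}
```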
@@ -15,7 +15,7 @@ func makeHelp(help string) string {
 	return replacer.Replace(help)
 }
 
-var shortHelp = `Perform bidirectonal synchronization between two paths.`
+var shortHelp = `Perform bidirectional synchronization between two paths.`
 
 var rcHelp = makeHelp(`This takes the following parameters
 
@@ -80,7 +80,7 @@ func handleDefaultMountpath() (string, error) {
 func handleNetworkShareMountpath(mountpath string, opt *mountlib.Options) (string, error) {
 	// Assuming mount path is a valid network share path (UNC format, "\\Server\Share").
 	// Always mount as network drive, regardless of the NetworkMode option.
-	// Find an unused drive letter to use as mountpoint, the the supplied path can
+	// Find an unused drive letter to use as mountpoint, the supplied path can
 	// be used as volume prefix (network share path) instead of mountpoint.
 	if !opt.NetworkMode {
 		fs.Debugf(nil, "Forcing --network-mode because mountpoint path is network share UNC format")
@@ -140,7 +140,7 @@ are 100% certain you are already passing obscured passwords then use
 |rclone config password| command.
 
 The flag |--non-interactive| is for use by applications that wish to
-configure rclone themeselves, rather than using rclone's text based
+configure rclone themselves, rather than using rclone's text based
 configuration questions. If this flag is set, and rclone needs to ask
 the user a question, a JSON blob will be returned with the question in
 it.
@@ -99,7 +99,7 @@ For the MD5 and SHA1 algorithms there are also dedicated commands,
 
 This command can also hash data received on standard input (stdin),
 by not passing a remote:path, or by passing a hyphen as remote:path
-when there is data to read (if not, the hypen will be treated literaly,
+when there is data to read (if not, the hyphen will be treated literally,
 as a relative path).
 
 Run without a hash to see the list of all supported hashes, e.g.
@@ -343,7 +343,7 @@ func showBackend(name string) {
 		defaultValue := opt.GetValue()
 		// Default value and Required are related: Required means option must
 		// have a value, but if there is a default then a value does not have
-		// to be explicitely set and then Required makes no difference.
+		// to be explicitly set and then Required makes no difference.
 		if defaultValue != "" {
 			fmt.Printf("- Default: %s\n", quoteString(defaultValue))
 		} else {
@@ -26,7 +26,7 @@ Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the re
 
 The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default - use |-R| to make them recurse.
 
-Listing a non-existent directory will produce an error except for
+Listing a nonexistent directory will produce an error except for
 remotes which can't have empty directories (e.g. s3, swift, or gcs -
 the bucket-based remotes).
 `, "|", "`")
@@ -84,7 +84,7 @@ If ` + "`--files-only`" + ` is not specified directories in addition to the file
 will be returned.
 
 If ` + "`--metadata`" + ` is set then an additional Metadata key will be returned.
-This will have metdata in rclone standard format as a JSON object.
+This will have metadata in rclone standard format as a JSON object.
 
 if ` + "`--stat`" + ` is set then a single JSON blob will be returned about the
 item pointed to. This will return an error if the item isn't found.
@@ -35,7 +35,7 @@ to running ` + "`rclone hashsum MD5 remote:path`" + `.
 
 This command can also hash data received on standard input (stdin),
 by not passing a remote:path, or by passing a hyphen as remote:path
-when there is data to read (if not, the hypen will be treated literaly,
+when there is data to read (if not, the hyphen will be treated literally,
 as a relative path).
 `,
 	RunE: func(command *cobra.Command, args []string) error {
@@ -88,7 +88,7 @@ and experience unexpected program errors, freezes or other issues, consider moun
 as a network drive instead.
 
 When mounting as a fixed disk drive you can either mount to an unused drive letter,
-or to a path representing a **non-existent** subdirectory of an **existing** parent
+or to a path representing a **nonexistent** subdirectory of an **existing** parent
 directory or drive. Using the special value |*| will tell rclone to
 automatically assign the next available drive letter, starting with Z: and moving backward.
 Examples:
@@ -119,7 +119,7 @@ the mapped drive, shown in Windows Explorer etc, while the complete
 |\\server\share| will be reported as the remote UNC path by
 |net use| etc, just like a normal network drive mapping.
 
-If you specify a full network share UNC path with |--volname|, this will implicitely
+If you specify a full network share UNC path with |--volname|, this will implicitly
 set the |--network-mode| option, so the following two examples have same result:
 
     rclone @ remote:path/to/files X: --network-mode
@@ -128,7 +128,7 @@ set the |--network-mode| option, so the following two examples have same result:
 You may also specify the network share UNC path as the mountpoint itself. Then rclone
 will automatically assign a drive letter, same as with |*| and use that as
 mountpoint, and instead use the UNC path specified as the volume name, as if it were
-specified with the |--volname| option. This will also implicitely set
+specified with the |--volname| option. This will also implicitly set
 the |--network-mode| option. This means the following two examples have same result:
 
     rclone @ remote:path/to/files \\cloud\remote
@@ -164,7 +164,7 @@ The permissions on each entry will be set according to [options](#options)
 
 The default permissions corresponds to |--file-perms 0666 --dir-perms 0777|,
 i.e. read and write permissions to everyone. This means you will not be able
-to start any programs from the the mount. To be able to do that you must add
+to start any programs from the mount. To be able to do that you must add
 execute permissions, e.g. |--file-perms 0777 --dir-perms 0777| to add it
 to everyone. If the program needs to write files, chances are you will have
 to enable [VFS File Caching](#vfs-file-caching) as well (see also [limitations](#limitations)).
@@ -238,7 +238,7 @@ func (m *MountPoint) Mount() (daemon *os.Process, err error) {
 		return nil, err
 	}
 
-	if err = m.CheckAllowings(); err != nil {
+	if err = m.CheckAllowed(); err != nil {
 		return nil, err
 	}
 	m.SetVolumeName(m.MountOpt.VolumeName)
@@ -62,9 +62,9 @@ func absPath(path string) string {
 	return path
 }
 
-// CheckAllowings informs about ignored flags on Windows. If not on Windows
+// CheckAllowed informs about ignored flags on Windows. If not on Windows
 // and not --allow-non-empty flag is used, verify that mountpoint is empty.
-func (m *MountPoint) CheckAllowings() error {
+func (m *MountPoint) CheckAllowed() error {
 	opt := &m.MountOpt
 	if runtime.GOOS == "windows" {
 		if opt.AllowNonEmpty {
@@ -48,7 +48,7 @@ press '?' to toggle the help on and off. The supported keys are:
 ` + strings.Join(helpText()[1:], "\n ") + `
 
 Listed files/directories may be prefixed by a one-character flag,
-some of them combined with a description in brackes at end of line.
+some of them combined with a description in brackets at end of line.
 These flags have the following meaning:
 
     e  means this is an empty directory, i.e. contains no files (but
@@ -25,7 +25,7 @@ func newUnixListener(path string, gid int) (net.Listener, string, error) {
 		return nil, "", fmt.Errorf("expected only one socket from systemd, got %d", len(fds))
 	}
 
-	// create socket outselves
+	// create socket ourselves
 	if filepath.Ext(path) == "" {
 		path += ".sock"
 	}
@@ -153,7 +153,7 @@ func newServer(ctx context.Context, f fs.Fs, opt *Options) (*server, error) {
 	}
 	s.useTLS = s.opt.TLSKey != ""
 
-	// Check PassivePorts format since the the server library doesn't!
+	// Check PassivePorts format since the server library doesn't!
 	if !passivePortsRe.MatchString(opt.PassivePorts) {
 		return nil, fmt.Errorf("invalid format for passive ports %q", opt.PassivePorts)
 	}
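The check above validates the `PassivePorts` option with a regexp before handing it to the FTP server library. A sketch of such a validation (the pattern shown is an assumption for illustration, not rclone's exact `passivePortsRe`):

```go
package main

import (
	"fmt"
	"regexp"
)

// passivePortsRe accepts ranges like "30000-32000" (assumed pattern).
var passivePortsRe = regexp.MustCompile(`^\s*\d+\s*-\s*\d+\s*$`)

func main() {
	for _, ports := range []string{"30000-32000", "oops"} {
		if !passivePortsRe.MatchString(ports) {
			fmt.Printf("invalid format for passive ports %q\n", ports)
			continue
		}
		fmt.Printf("passive ports %q ok\n", ports)
	}
}
```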
@@ -35,7 +35,7 @@ to running ` + "`rclone hashsum SHA1 remote:path`" + `.
 
 This command can also hash data received on standard input (stdin),
 by not passing a remote:path, or by passing a hyphen as remote:path
-when there is data to read (if not, the hypen will be treated literaly,
+when there is data to read (if not, the hyphen will be treated literally,
 as a relative path).
 
 This command can also hash data received on STDIN, if not passing
@@ -144,12 +144,12 @@ func Touch(ctx context.Context, f fs.Fs, remote string) error {
 		return nil
 	}
 	if notCreateNewFile {
-		fs.Logf(f, "Not touching non-existent file due to --no-create")
+		fs.Logf(f, "Not touching nonexistent file due to --no-create")
 		return nil
 	}
 	if recursive {
 		// For consistency, --recursive never creates new files.
-		fs.Logf(f, "Not touching non-existent file due to --recursive")
+		fs.Logf(f, "Not touching nonexistent file due to --recursive")
 		return nil
 	}
 	if operations.SkipDestructive(ctx, f, "touch (create)") {
@@ -81,7 +81,7 @@ func TestEnvironmentVariables(t *testing.T) {
 	// Backend flags and remote name
 	// - The listremotes command includes names from environment variables,
 	//   the part between "RCLONE_CONFIG_" and "_TYPE", converted to lowercase.
-	// - When using using a remote created from env, e.g. with lsd command,
+	// - When using a remote created from env, e.g. with lsd command,
 	//   the name is case insensitive in contrast to remotes in config file
 	//   (fs.ConfigToEnv converts to uppercase before checking environment).
 	// - Previously using a remote created from env, e.g. with lsd command,
@@ -323,7 +323,7 @@ Most of these events come up due to a error status from an internal call.
 On such a critical error the `{...}.path1.lst` and `{...}.path2.lst`
 listing files are renamed to extension `.lst-err`, which blocks any future
 bisync runs (since the normal `.lst` files are not found).
-Bisync keeps them under `bisync` subdirectory of the rclone cache direcory,
+Bisync keeps them under `bisync` subdirectory of the rclone cache directory,
 typically at `${HOME}/.cache/rclone/bisync/` on Linux.
 
 Some errors are considered temporary and re-running the bisync is not blocked.
@@ -421,7 +421,7 @@ don't have spelling case differences (`Smile.jpg` vs. `smile.jpg`).
 ## Windows support {#windows}
 
 Bisync has been tested on Windows 8.1, Windows 10 Pro 64-bit and on Windows
-Github runners.
+GitHub runners.
 
 Drive letters are allowed, including drive letters mapped to network drives
 (`rclone bisync J:\localsync GDrive:`).
@@ -929,7 +929,7 @@ test command flags can be equally prefixed by a single `-` or double dash.
   synched tree even if there are check file mismatches in the test tree.
 - Some Dropbox tests can fail, notably printing the following message:
   `src and dst identical but can't set mod time without deleting and re-uploading`
-  This is expected and happens due a way Dropbox handles modificaion times.
+  This is expected and happens due a way Dropbox handles modification times.
   You should use the `-refresh-times` test flag to make up for this.
 - If Dropbox tests hit request limit for you and print error message
   `too_many_requests/...: Too many requests or write operations.`
@@ -939,7 +939,7 @@ test command flags can be equally prefixed by a single `-` or double dash.
 ### Updating golden results
 
 Sometimes even a slight change in the bisync source can cause little changes
-spread around many log files. Updating them manually would be a nighmare.
+spread around many log files. Updating them manually would be a nightmare.
 
 The `-golden` flag will store the `test.log` and `*.lst` listings from each
 test case into respective golden directories. Golden results will
@@ -14,7 +14,7 @@ description: "Rclone Changelog"
 * build: Fix android build after GitHub actions change (Nick Craig-Wood)
 * dlna: Fix SOAP action header parsing (Joram Schrijver)
 * docs: Fix links to mount command from install docs (albertony)
-* dropox: Fix ChangeNotify was unable to decrypt errors (Nick Craig-Wood)
+* dropbox: Fix ChangeNotify was unable to decrypt errors (Nick Craig-Wood)
 * fs: Fix parsing of times and durations of the form "YYYY-MM-DD HH:MM:SS" (Nick Craig-Wood)
 * serve sftp: Fix checksum detection (Nick Craig-Wood)
 * sync: Add accidentally missed filter-sensitivity to --backup-dir option (Nick Naumann)
@ -274,7 +274,7 @@ description: "Rclone Changelog"
|
|||||||
* build
|
* build
|
||||||
* Fix ARM architecture version in .deb packages after nfpm change (Nick Craig-Wood)
|
* Fix ARM architecture version in .deb packages after nfpm change (Nick Craig-Wood)
|
||||||
* Hard fork `github.com/jlaffaye/ftp` to fix `go get github.com/rclone/rclone` (Nick Craig-Wood)
|
* Hard fork `github.com/jlaffaye/ftp` to fix `go get github.com/rclone/rclone` (Nick Craig-Wood)
|
||||||
* oauthutil: Fix crash when webrowser requests `/robots.txt` (Nick Craig-Wood)
|
* oauthutil: Fix crash when webbrowser requests `/robots.txt` (Nick Craig-Wood)
|
||||||
* operations: Fix goroutine leak in case of copy retry (Ankur Gupta)
|
* operations: Fix goroutine leak in case of copy retry (Ankur Gupta)
|
||||||
* rc:
|
* rc:
|
||||||
* Fix `operations/publiclink` default for `expires` parameter (Nick Craig-Wood)
|
* Fix `operations/publiclink` default for `expires` parameter (Nick Craig-Wood)
|
||||||
@ -360,7 +360,7 @@ description: "Rclone Changelog"
|
|||||||
* Add rclone to list of supported `md5sum`/`sha1sum` commands to look for (albertony)
|
* Add rclone to list of supported `md5sum`/`sha1sum` commands to look for (albertony)
|
||||||
* Refactor so we only have one way of running remote commands (Nick Craig-Wood)
|
* Refactor so we only have one way of running remote commands (Nick Craig-Wood)
|
||||||
* Fix timeout on hashing large files by sending keepalives (Nick Craig-Wood)
|
* Fix timeout on hashing large files by sending keepalives (Nick Craig-Wood)
|
||||||
* Fix unecessary seeking when uploading and downloading files (Nick Craig-Wood)
|
* Fix unnecessary seeking when uploading and downloading files (Nick Craig-Wood)
|
||||||
* Update docs on how to create `known_hosts` file (Nick Craig-Wood)
|
* Update docs on how to create `known_hosts` file (Nick Craig-Wood)
|
||||||
* Storj
|
* Storj
|
||||||
* Rename tardigrade backend to storj backend (Nick Craig-Wood)
|
* Rename tardigrade backend to storj backend (Nick Craig-Wood)
|
||||||
@ -961,8 +961,8 @@ description: "Rclone Changelog"
|
|||||||
* Add sort by average size in directory (Adam Plánský)
|
* Add sort by average size in directory (Adam Plánský)
|
||||||
* Add toggle option for average s3ize in directory - key 'a' (Adam Plánský)
|
* Add toggle option for average s3ize in directory - key 'a' (Adam Plánský)
|
||||||
* Add empty folder flag into ncdu browser (Adam Plánský)
|
* Add empty folder flag into ncdu browser (Adam Plánský)
|
||||||
* Add `!` (errror) and `.` (unreadable) file flags to go with `e` (empty) (Nick Craig-Wood)
|
* Add `!` (error) and `.` (unreadable) file flags to go with `e` (empty) (Nick Craig-Wood)
|
||||||
* obscure: Make `rclone osbcure -` ignore newline at end of line (Nick Craig-Wood)
|
* obscure: Make `rclone obscure -` ignore newline at end of line (Nick Craig-Wood)
|
||||||
* operations
|
* operations
|
||||||
* Add logs when need to upload files to set mod times (Nick Craig-Wood)
|
* Add logs when need to upload files to set mod times (Nick Craig-Wood)
|
||||||
* Move and copy log name of the destination object in verbose (Adam Plánský)
|
* Move and copy log name of the destination object in verbose (Adam Plánský)
|
||||||
@ -987,7 +987,7 @@ description: "Rclone Changelog"
|
|||||||
* Make the error count match up in the log message (Nick Craig-Wood)
|
* Make the error count match up in the log message (Nick Craig-Wood)
|
||||||
* move: Fix data loss when source and destination are the same object (Nick Craig-Wood)
|
* move: Fix data loss when source and destination are the same object (Nick Craig-Wood)
|
||||||
* operations
|
* operations
|
||||||
* Fix `--cutof-mode` hard not cutting off immediately (Nick Craig-Wood)
|
* Fix `--cutoff-mode` hard not cutting off immediately (Nick Craig-Wood)
|
||||||
* Fix `--immutable` error message (Nick Craig-Wood)
|
* Fix `--immutable` error message (Nick Craig-Wood)
|
||||||
* sync
|
* sync
|
||||||
* Fix `--cutoff-mode` soft & cautious so it doesn't end the transfer early (Nick Craig-Wood)
|
* Fix `--cutoff-mode` soft & cautious so it doesn't end the transfer early (Nick Craig-Wood)
|
||||||
@ -1035,7 +1035,7 @@ description: "Rclone Changelog"
|
|||||||
* Fixed crash on an empty file name (lluuaapp)
|
* Fixed crash on an empty file name (lluuaapp)
|
||||||
* Box
|
* Box
|
||||||
* Fix NewObject for files that differ in case (Nick Craig-Wood)
|
* Fix NewObject for files that differ in case (Nick Craig-Wood)
|
||||||
* Fix finding directories in a case insentive way (Nick Craig-Wood)
|
* Fix finding directories in a case insensitive way (Nick Craig-Wood)
|
||||||
* Chunker
|
* Chunker
|
||||||
* Skip long local hashing, hash in-transit (fixes) (Ivan Andreev)
|
* Skip long local hashing, hash in-transit (fixes) (Ivan Andreev)
|
||||||
* Set Features ReadMimeType to false as Object.MimeType not supported (Nick Craig-Wood)
|
* Set Features ReadMimeType to false as Object.MimeType not supported (Nick Craig-Wood)
|
||||||
@ -1116,7 +1116,7 @@ description: "Rclone Changelog"
|
|||||||
* Implement `--sftp-use-fstat` for unusual SFTP servers (Nick Craig-Wood)
|
* Implement `--sftp-use-fstat` for unusual SFTP servers (Nick Craig-Wood)
|
||||||
* Sugarsync
|
* Sugarsync
|
||||||
* Fix NewObject for files that differ in case (Nick Craig-Wood)
|
* Fix NewObject for files that differ in case (Nick Craig-Wood)
|
||||||
* Fix finding directories in a case insentive way (Nick Craig-Wood)
|
* Fix finding directories in a case insensitive way (Nick Craig-Wood)
|
||||||
* Swift
|
* Swift
|
||||||
* Fix deletion of parts of Static Large Object (SLO) (Nguyễn Hữu Luân)
|
* Fix deletion of parts of Static Large Object (SLO) (Nguyễn Hữu Luân)
|
||||||
* Ensure partially uploaded large files are uploaded unless `--swift-leave-parts-on-error` (Nguyễn Hữu Luân)
|
* Ensure partially uploaded large files are uploaded unless `--swift-leave-parts-on-error` (Nguyễn Hữu Luân)
|
||||||
@ -1190,7 +1190,7 @@ description: "Rclone Changelog"
|
|||||||
[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
|
[See commits](https://github.com/rclone/rclone/compare/v1.53.1...v1.53.2)
|
||||||
|
|
||||||
* Bug Fixes
|
* Bug Fixes
|
||||||
* acounting
|
* accounting
|
||||||
* Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
|
* Fix incorrect speed and transferTime in core/stats (Nick Craig-Wood)
|
||||||
* Stabilize display order of transfers on Windows (Nick Craig-Wood)
|
* Stabilize display order of transfers on Windows (Nick Craig-Wood)
|
||||||
* operations
|
* operations
|
||||||
@ -2160,7 +2160,7 @@ all the docs and Edward Barker for helping re-write the front page.
|
|||||||
* rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood)
|
* rcat: Fix slowdown on systems with multiple hashes (Nick Craig-Wood)
|
||||||
* rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood)
|
* rcd: Fix permissions problems on cache directory with web gui download (Nick Craig-Wood)
|
||||||
* Mount
|
* Mount
|
||||||
* Default `--daemon-timout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
|
* Default `--daemon-timeout` to 15 minutes on macOS and FreeBSD (Nick Craig-Wood)
|
||||||
* Update docs to show mounting from root OK for bucket-based (Nick Craig-Wood)
|
* Update docs to show mounting from root OK for bucket-based (Nick Craig-Wood)
|
||||||
* Remove nonseekable flag from write files (Nick Craig-Wood)
|
* Remove nonseekable flag from write files (Nick Craig-Wood)
|
||||||
* VFS
|
* VFS
|
||||||
@ -2468,7 +2468,7 @@ all the docs and Edward Barker for helping re-write the front page.
|
|||||||
* Update google cloud storage endpoints (weetmuts)
|
* Update google cloud storage endpoints (weetmuts)
|
||||||
* HTTP
|
* HTTP
|
||||||
* Add an example with username and password which is supported but wasn't documented (Nick Craig-Wood)
|
* Add an example with username and password which is supported but wasn't documented (Nick Craig-Wood)
|
||||||
* Fix backend with `--files-from` and non-existent files (Nick Craig-Wood)
|
* Fix backend with `--files-from` and nonexistent files (Nick Craig-Wood)
|
||||||
* Hubic
|
* Hubic
|
||||||
* Make error message more informative if authentication fails (Nick Craig-Wood)
|
* Make error message more informative if authentication fails (Nick Craig-Wood)
|
||||||
* Jottacloud
|
* Jottacloud
|
||||||
@ -2952,7 +2952,7 @@ Point release to fix hubic and azureblob backends.
|
|||||||
* FTP
|
* FTP
|
||||||
* Work around strange response from box FTP server
|
* Work around strange response from box FTP server
|
||||||
* More workarounds for FTP servers to fix mkParentDir error
|
* More workarounds for FTP servers to fix mkParentDir error
|
||||||
* Fix no error on listing non-existent directory
|
* Fix no error on listing nonexistent directory
|
||||||
* Google Cloud Storage
|
* Google Cloud Storage
|
||||||
* Add service_account_credentials (Matt Holt)
|
* Add service_account_credentials (Matt Holt)
|
||||||
* Detect bucket presence by listing it - minimises permissions needed
|
* Detect bucket presence by listing it - minimises permissions needed
|
||||||
@ -3025,7 +3025,7 @@ Point release to fix hubic and azureblob backends.
|
|||||||
* Add .deb and .rpm packages as part of the build
|
* Add .deb and .rpm packages as part of the build
|
||||||
* Make a beta release for all branches on the main repo (but not pull requests)
|
* Make a beta release for all branches on the main repo (but not pull requests)
|
||||||
* Bug Fixes
|
* Bug Fixes
|
||||||
* config: fixes errors on non existing config by loading config file only on first access
|
* config: fixes errors on nonexistent config by loading config file only on first access
|
||||||
* config: retry saving the config after failure (Mateusz)
|
* config: retry saving the config after failure (Mateusz)
|
||||||
* sync: when using `--backup-dir` don't delete files if we can't set their modtime
|
* sync: when using `--backup-dir` don't delete files if we can't set their modtime
|
||||||
* this fixes odd behaviour with Dropbox and `--backup-dir`
|
* this fixes odd behaviour with Dropbox and `--backup-dir`
|
||||||
@ -3560,7 +3560,7 @@ Point release to fix hubic and azureblob backends.
|
|||||||
* Update B2 docs with Data usage, and Crypt section - thanks Tomasz Mazur
|
* Update B2 docs with Data usage, and Crypt section - thanks Tomasz Mazur
|
||||||
* S3
|
* S3
|
||||||
* Command line and config file support for
|
* Command line and config file support for
|
||||||
* Setting/overriding ACL - thanks Radek Senfeld
|
* Setting/overriding ACL - thanks Radek Šenfeld
|
||||||
* Setting storage class - thanks Asko Tamm
|
* Setting storage class - thanks Asko Tamm
|
||||||
* Drive
|
* Drive
|
||||||
* Make exponential backoff work exactly as per Google specification
|
* Make exponential backoff work exactly as per Google specification
|
||||||
|
@@ -129,7 +129,7 @@ Generally -1 (default, equivalent to 5) is recommended.
 Levels 1 to 9 increase compression at the cost of speed. Going past 6
 generally offers very little return.

-Level -2 uses Huffmann encoding only. Only use if you know what you
+Level -2 uses Huffman encoding only. Only use if you know what you
 are doing.
 Level 0 turns off compression.

@@ -241,7 +241,7 @@ the password configured for an existing crypt remote means you will no longer
 able to decrypt any of the previously encrypted content. The only possibility
 is to re-upload everything via a crypt remote configured with your new password.

-Depending on the size of your data, your bandwith, storage quota etc, there are
+Depending on the size of your data, your bandwidth, storage quota etc, there are
 different approaches you can take:
 - If you have everything in a different location, for example on your local system,
 you could remove all of the prior encrypted files, change the password for your
@@ -254,7 +254,7 @@ effectively decrypting everything on the fly using the old password and
 re-encrypting using the new password. When done, delete the original crypt
 remote directory and finally the rclone crypt configuration with the old password.
 All data will be streamed from the storage system and back, so you will
-get half the bandwith and be charged twice if you have upload and download quota
+get half the bandwidth and be charged twice if you have upload and download quota
 on the storage system.

 **Note**: A security problem related to the random password generator
@@ -567,7 +567,7 @@ How to encode the encrypted filename to text string.

 This option could help with shortening the encrypted filename. The
 suitable option would depend on the way your remote count the filename
-length and if it's case sensitve.
+length and if it's case sensitive.

 Properties:

@@ -498,7 +498,7 @@ backends can also store arbitrary user metadata.

 Where possible the key names are standardized, so, for example, it is
 possible to copy object metadata from s3 to azureblob for example and
-metadata will be translated apropriately.
+metadata will be translated appropriately.

 Some backends have limits on the size of the metadata and rclone will
 give errors on upload if they are exceeded.
@@ -641,7 +641,7 @@ would mean limit the upload and download bandwidth to 10 MiB/s.
 single limit, specify the desired bandwidth in KiB/s, or use a
 suffix B|K|M|G|T|P. The default is `0` which means to not limit bandwidth.

-The upload and download bandwidth can be specified seperately, as
+The upload and download bandwidth can be specified separately, as
 `--bwlimit UP:DOWN`, so

     --bwlimit 10M:100k
@@ -2011,7 +2011,7 @@ In all other cases the file will not be updated.
 Consider using the `--modify-window` flag to compensate for time skews
 between the source and the backend, for backends that do not support
 mod times, and instead use uploaded times. However, if the backend
-does not support checksums, note that sync'ing or copying within the
+does not support checksums, note that syncing or copying within the
 time skew window may still result in additional transfers for safety.

 ### --use-mmap ###
@@ -1335,7 +1335,7 @@ drives found and a combined drive.
     upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"

 Adding this to the rclone config file will cause those team drives to
-be accessible with the aliases shown. Any illegal charactes will be
+be accessible with the aliases shown. Any illegal characters will be
 substituted with "_" and duplicate names will have numbers suffixed.
 It will also add a remote called AllDrives which shows all the shared
 drives combined into one directory tree.
@@ -409,7 +409,7 @@ Properties:

 #### --dropbox-batch-commit-timeout

-Max time to wait for a batch to finish comitting
+Max time to wait for a batch to finish committing

 Properties:

@@ -13,7 +13,7 @@ through a global file system.
 ## Configuration

 The initial setup for the Enterprise File Fabric backend involves
-getting a token from the the Enterprise File Fabric which you need to
+getting a token from the Enterprise File Fabric which you need to
 do in your browser. `rclone config` walks you through it.

 Here is an example of how to make a remote called `remote`. First run:
@@ -313,7 +313,7 @@ and may be set in the config file.
 --drive-use-trash Send files to the trash instead of deleting permanently (default true)
 --drive-v2-download-min-size SizeSuffix If Object's are greater, use drive v2 API to download (default off)
 --dropbox-auth-url string Auth server URL
---dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish comitting (default 10m0s)
+--dropbox-batch-commit-timeout Duration Max time to wait for a batch to finish committing (default 10m0s)
 --dropbox-batch-mode string Upload file batching sync|async|off (default "sync")
 --dropbox-batch-size int Max number of files in upload batch
 --dropbox-batch-timeout Duration Max time to allow an idle upload batch before uploading (default 0s)
@@ -138,7 +138,7 @@ can be set with [`--ftp-port`](#ftp-port).
 In addition to the [default restricted characters set](/overview/#restricted-characters)
 the following characters are also replaced:

-File names cannot end with the following characters. Repacement is
+File names cannot end with the following characters. Replacement is
 limited to the last character in a file name:

 | Character | Value | Replacement |
@@ -607,7 +607,7 @@ Properties:
 If set this will decompress gzip encoded objects.

 It is possible to upload objects to GCS with "Content-Encoding: gzip"
-set. Normally rclone will download these files files as compressed objects.
+set. Normally rclone will download these files as compressed objects.

 If this flag is set then rclone will decompress these files with
 "Content-Encoding: gzip" as they are received. This means that rclone
@@ -124,7 +124,7 @@ the process is very similar to the process of initial setup exemplified before.
 HiDrive allows modification times to be set on objects accurate to 1 second.

 HiDrive supports [its own hash type](https://static.hidrive.com/dev/0001)
-which is used to verify the integrety of file contents after successful transfers.
+which is used to verify the integrity of file contents after successful transfers.

 ### Restricted filename characters

@@ -360,7 +360,7 @@ the system. Both scheduled task and Windows service can be used to achieve this.
 NOTE: Remember that when rclone runs as the `SYSTEM` user, the user profile
 that it sees will not be yours. This means that if you normally run rclone with
 configuration file in the default location, to be able to use the same configuration
-when running as the system user you must explicitely tell rclone where to find
+when running as the system user you must explicitly tell rclone where to find
 it with the [`--config`](https://rclone.org/docs/#config-config-file) option,
 or else it will look in the system users profile path (`C:\Windows\System32\config\systemprofile`).
 To test your command manually from a Command Prompt, you can run it with
@@ -424,7 +424,7 @@ it should be possible through path rewriting as described [here](https://github.

 To Windows service running any rclone command, the excellent third-party utility
 [NSSM](http://nssm.cc), the "Non-Sucking Service Manager", can be used.
-It includes some advanced features such as adjusting process periority, defining
+It includes some advanced features such as adjusting process priority, defining
 process environment variables, redirect to file anything written to stdout, and
 customized response to different exit codes, with a GUI to configure everything from
 (although it can also be used from command line ).
@@ -18,7 +18,7 @@ it also provides white-label solutions to different companies, such as:
 * Elgiganten Sweden (cloud.elgiganten.se)
 * Elgiganten Denmark (cloud.elgiganten.dk)
 * Giganti Cloud (cloud.gigantti.fi)
-* ELKO Clouud (cloud.elko.is)
+* ELKO Cloud (cloud.elko.is)

 Most of the white-label versions are supported by this backend, although may require different
 authentication setup - described below.
@@ -110,7 +110,7 @@ Use `rclone dedupe` to fix duplicated files.
 #### Object not found

 If you are connecting to your Mega remote for the first time,
-to test access and syncronisation, you may receive an error such as
+to test access and synchronization, you may receive an error such as

 ```
 Failed to create file system for "my-mega-remote:":
@@ -152,7 +152,7 @@ Individual symlink files on the remote can be used with the commands like "cat"
 With NetStorage, directories can exist in one of two forms:

 1. **Explicit Directory**. This is an actual, physical directory that you have created in a storage group.
-2. **Implicit Directory**. This refers to a directory within a path that has not been physically created. For example, during upload of a file, non-existent subdirectories can be specified in the target path. NetStorage creates these as "implicit." While the directories aren't physically created, they exist implicitly and the noted path is connected with the uploaded file.
+2. **Implicit Directory**. This refers to a directory within a path that has not been physically created. For example, during upload of a file, nonexistent subdirectories can be specified in the target path. NetStorage creates these as "implicit." While the directories aren't physically created, they exist implicitly and the noted path is connected with the uploaded file.

 Rclone will intercept all file uploads and mkdir commands for the NetStorage remote and will explicitly issue the mkdir command for each directory in the uploading path. This will help with the interoperability with the other Akamai services such as SFTP and the Content Management Shell (CMShell). Rclone will not guarantee correctness of operations with implicit directories which might have been created as a result of using an upload API directly.

@@ -568,7 +568,7 @@ An official document about the limitations for different types of OneDrive can b
 ## Versions

 Every change in a file OneDrive causes the service to create a new
-version of the the file. This counts against a users quota. For
+version of the file. This counts against a users quota. For
 example changing the modification time of a file creates a second
 version, so the file apparently uses twice the space.

@@ -111,7 +111,7 @@ systems they must support a common hash type.

 ### ModTime ###

-Allmost all cloud storage systems store some sort of timestamp
+Almost all cloud storage systems store some sort of timestamp
 on objects, but several of them not something that is appropriate
 to use for syncing. E.g. some backends will only write a timestamp
 that represent the time of the upload. To be relevant for syncing
@@ -397,7 +397,7 @@ The parameters can be a string as per the rest of rclone, eg
 `s3:bucket/path` or `:sftp:/my/dir`. They can also be specified as
 JSON blobs.

-If specifyng a JSON blob it should be a object mapping strings to
+If specifying a JSON blob it should be a object mapping strings to
 strings. These values will be used to configure the remote. There are
 3 special values which may be set:

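The paragraph in the hunk above describes passing a remote definition as a JSON blob of string-to-string pairs. A minimal sketch of building such a blob follows; the keys shown are invented examples, not a statement of which special values rclone defines.

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A flat object mapping strings to strings, as the docs describe.
	// "type" and "host" are illustrative placeholders.
	blob, err := json.Marshal(map[string]string{
		"type": "sftp",
		"host": "example.com",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob)) // {"host":"example.com","type":"sftp"}
}
```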
@@ -1568,7 +1568,7 @@ check that parameter passing is working properly.

 **Authentication is required for this call.**

-### sync/bisync: Perform bidirectonal synchronization between two paths. {#sync-bisync}
+### sync/bisync: Perform bidirectional synchronization between two paths. {#sync-bisync}

 This takes the following parameters

@@ -332,7 +332,7 @@ upload.

 Rclone's default directory traversal is to process each directory
 individually. This takes one API call per directory. Using the
-`--fast-list` flag will read all info about the the objects into
+`--fast-list` flag will read all info about the objects into
 memory first using a smaller number of API calls (one per 1000
 objects). See the [rclone docs](/docs/#fast-list) for more details.

@@ -21,7 +21,7 @@ SSH installations.
 Paths are specified as `remote:path`. If the path does not begin with
 a `/` it is relative to the home directory of the user. An empty path
 `remote:` refers to the user's home directory. For example, `rclone lsd remote:`
-would list the home directory of the user cofigured in the rclone remote config
+would list the home directory of the user configured in the rclone remote config
 (`i.e /home/sftpuser`). However, `rclone lsd remote:/` would list the root
 directory for remote machine (i.e. `/`)

@@ -264,7 +264,7 @@ can also run a SSH server, which is a port of OpenSSH (see official
 [installation guide](https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse)). On a Windows server the shell handling is different: Although it can also
 be set up to use a Unix type shell, e.g. Cygwin bash, the default is to
 use Windows Command Prompt (cmd.exe), and PowerShell is a recommended
-alternative. All of these have bahave differently, which rclone must handle.
+alternative. All of these have behave differently, which rclone must handle.

 Rclone tries to auto-detect what type of shell is used on the server,
 first time you access the SFTP remote. If a remote shell session is
@@ -296,7 +296,7 @@ a new sftp remote is accessed. If you configure a sftp remote
 without a config file, e.g. an [on the fly](/docs/#backend-path-to-dir])
 remote, rclone will have nowhere to store the result, and it
 will re-run the command on every access. To avoid this you should
-explicitely set the `shell_type` option to the correct value,
+explicitly set the `shell_type` option to the correct value,
 or to `none` if you want to prevent rclone from executing any
 remote shell commands.

@@ -304,7 +304,7 @@ It is also important to note that, since the shell type decides
 how quoting and escaping of file paths used as command-line arguments
 are performed, configuring the wrong shell type may leave you exposed
 to command injection exploits. Make sure to confirm the auto-detected
-shell type, or explicitely set the shell type you know is correct,
+shell type, or explicitly set the shell type you know is correct,
 or disable shell access until you know.

 ### Checksum

@@ -278,7 +278,7 @@ type transferStats struct {
 	speed float64
 }

-// calculateTransferStats calculates some addtional transfer stats not
+// calculateTransferStats calculates some additional transfer stats not
 // stored directly in StatsInfo
 func (s *StatsInfo) calculateTransferStats() (ts transferStats) {
 	// checking and transferring have their own locking so read
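For context on the corrected comment: derived stats of this kind are typically computed on demand rather than stored. A toy sketch of such a computation, not the rclone implementation (only the speed field is visible in the hunk):

```go
package main

import (
	"fmt"
	"time"
)

// speed derives a bytes-per-second figure from raw counters, the kind
// of "stats not stored directly" the comment above refers to.
func speed(bytes int64, elapsed time.Duration) float64 {
	if elapsed <= 0 {
		return 0
	}
	return float64(bytes) / elapsed.Seconds()
}

func main() {
	fmt.Printf("%.0f bytes/s\n", speed(10*1024*1024, 4*time.Second))
}
```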
fs/cache/cache_test.go
@@ -154,7 +154,7 @@ func TestPin(t *testing.T) {
 	cleanup, create := mockNewFs(t)
 	defer cleanup()

-	// Test pinning and unpinning non-existent
+	// Test pinning and unpinning nonexistent
 	f := mockfs.NewFs(context.Background(), "mock", "/alien")
 	Pin(f)
 	Unpin(f)
@@ -40,7 +40,7 @@ func (s *Storage) check() {
 	if err == nil {
 		// check to see if config file has changed and if it has, reload it
 		if s.fi == nil || !fi.ModTime().Equal(s.fi.ModTime()) || fi.Size() != s.fi.Size() {
-			fs.Debugf(nil, "Config file has changed externaly - reloading")
+			fs.Debugf(nil, "Config file has changed externally - reloading")
 			err := s._load()
 			if err != nil {
 				fs.Errorf(nil, "Failed to read config file - using previous config: %v", err)
@@ -137,7 +137,7 @@ func (c Simple) Set(key, value string) {
 }

 // String the map value the same way the config parser does, but with
-// sorted keys for reproducability.
+// sorted keys for reproducibility.
 func (c Simple) String() string {
 	var ks = make([]string, 0, len(c))
 	for k := range c {
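The corrected comment explains that keys are sorted for reproducibility. A self-contained sketch of why that matters: Go map iteration order is randomized, so any deterministic serialization must sort the keys first. The output format here is invented, not rclone's actual config syntax.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// stringify serializes a map deterministically by visiting its keys
// in sorted order rather than Go's random map iteration order.
func stringify(m map[string]string) string {
	ks := make([]string, 0, len(m))
	for k := range m {
		ks = append(ks, k)
	}
	sort.Strings(ks)
	var sb strings.Builder
	for _, k := range ks {
		fmt.Fprintf(&sb, "%s=%s,", k, m[k])
	}
	return strings.TrimSuffix(sb.String(), ",")
}

func main() {
	m := map[string]string{"b": "2", "a": "1"}
	fmt.Println(stringify(m)) // always "a=1,b=2"
}
```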
@@ -1,5 +1,5 @@
 // Package dirtree contains the DirTree type which is used for
-// building filesystem heirachies in memory.
+// building filesystem hierarchies in memory.
 package dirtree

 import (
@@ -33,7 +33,7 @@ func NewFs(ctx context.Context, path string) (Fs, error) {
 	overridden := fsInfo.Options.Overridden(config)
 	if len(overridden) > 0 {
 		extraConfig := overridden.String()
-		//Debugf(nil, "detected overriden config %q", extraConfig)
+		//Debugf(nil, "detected overridden config %q", extraConfig)
 		md5sumBinary := md5.Sum([]byte(extraConfig))
 		suffix := base64.RawURLEncoding.EncodeToString(md5sumBinary[:])
 		// 5 characters length is 5*6 = 30 bits of base64
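The hunk above hashes the overridden-config string and base64-encodes it, with a comment noting that 5 base64 characters carry 5 × 6 = 30 bits. A standalone sketch of that derivation; the example config string and the `[:5]` truncation point are assumptions based only on the visible comment.

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	extraConfig := "{chunk_size=1M}" // made-up example value
	// Hash the string, then keep a short URL-safe base64 prefix.
	sum := md5.Sum([]byte(extraConfig))
	suffix := base64.RawURLEncoding.EncodeToString(sum[:])
	fmt.Println(suffix[:5]) // 5 chars x 6 bits = 30 bits of the hash
}
```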
@@ -186,11 +186,11 @@ func TestCheck(t *testing.T) {

 func TestCheckFsError(t *testing.T) {
 	ctx := context.Background()
-	dstFs, err := fs.NewFs(ctx, "non-existent")
+	dstFs, err := fs.NewFs(ctx, "nonexistent")
 	if err != nil {
 		t.Fatal(err)
 	}
-	srcFs, err := fs.NewFs(ctx, "non-existent")
+	srcFs, err := fs.NewFs(ctx, "nonexistent")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -13,7 +13,7 @@ import (
 	"github.com/stretchr/testify/require"
 )

-// Compare a and b in a file system idependent way
+// Compare a and b in a file system independent way
 func compareListJSONItem(t *testing.T, a, b *operations.ListJSONItem, precision time.Duration) {
 	assert.Equal(t, a.Path, b.Path, "Path")
 	assert.Equal(t, a.Name, b.Name, "Name")
@@ -1787,7 +1787,7 @@ func copyURLFn(ctx context.Context, dstFileName string, url string, autoFilename
 	_, params, err := mime.ParseMediaType(resp.Header.Get("Content-Disposition"))
 	headerFilename := path.Base(strings.Replace(params["filename"], "\\", "/", -1))
 	if err != nil || headerFilename == "" {
-		return fmt.Errorf("CopyURL failed: filename not found in the Content-Dispoition header")
+		return fmt.Errorf("CopyURL failed: filename not found in the Content-Disposition header")
 	}
 	fs.Debugf(headerFilename, "filename found in Content-Disposition header.")
 	return fn(ctx, headerFilename, resp.Body, resp.ContentLength, modTime)
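The `copyURLFn` hunk above parses a filename out of a Content-Disposition header. The same stdlib call can be exercised standalone like this; the header value is made up.

```go
package main

import (
	"fmt"
	"mime"
	"path"
	"strings"
)

func main() {
	header := `attachment; filename="archive.tar.gz"` // illustrative value

	// mime.ParseMediaType splits the header into its media type and
	// its parameters, including "filename".
	_, params, err := mime.ParseMediaType(header)
	if err != nil {
		fmt.Println("unusable Content-Disposition:", err)
		return
	}
	// Normalize any Windows-style separators before taking the base
	// name, mirroring the strings.Replace in the hunk above.
	name := path.Base(strings.ReplaceAll(params["filename"], "\\", "/"))
	fmt.Println(name) // archive.tar.gz
}
```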
@@ -227,14 +227,14 @@ func (p *Plugins) GetPluginByName(name string) (out *PackageJSON, err error) {

 }

-// getAuthorRepoBranchGithub gives author, repoName and branch from a github.com url
+// getAuthorRepoBranchGitHub gives author, repoName and branch from a github.com url
 //
 // url examples:
 // https://github.com/rclone/rclone-webui-react/
 // http://github.com/rclone/rclone-webui-react
 // https://github.com/rclone/rclone-webui-react/tree/caman-js
 // github.com/rclone/rclone-webui-react
-func getAuthorRepoBranchGithub(url string) (author string, repoName string, branch string, err error) {
+func getAuthorRepoBranchGitHub(url string) (author string, repoName string, branch string, err error) {
 	repoURL := url
 	repoURL = strings.Replace(repoURL, "https://", "", 1)
 	repoURL = strings.Replace(repoURL, "http://", "", 1)
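Since the body of the renamed function is cut off in the hunk, here is a hypothetical re-creation of parsing the URL shapes its comment lists. The real implementation may differ, and the "master" default branch is an assumption.

```go
package main

import (
	"fmt"
	"strings"
)

// splitGitHubURL is a guess at the parsing getAuthorRepoBranchGitHub
// performs, based only on the URL examples in its doc comment.
func splitGitHubURL(url string) (author, repo, branch string, err error) {
	u := strings.TrimPrefix(url, "https://")
	u = strings.TrimPrefix(u, "http://")
	u = strings.TrimPrefix(u, "github.com/")
	parts := strings.Split(strings.Trim(u, "/"), "/")
	switch {
	case len(parts) >= 4 && parts[2] == "tree":
		return parts[0], parts[1], parts[3], nil
	case len(parts) >= 2:
		return parts[0], parts[1], "master", nil // default branch assumed
	}
	return "", "", "", fmt.Errorf("%q does not look like a github.com URL", url)
}

func main() {
	fmt.Println(splitGitHubURL("https://github.com/rclone/rclone-webui-react/tree/caman-js"))
}
```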
@@ -102,7 +102,7 @@ func rcAddPlugin(_ context.Context, in rc.Params) (out rc.Params, err error) {
 		return nil, err
 	}

-	author, repoName, repoBranch, err := getAuthorRepoBranchGithub(pluginURL)
+	author, repoName, repoBranch, err := getAuthorRepoBranchGitHub(pluginURL)
 	if err != nil {
 		return nil, err
 	}
@@ -28,7 +28,7 @@ type RegInfo struct {
 	// Prefix for command line flags for this fs - defaults to Name if not set
 	Prefix string
 	// Create a new file system. If root refers to an existing
-	// object, then it should return an Fs which which points to
+	// object, then it should return an Fs which points to
 	// the parent of that object and ErrorIsFile.
 	NewFs func(ctx context.Context, name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
 	// Function to call to help with config - see docs for ConfigIn for more info
@@ -179,7 +179,7 @@ func (o *Option) MarshalJSON() ([]byte, error) {
 	})
 }

-// GetValue gets the current current value which is the default if not set
+// GetValue gets the current value which is the default if not set
 func (o *Option) GetValue() interface{} {
 	val := o.Value
 	if val == nil {
@@ -531,14 +531,14 @@ func Run(t *testing.T, opt *Opt) {
 		assert.True(t, len(fsInfo.CommandHelp) > 0, "Command is declared, must return some help in CommandHelp")
 	})

-	// TestFsRmdirNotFound tests deleting a non-existent directory
+	// TestFsRmdirNotFound tests deleting a nonexistent directory
 	t.Run("FsRmdirNotFound", func(t *testing.T) {
 		skipIfNotOk(t)
 		if isBucketBasedButNotRoot(f) {
 			t.Skip("Skipping test as non root bucket-based remote")
 		}
 		err := f.Rmdir(ctx, "")
-		assert.Error(t, err, "Expecting error on Rmdir non-existent")
+		assert.Error(t, err, "Expecting error on Rmdir nonexistent")
 	})

 	// Make the directory
@@ -729,7 +729,7 @@ func Run(t *testing.T, opt *Opt) {
 	o, err := f.NewObject(ctx, "potato")
 	assert.Nil(t, o)
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
-	// Now try an object in a non existing directory
+	// Now try an object in a nonexistent directory
 	o, err = f.NewObject(ctx, "directory/not/found/potato")
 	assert.Nil(t, o)
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
@@ -1632,7 +1632,7 @@ func Run(t *testing.T, opt *Opt) {
 		fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, dirs, rootRemote.Precision())
 	})

-	// Check that that listing the entries is OK
+	// Check that listing the entries is OK
 	t.Run("ListEntries", func(t *testing.T) {
 		entries, err := rootRemote.List(context.Background(), configLeaf)
 		require.NoError(t, err)
@@ -2068,7 +2068,7 @@ func Run(t *testing.T, opt *Opt) {

 	// TestFsRootCollapse tests if the root of an fs "collapses" to the
 	// absolute root. It creates a new fs of the same backend type with its
-	// root set to a *non-existent* folder, and attempts to read the info of
+	// root set to a *nonexistent* folder, and attempts to read the info of
 	// an object in that folder, whose name is taken from a directory that
 	// exists in the absolute root.
 	// This test is added after
@@ -17,7 +17,7 @@ func TestMkdir(t *testing.T) {
 	// test stuff
 }

-This will make r.Fremote and r.Flocal for a remote remote and a local
+This will make r.Fremote and r.Flocal for a remote and a local
 remote. The remote is determined by the -remote flag passed in.

 */
lib/cache/cache_test.go
@@ -158,7 +158,7 @@ func TestCachePin(t *testing.T) {
 	_, err := c.Get("/", create)
 	require.NoError(t, err)

-	// Pin a non-existent item to show nothing happens
+	// Pin a nonexistent item to show nothing happens
 	c.Pin("notfound")

 	c.mu.Lock()
@@ -312,7 +312,7 @@ func TestCacheRename(t *testing.T) {

 	assert.Equal(t, 2, c.Entries())

-	// rename to non-existent
+	// rename to nonexistent
 	value, found := c.Rename("existing1", "EXISTING1")
 	assert.Equal(t, true, found)
 	assert.Equal(t, existing1, value)
@@ -326,7 +326,7 @@ func TestCacheRename(t *testing.T) {

 	assert.Equal(t, 1, c.Entries())

-	// rename non-existent
+	// rename nonexistent
 	value, found = c.Rename("notfound", "NOTFOUND")
 	assert.Equal(t, false, found)
 	assert.Nil(t, value)
@@ -140,7 +140,7 @@ func (dc *DirCache) SetRootIDAlias(rootID string) {
 	dc.Put("", dc.rootID)
 }

-// FlushDir flushes the map of all data starting with with the path
+// FlushDir flushes the map of all data starting with the path
 // dir.
 //
 // If dir is empty string then this is equivalent to calling ResetRoot
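The corrected FlushDir comment describes dropping all cached data whose path starts with a given directory. A guess at those semantics from the doc comment alone, not the real dircache code:

```go
package main

import (
	"fmt"
	"strings"
)

// flushDir removes every cached entry at or below dir. Deleting from a
// map while ranging over it is safe in Go.
func flushDir(cache map[string]string, dir string) {
	for p := range cache {
		if p == dir || strings.HasPrefix(p, dir+"/") {
			delete(cache, p)
		}
	}
}

func main() {
	cache := map[string]string{"a": "1", "a/b": "2", "ab": "3"}
	flushDir(cache, "a")
	fmt.Println(cache) // map[ab:3] - "ab" is not under "a"
}
```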
@@ -70,8 +70,8 @@ func Config(id, name string, claims *jws.ClaimSet, header *jws.Header, queryPara
 		return fmt.Errorf("jwtutil: failed making auth request: %w", err)
 	}
 	defer func() {
-		deferedErr := resp.Body.Close()
-		if deferedErr != nil {
+		deferredErr := resp.Body.Close()
+		if deferredErr != nil {
 			err = fmt.Errorf("jwtutil: failed to close resp.Body: %w", err)
 		}
 	}()
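The pattern in this hunk, closing a response body in a deferred function and surfacing the close error through the named return, can be shown in isolation like this (illustrative only, not the jwtutil code):

```go
package main

import (
	"fmt"
	"io"
)

type closer struct{}

func (closer) Close() error { return fmt.Errorf("boom") }

// readAll closes rc in a deferred func; if the body of the function
// succeeded but Close failed, the close error becomes the result.
func readAll(rc io.Closer) (err error) {
	defer func() {
		deferredErr := rc.Close()
		if deferredErr != nil && err == nil {
			err = fmt.Errorf("failed to close body: %w", deferredErr)
		}
	}()
	return nil
}

func main() {
	fmt.Println(readAll(closer{})) // failed to close body: boom
}
```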
@@ -419,7 +419,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte
 // opts.Body are set then CallJSON will do a multipart upload with a
 // file attached. opts.MultipartContentName is the name of the
 // parameter and opts.MultipartFileName is the name of the file. If
-// MultpartContentName is set, and request != nil is supplied, then
+// MultipartContentName is set, and request != nil is supplied, then
 // the request will be marshalled into JSON and added to the form with
 // parameter name MultipartMetadataName.
 //
@@ -1,7 +1,7 @@
 // Package librclone exports shims for library use
 //
 // This is the internal implementation which is used for C and
-// Gomobile libaries which need slightly different export styles.
+// Gomobile libraries which need slightly different export styles.
 //
 // The shims are a thin wrapper over the rclone RPC.
 package librclone
@@ -186,7 +186,7 @@ func (d *Dir) ForgetAll() (hasVirtual bool) {
 			}
 		}
 	}
-	// Purge any unecessary virtual entries
+	// Purge any unnecessary virtual entries
 	d._purgeVirtual()

 	d.read = time.Time{}
@@ -450,7 +450,7 @@ func TestCachePurgeClean(t *testing.T) {
 	_, err = os.Stat(potato1.c.toOSPath(potato1.name))
 	require.NoError(t, err)

-	// Add some potatos
+	// Add some potatoes
 	potato2 := c.Item("sub/dir/potato2")
 	require.NoError(t, potato2.Open(nil))
 	require.NoError(t, potato2.Truncate(5))
@@ -603,7 +603,7 @@ func TestCacheRename(t *testing.T) {
 	assertPathNotExist(t, osPathMeta)
 	assert.False(t, c.Exists("sub/newPotato"))

-	// non-existent file - is ignored
+	// nonexistent file - is ignored
 	assert.NoError(t, c.Rename("nonexist", "nonexist2", nil))
 }

@@ -1152,7 +1152,7 @@ func (item *Item) _ensure(offset, size int64) (err error) {
 // This is called by the downloader downloading file segments and the
 // vfs layer writing to the file.
 //
-// This doesn't mark the item as Dirty - that the the responsibility
+// This doesn't mark the item as Dirty - that the responsibility
 // of the caller as we don't know here whether we are adding reads or
 // writes to the cache file.
 //
@@ -408,7 +408,7 @@ func TestWriteBackAddUpdateNotModified(t *testing.T) {
 	pi2 := newPutItem(t)
 	id2 := wb.Add(id, "one", false, pi2.put)
 	assert.Equal(t, id, id2)
-	checkNotOnHeap(t, wb, wbItem) // object still being transfered
+	checkNotOnHeap(t, wb, wbItem) // object still being transferred
 	checkInLookup(t, wb, wbItem)

 	// Because modified was false above this should not cancel the
@@ -525,7 +525,7 @@ func TestWriteBackMaxQueue(t *testing.T) {
 	assert.Equal(t, toTransfer-maxTransfers, queued)
 	assert.Equal(t, maxTransfers, inProgress)

-	// now finish the the first maxTransfers
+	// now finish the first maxTransfers
 	for i := 0; i < maxTransfers; i++ {
 		pis[i].finish(nil)
 	}