docs: fix typos found by codespell in docs and code comments

Dimitri Papadopoulos 2025-01-15 14:01:06 +01:00 committed by albertony
parent 5316acd046
commit b1d4de69c2
30 changed files with 43 additions and 43 deletions

@@ -899,7 +899,7 @@ func (o *Object) getMetadata(ctx context.Context) error {
 // Hash returns the MD5 of an object returning a lowercase hex string
 //
-// May make a network request becaue the [fs.List] method does not
+// May make a network request because the [fs.List] method does not
 // return MD5 hashes for DirEntry
 func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
 if ty != hash.MD5 {

@@ -445,7 +445,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
 }
 // build request
-// cant use normal rename as file needs to be "activated" first
+// can't use normal rename as file needs to be "activated" first
 r := api.NewUpdateFileInfo()
 r.DocumentID = doc.DocumentID

@@ -75,7 +75,7 @@ type MoveFolderParam struct {
 DestinationPath string `validate:"nonzero" json:"destinationPath"`
 }
-// JobIDResponse respresents response struct with JobID for folder operations
+// JobIDResponse represents response struct with JobID for folder operations
 type JobIDResponse struct {
 JobID string `json:"jobId"`
 }

@@ -424,7 +424,7 @@ func (f *Fs) newSingleConnClient(ctx context.Context) (*rest.Client, error) {
 })
 // Set our own http client in the context
 ctx = oauthutil.Context(ctx, baseClient)
-// create a new oauth client, re-use the token source
+// create a new oauth client, reuse the token source
 oAuthClient := oauth2.NewClient(ctx, f.ts)
 return rest.NewClient(oAuthClient).SetRoot("https://" + f.opt.Hostname), nil
 }

@@ -3344,7 +3344,7 @@ func setQuirks(opt *Options) {
 listObjectsV2 = true // Always use ListObjectsV2 instead of ListObjects
 virtualHostStyle = true // Use bucket.provider.com instead of putting the bucket in the URL
 urlEncodeListings = true // URL encode the listings to help with control characters
-useMultipartEtag = true // Set if Etags for multpart uploads are compatible with AWS
+useMultipartEtag = true // Set if Etags for multipart uploads are compatible with AWS
 useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
 mightGzip = true // assume all providers might use content encoding gzip until proven otherwise
 useAlreadyExists = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
@@ -6057,7 +6057,7 @@ func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectIn
 if mOut == nil {
 err = fserrors.RetryErrorf("internal error: no info from multipart upload")
 } else if mOut.UploadId == nil {
-err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
+err = fserrors.RetryErrorf("internal error: no UploadId in multipart upload: %#v", *mOut)
 }
 }
 return f.shouldRetry(ctx, err)

@@ -29,7 +29,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
 cmd := exec.Command("git", "log", "--oneline", from+".."+to)
 out, err := cmd.Output()
 if err != nil {
-log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+log.Fatalf("failed to run git log %s: %v", from+".."+to, err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 }
 logMap = map[string]string{}
 logs = []string{}
@@ -39,7 +39,7 @@ func readCommits(from, to string) (logMap map[string]string, logs []string) {
 }
 match := logRe.FindSubmatch(line)
 if match == nil {
-log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+log.Fatalf("failed to parse line: %q", line) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 }
 var hash, logMessage = string(match[1]), string(match[2])
 logMap[logMessage] = hash
@@ -52,12 +52,12 @@ func main() {
 flag.Parse()
 args := flag.Args()
 if len(args) != 0 {
-log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+log.Fatalf("Syntax: %s", os.Args[0]) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 }
 // v1.54.0
 versionBytes, err := os.ReadFile("VERSION")
 if err != nil {
-log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+log.Fatalf("Failed to read version: %v", err) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 }
 if versionBytes[0] == 'v' {
 versionBytes = versionBytes[1:]
@@ -65,7 +65,7 @@ func main() {
 versionBytes = bytes.TrimSpace(versionBytes)
 semver := semver.New(string(versionBytes))
 stable := fmt.Sprintf("v%d.%d", semver.Major, semver.Minor-1)
-log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. intead of log.
+log.Printf("Finding commits in %v not in stable %s", semver, stable) //nolint:gocritic // Don't include gocritic when running golangci-lint to avoid ruleguard suggesting fs. instead of log.
 masterMap, masterLogs := readCommits(stable+".0", "master")
 stableMap, _ := readCommits(stable+".0", stable+"-stable")
 for _, logMessage := range masterLogs {

@@ -218,7 +218,7 @@ func (b *bisyncRun) setFromCompareFlag(ctx context.Context) error {
 if b.opt.CompareFlag == "" {
 return nil
 }
-var CompareFlag CompareOpt // for exlcusions
+var CompareFlag CompareOpt // for exclusions
 opts := strings.Split(b.opt.CompareFlag, ",")
 for _, opt := range opts {
 switch strings.ToLower(strings.TrimSpace(opt)) {

@@ -394,7 +394,7 @@ func parseHash(str string) (string, string, error) {
 return "", "", fmt.Errorf("invalid hash %q", str)
 }
-// checkListing verifies that listing is not empty (unless resynching)
+// checkListing verifies that listing is not empty (unless resyncing)
 func (b *bisyncRun) checkListing(ls *fileList, listing, msg string) error {
 if b.opt.Resync || !ls.empty() {
 return nil

@@ -23,7 +23,7 @@ INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
 INFO : Bisync successful
 (05) : move-listings empty-path1
-(06) : test 2. resync with empty path2, resulting in synching all content to path2.
+(06) : test 2. resync with empty path2, resulting in syncing all content to path2.
 (07) : purge-children {path2/}
 (08) : bisync resync
 INFO : Setting --ignore-listing-checksum as neither --checksum nor --compare checksum are set.

@@ -1,6 +1,6 @@
 test resync
 # 1. Resync with empty Path1, resulting in copying all content FROM Path2
-# 2. Resync with empty Path2, resulting in synching all content TO Path2
+# 2. Resync with empty Path2, resulting in syncing all content TO Path2
 # 3. Exercise all of the various file difference scenarios during a resync:
 # File Path1 Path2 Expected action Who wins
 # - file1.txt Exists Missing Sync Path1 >Path2 Path1
@@ -17,7 +17,7 @@ purge-children {path1/}
 bisync resync
 move-listings empty-path1
-test 2. resync with empty path2, resulting in synching all content to path2.
+test 2. resync with empty path2, resulting in syncing all content to path2.
 purge-children {path2/}
 bisync resync
 move-listings empty-path2

@@ -549,12 +549,12 @@ password to re-encrypt the config.
 When |--password-command| is called to change the password then the
 environment variable |RCLONE_PASSWORD_CHANGE=1| will be set. So if
-changing passwords programatically you can use the environment
+changing passwords programmatically you can use the environment
 variable to distinguish which password you must supply.
 Alternatively you can remove the password first (with |rclone config
 encryption remove|), then set it again with this command which may be
-easier if you don't mind the unecrypted config file being on the disk
+easier if you don't mind the unencrypted config file being on the disk
 briefly.
 `, "|", "`"),
 RunE: func(command *cobra.Command, args []string) error {

@@ -54,7 +54,7 @@ destination if there is one with the same name.
 Setting |--stdout| or making the output file name |-|
 will cause the output to be written to standard output.
-### Troublshooting
+### Troubleshooting
 If you can't get |rclone copyurl| to work then here are some things you can try:

@@ -194,7 +194,7 @@ func (f *FS) Chown(name string, uid, gid int) (err error) {
 return file.Chown(uid, gid)
 }
-// Chtimes changes the acces time and modified time
+// Chtimes changes the access time and modified time
 func (f *FS) Chtimes(name string, atime time.Time, mtime time.Time) (err error) {
 defer log.Trace(name, "atime=%v, mtime=%v", atime, mtime)("err=%v", &err)
 return f.vfs.Chtimes(name, atime, mtime)

@@ -145,7 +145,7 @@ that it uses an on disk cache, but the cache entries are held as
 symlinks. Rclone will use the handle of the underlying file as the NFS
 handle which improves performance. This sort of cache can't be backed
 up and restored as the underlying handles will change. This is Linux
-only. It requres running rclone as root or with |CAP_DAC_READ_SEARCH|.
+only. It requires running rclone as root or with |CAP_DAC_READ_SEARCH|.
 You can run rclone with this extra permission by doing this to the
 rclone binary |sudo setcap cap_dac_read_search+ep /path/to/rclone|.

@@ -158,7 +158,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin
 }, nil
 }
-// GetObject fetchs the object from the filesystem.
+// GetObject fetches the object from the filesystem.
 func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string, rangeRequest *gofakes3.ObjectRangeRequest) (obj *gofakes3.Object, err error) {
 _vfs, err := b.s.getVFS(ctx)
 if err != nil {
@@ -400,7 +400,7 @@ func (b *s3Backend) deleteObject(ctx context.Context, bucketName, objectName str
 }
 fp := path.Join(bucketName, objectName)
-// S3 does not report an error when attemping to delete a key that does not exist, so
+// S3 does not report an error when attempting to delete a key that does not exist, so
 // we need to skip IsNotExist errors.
 if err := _vfs.Remove(fp); err != nil && !os.IsNotExist(err) {
 return err

@@ -19,7 +19,7 @@ func (b *s3Backend) entryListR(_vfs *vfs.VFS, bucket, fdPath, name string, addPr
 for _, entry := range dirEntries {
 object := entry.Name()
-// workround for control-chars detect
+// workaround for control-chars detect
 objectPath := path.Join(fdPath, object)
 if !strings.HasPrefix(object, name) {

@@ -487,7 +487,7 @@ See the [bisync filters](#filtering) section and generic
 [--filter-from](/filtering/#filter-from-read-filtering-patterns-from-a-file)
 documentation.
 An [example filters file](#example-filters-file) contains filters for
-non-allowed files for synching with Dropbox.
+non-allowed files for syncing with Dropbox.
 If you make changes to your filters file then bisync requires a run
 with `--resync`. This is a safety feature, which prevents existing files
@@ -664,7 +664,7 @@ Using `--check-sync=false` will disable it and may significantly reduce the
 sync run times for very large numbers of files.
 The check may be run manually with `--check-sync=only`. It runs only the
-integrity check and terminates without actually synching.
+integrity check and terminates without actually syncing.
 Note that currently, `--check-sync` **only checks listing snapshots and NOT the
 actual files on the remotes.** Note also that the listing snapshots will not
@@ -1141,7 +1141,7 @@ The `--include*`, `--exclude*`, and `--filter` flags are also supported.
 ### How to filter directories
-Filtering portions of the directory tree is a critical feature for synching.
+Filtering portions of the directory tree is a critical feature for syncing.
 Examples of directory trees (always beneath the Path1/Path2 root level)
 you may want to exclude from your sync:
@@ -1250,7 +1250,7 @@ quashed by adding `--quiet` to the bisync command line.
 ## Example exclude-style filters files for use with Dropbox {#exclude-filters}
-- Dropbox disallows synching the listed temporary and configuration/data files.
+- Dropbox disallows syncing the listed temporary and configuration/data files.
 The `- <filename>` filters exclude these files where ever they may occur
 in the sync tree. Consider adding similar exclusions for file types
 you don't need to sync, such as core dump and software build files.
@@ -1584,7 +1584,7 @@ test command flags can be equally prefixed by a single `-` or double dash.
 - `go test . -case basic -remote local -remote2 local`
 runs the `test_basic` test case using only the local filesystem,
-synching one local directory with another local directory.
+syncing one local directory with another local directory.
 Test script output is to the console, while commands within scenario.txt
 have their output sent to the `.../workdir/test.log` file,
 which is finally compared to the golden copy.
@@ -1860,4 +1860,4 @@ causing bisync to consider more files than necessary due to overbroad filters du
 * Added [new `--ignore-listing-checksum` flag](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20%2D%2Dignore%2Dchecksum%20should%20be%20split%20into%20two%20flags%20for%20separate%20purposes)
 to distinguish from `--ignore-checksum`
 * [Performance improvements](https://forum.rclone.org/t/bisync-bugs-and-feature-requests/37636#:~:text=6.%20Deletes%20take%20several%20times%20longer%20than%20copies) for large remotes
 * Documentation and testing improvements

@@ -741,7 +741,7 @@ strong random number generator. The nonce is incremented for each
 chunk read making sure each nonce is unique for each block written.
 The chance of a nonce being reused is minuscule. If you wrote an
 exabyte of data (10¹⁸ bytes) you would have a probability of
-approximately 2×10⁻³² of re-using a nonce.
+approximately 2×10⁻³² of reusing a nonce.
 #### Chunk

@@ -2930,7 +2930,7 @@ so they take exactly the same form.
 The options set by environment variables can be seen with the `-vv` flag, e.g. `rclone version -vv`.
 Options that can appear multiple times (type `stringArray`) are
-treated slighly differently as environment variables can only be
+treated slightly differently as environment variables can only be
 defined once. In order to allow a simple mechanism for adding one or
 many items, the input is treated as a [CSV encoded](https://godoc.org/encoding/csv)
 string. For example

@@ -384,7 +384,7 @@ Use the gphotosdl proxy for downloading the full resolution images
 The Google API will deliver images and video which aren't full
 resolution, and/or have EXIF data missing.
-However if you ue the gphotosdl proxy tnen you can download original,
+However if you use the gphotosdl proxy then you can download original,
 unchanged images.
 This runs a headless browser in the background.

@@ -2068,7 +2068,7 @@ the `--vfs-cache-mode` is off, it will return an empty result.
 ],
 }
-The `expiry` time is the time until the file is elegible for being
+The `expiry` time is the time until the file is eligible for being
 uploaded in floating point seconds. This may go negative. As rclone
 only transfers `--transfers` files at once, only the lowest
 `--transfers` expiry times will have `uploading` as `true`. So there

@@ -750,7 +750,7 @@ Notes on above:
 that `USER_NAME` has been created.
 2. The Resource entry must include both resource ARNs, as one implies
 the bucket and the other implies the bucket's objects.
-3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exsits, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
+3. When using [s3-no-check-bucket](#s3-no-check-bucket) and the bucket already exists, the `"arn:aws:s3:::BUCKET_NAME"` doesn't have to be included.
 For reference, [here's an Ansible script](https://gist.github.com/ebridges/ebfc9042dd7c756cd101cfa807b7ae2b)
 that will generate one or more buckets that will work with `rclone sync`.

@@ -133,7 +133,7 @@ func TestCertificates(t *testing.T) {
 assert.Fail(t, "Certificate expired", "Certificate expires at %s, current time is %s", cert[0].NotAfter.Sub(startTime), time.Since(startTime))
 }
-// Write some test data to fullfil the request
+// Write some test data to fulfill the request
 w.Header().Set("Content-Type", "text/plain")
 _, _ = fmt.Fprintln(w, "test data")
 }))

@@ -95,7 +95,7 @@ func LogValueHide(key string, value interface{}) LogValueItem {
 return LogValueItem{key: key, value: value, render: false}
 }
-// String returns the representation of value. If render is fals this
+// String returns the representation of value. If render is false this
 // is an empty string so LogValueItem entries won't show in the
 // textual representation of logs.
 func (j LogValueItem) String() string {

@@ -297,7 +297,7 @@ func (o *MemoryObject) Open(ctx context.Context, options ...fs.OpenOption) (io.R
 // Update in to the object with the modTime given of the given size
 //
-// This re-uses the internal buffer if at all possible.
+// This reuses the internal buffer if at all possible.
 func (o *MemoryObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
 size := src.Size()
 if size == 0 {

@@ -142,7 +142,7 @@ func TestMemoryObject(t *testing.T) {
 assert.NoError(t, err)
 checkContent(o, "Rutabaga")
 assert.Equal(t, newNow, o.ModTime(context.Background()))
-assert.Equal(t, "Rutaba", string(content)) // check we re-used the buffer
+assert.Equal(t, "Rutaba", string(content)) // check we reused the buffer
 // not within the buffer
 newStr := "0123456789"

@@ -2391,7 +2391,7 @@ func Run(t *testing.T, opt *Opt) {
 var itemCopy = item
 itemCopy.Path += ".copy"
-// Set copy cutoff to mininum value so we make chunks
+// Set copy cutoff to minimum value so we make chunks
 origCutoff, err := do.SetCopyCutoff(minChunkSize)
 require.NoError(t, err)
 defer func() {

@@ -464,7 +464,7 @@ the |--vfs-cache-mode| is off, it will return an empty result.
 ],
 }
-The |expiry| time is the time until the file is elegible for being
+The |expiry| time is the time until the file is eligible for being
 uploaded in floating point seconds. This may go negative. As rclone
 only transfers |--transfers| files at once, only the lowest
 |--transfers| expiry times will have |uploading| as |true|. So there

@@ -216,7 +216,7 @@ func New(f fs.Fs, opt *vfscommon.Options) *VFS {
 configName := fs.ConfigString(f)
 for _, activeVFS := range active[configName] {
 if vfs.Opt == activeVFS.Opt {
-fs.Debugf(f, "Re-using VFS from active cache")
+fs.Debugf(f, "Reusing VFS from active cache")
 activeVFS.inUse.Add(1)
 return activeVFS
 }

@@ -428,7 +428,7 @@ func TestItemReloadCacheStale(t *testing.T) {
 assert.Equal(t, int64(110), fi.Size())
 // Write to the file to make it dirty
-// This checks we aren't re-using stale data
+// This checks we aren't reusing stale data
 n, err := item.WriteAt([]byte("HELLO"), 0)
 require.NoError(t, err)
 assert.Equal(t, 5, n)