diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 17c030eb1..e7db8096f 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -2119,7 +2119,7 @@ func (w *azChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader return currentChunkSize, err } -// Abort the multpart upload. +// Abort the multipart upload. // // FIXME it would be nice to delete uncommitted blocks. // diff --git a/backend/box/box.go b/backend/box/box.go index d925ab7db..0a653af12 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -154,7 +154,7 @@ func init() { Default: "", Help: `Impersonate this user ID when using a service account. -Settng this flag allows rclone, when using a JWT service account, to +Setting this flag allows rclone, when using a JWT service account, to act on behalf of another user by setting the as-user header. The user ID is the Box identifier for a user. User IDs can found for diff --git a/backend/mega/mega.go b/backend/mega/mega.go index 31ef39a7f..6f974b779 100644 --- a/backend/mega/mega.go +++ b/backend/mega/mega.go @@ -206,7 +206,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e } ci := fs.GetConfig(ctx) - // cache *mega.Mega on username so we can re-use and share + // cache *mega.Mega on username so we can reuse and share // them between remotes. They are expensive to make as they // contain all the objects and sharing the objects makes the // move code easier as we don't have to worry about mixing diff --git a/backend/pikpak/pikpak.go b/backend/pikpak/pikpak.go index d82361022..7cd5c1117 100644 --- a/backend/pikpak/pikpak.go +++ b/backend/pikpak/pikpak.go @@ -1215,7 +1215,7 @@ func (f *Fs) upload(ctx context.Context, in io.Reader, leaf, dirID, sha1Str stri return nil, fmt.Errorf("failed to upload: %w", err) } // refresh uploaded file info - // Compared to `newfile.File` this upgrades several feilds... 
+ // Compared to `newfile.File` this upgrades several fields... // audit, links, modified_time, phase, revision, and web_content_link return f.getFile(ctx, newfile.File.ID) } diff --git a/backend/s3/s3.go b/backend/s3/s3.go index 935daaacf..db9fab8d8 100644 --- a/backend/s3/s3.go +++ b/backend/s3/s3.go @@ -5611,7 +5611,7 @@ func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader return currentChunkSize, err } -// Abort the multpart upload +// Abort the multipart upload func (w *s3ChunkWriter) Abort(ctx context.Context) error { err := w.f.pacer.Call(func() (bool, error) { _, err := w.f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{ diff --git a/backend/sftp/sftp.go b/backend/sftp/sftp.go index d9c300bfb..6b425c7d0 100644 --- a/backend/sftp/sftp.go +++ b/backend/sftp/sftp.go @@ -1014,7 +1014,7 @@ func (f *Fs) keyboardInteractiveReponse(user, instruction string, questions []st // save it so on reconnection we give back the previous string. // This removes the ability to let the user correct a mistaken entry, // but means that reconnects are transparent. -// We'll re-use config.Pass for this, 'cos we know it's not been +// We'll reuse config.Pass for this, 'cos we know it's not been // specified. 
func (f *Fs) getPass() (string, error) { for f.savedpswd == "" { @@ -1602,7 +1602,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { fs.Debugf(f, "About path %q", aboutPath) vfsStats, err = c.sftpClient.StatVFS(aboutPath) } - f.putSftpConnection(&c, err) // Return to pool asap, if running shell command below it will be re-used + f.putSftpConnection(&c, err) // Return to pool asap, if running shell command below it will be reused if vfsStats != nil { total := vfsStats.TotalSpace() free := vfsStats.FreeSpace() @@ -2044,7 +2044,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op if err != nil { return fmt.Errorf("Update: %w", err) } - // Hang on to the connection for the whole upload so it doesn't get re-used while we are uploading + // Hang on to the connection for the whole upload so it doesn't get reused while we are uploading file, err := c.sftpClient.OpenFile(o.path(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC) if err != nil { o.fs.putSftpConnection(&c, err) diff --git a/cmd/cmd.go b/cmd/cmd.go index e80e75c51..7043995f0 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -1,6 +1,6 @@ // Package cmd implements the rclone command // -// It is in a sub package so it's internals can be re-used elsewhere +// It is in a sub package so its internals can be reused elsewhere package cmd // FIXME only attach the remote flags when using a remote??? diff --git a/cmd/mount2/mount.go b/cmd/mount2/mount.go index 811cab235..f244bf40d 100644 --- a/cmd/mount2/mount.go +++ b/cmd/mount2/mount.go @@ -83,7 +83,7 @@ func mountOptions(fsys *FS, f fs.Fs, opt *mountlib.Options) (mountOpts *fuse.Mou // (128 kiB on Linux) and cannot be larger than MaxWrite.
// // MaxReadAhead only affects buffered reads (=non-direct-io), but even then, the - // kernel can and does send larger reads to satisfy read reqests from applications + // kernel can and does send larger reads to satisfy read requests from applications // (up to MaxWrite or VM_READAHEAD_PAGES=128 kiB, whichever is less). MaxReadAhead int diff --git a/docs/content/box.md b/docs/content/box.md index 8c4123d14..025e12902 100644 --- a/docs/content/box.md +++ b/docs/content/box.md @@ -442,7 +442,7 @@ Properties: Impersonate this user ID when using a service account. -Settng this flag allows rclone, when using a JWT service account, to +Setting this flag allows rclone, when using a JWT service account, to act on behalf of another user by setting the as-user header. The user ID is the Box identifier for a user. User IDs can found for diff --git a/docs/content/changelog.md b/docs/content/changelog.md index 8e23f3412..babf51b4e 100644 --- a/docs/content/changelog.md +++ b/docs/content/changelog.md @@ -105,14 +105,14 @@ description: "Rclone Changelog" * Fix 425 "TLS session of data connection not resumed" errors (Nick Craig-Wood) * Hdfs * Retry "replication in progress" errors when uploading (Nick Craig-Wood) - * Fix uploading to the wrong object on Update with overriden remote name (Nick Craig-Wood) + * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood) * HTTP * CORS should not be sent if not set (yuudi) * Fix webdav OPTIONS response (yuudi) * Opendrive * Fix List on a just deleted and remade directory (Nick Craig-Wood) * Oracleobjectstorage - * Use rclone's rate limiter in mutipart transfers (Manoj Ghosh) + * Use rclone's rate limiter in multipart transfers (Manoj Ghosh) * Implement `OpenChunkWriter` and multi-thread uploads (Manoj Ghosh) * S3 * Refactor multipart upload to use `OpenChunkWriter` and `ChunkWriter` (Vitor Gomes) @@ -285,14 +285,14 @@ description: "Rclone Changelog" * Fix quickxorhash on 32 bit architectures (Nick 
Craig-Wood) * Report any list errors during `rclone cleanup` (albertony) * Putio - * Fix uploading to the wrong object on Update with overriden remote name (Nick Craig-Wood) + * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood) * Fix modification times not being preserved for server side copy and move (Nick Craig-Wood) * Fix server side copy failures (400 errors) (Nick Craig-Wood) * S3 * Empty directory markers (Jānis Bebrītis, Nick Craig-Wood) * Update Scaleway storage classes (Brian Starkey) * Fix `--s3-versions` on individual objects (Nick Craig-Wood) - * Fix hang on aborting multpart upload with iDrive e2 (Nick Craig-Wood) + * Fix hang on aborting multipart upload with iDrive e2 (Nick Craig-Wood) * Fix missing "tier" metadata (Nick Craig-Wood) * Fix V3sign: add missing subresource delete (cc) * Fix Arvancloud Domain and region changes and alphabetise the provider (Ehsan Tadayon) @@ -309,7 +309,7 @@ description: "Rclone Changelog" * Code cleanup to avoid overwriting ctx before first use (fixes issue reported by the staticcheck linter) (albertony) * Storj * Fix "uplink: too many requests" errors when uploading to the same file (Nick Craig-Wood) - * Fix uploading to the wrong object on Update with overriden remote name (Nick Craig-Wood) + * Fix uploading to the wrong object on Update with overridden remote name (Nick Craig-Wood) * Swift * Ignore 404 error when deleting an object (Nick Craig-Wood) * Union @@ -3938,7 +3938,7 @@ Point release to fix hubic and azureblob backends. 
* Revert to copy when moving file across file system boundaries * `--skip-links` to suppress symlink warnings (thanks Zhiming Wang) * Mount - * Re-use `rcat` internals to support uploads from all remotes + * Reuse `rcat` internals to support uploads from all remotes * Dropbox * Fix "entry doesn't belong in directory" error * Stop using deprecated API methods diff --git a/docs/content/crypt.md b/docs/content/crypt.md index 8d16538fc..9599dccc1 100644 --- a/docs/content/crypt.md +++ b/docs/content/crypt.md @@ -712,7 +712,7 @@ has a header and is divided into chunks. The initial nonce is generated from the operating systems crypto strong random number generator. The nonce is incremented for each chunk read making sure each nonce is unique for each block written. -The chance of a nonce being re-used is minuscule. If you wrote an +The chance of a nonce being reused is minuscule. If you wrote an exabyte of data (10¹⁸ bytes) you would have a probability of approximately 2×10⁻³² of re-using a nonce. diff --git a/docs/content/install.md b/docs/content/install.md index 8068ed930..885ea5e32 100644 --- a/docs/content/install.md +++ b/docs/content/install.md @@ -315,7 +315,7 @@ Make sure you have [Snapd installed](https://snapcraft.io/docs/installing-snapd) ```bash $ sudo snap install rclone ``` -Due to the strict confinement of Snap, rclone snap cannot acess real /home/$USER/.config/rclone directory, default config path is as below. +Due to the strict confinement of Snap, rclone snap cannot access real /home/$USER/.config/rclone directory, default config path is as below. - Default config directory: - /home/$USER/snap/rclone/current/.config/rclone @@ -585,7 +585,7 @@ It requires .NET Framework, but it is preinstalled on newer versions of Windows, also provides alternative standalone distributions which includes necessary runtime (.NET 5). WinSW is a command-line only utility, where you have to manually create an XML file with service configuration. 
This may be a drawback for some, but it can also be an advantage -as it is easy to back up and re-use the configuration +as it is easy to back up and reuse the configuration settings, without having go through manual steps in a GUI. One thing to note is that by default it does not restart the service on error, one have to explicit enable this in the configuration file (via the "onfailure" parameter). diff --git a/docs/content/oracleobjectstorage/tutorial_mount.md b/docs/content/oracleobjectstorage/tutorial_mount.md index 924fc4344..85364d0aa 100644 --- a/docs/content/oracleobjectstorage/tutorial_mount.md +++ b/docs/content/oracleobjectstorage/tutorial_mount.md @@ -209,7 +209,7 @@ rclone mount \ # its exact meaning will depend on the backend. For HTTP based backends it is an HTTP PUT/GET/POST/etc and its response --cache-dir /tmp/rclone/cache # Directory rclone will use for caching. --dir-cache-time 5m \ # Time to cache directory entries for (default 5m0s) - --vfs-cache-mode writes \ # Cache mode off|minimal|writes|full (default off), writes gives the maximum compatiblity like a local disk + --vfs-cache-mode writes \ # Cache mode off|minimal|writes|full (default off), writes gives the maximum compatibility like a local disk --vfs-cache-max-age 20m \ # Max age of objects in the cache (default 1h0m0s) --vfs-cache-max-size 10G \ # Max total size of objects in the cache (default off) --vfs-cache-poll-interval 1m \ # Interval to poll the cache for stale objects (default 1m0s) @@ -372,7 +372,7 @@ Install NFS Utils sudo yum install -y nfs-utils ``` -Export the desired directory via NFS Server in the same machine where rclone has mounted to, ensure NFS serivce has +Export the desired directory via NFS Server in the same machine where rclone has mounted to, ensure NFS service has desired permissions to read the directory. If it runs as root, then it will have permissions for sure, but if it runs as separate user then ensure that user has necessary desired privileges. 
```shell diff --git a/fs/object/object_test.go b/fs/object/object_test.go index a00cf29a0..1e5ca75c4 100644 --- a/fs/object/object_test.go +++ b/fs/object/object_test.go @@ -152,7 +152,7 @@ func TestMemoryObject(t *testing.T) { err = o.Update(context.Background(), newContent, src) assert.NoError(t, err) checkContent(o, newStr) - assert.Equal(t, "Rutaba", string(content)) // check we didn't re-use the buffer + assert.Equal(t, "Rutaba", string(content)) // check we didn't reuse the buffer // now try streaming newStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" diff --git a/fs/operations/rc_test.go b/fs/operations/rc_test.go index b550cf85a..98e383270 100644 --- a/fs/operations/rc_test.go +++ b/fs/operations/rc_test.go @@ -318,7 +318,7 @@ func TestRcSetTier(t *testing.T) { r.CheckRemoteItems(t, file1) // Because we don't know what the current tier options here are, let's - // just get the current tier, and re-use that + // just get the current tier, and reuse that o, err := r.Fremote.NewObject(ctx, file1.Path) require.NoError(t, err) trr, ok := o.(fs.GetTierer) @@ -345,7 +345,7 @@ func TestRcSetTierFile(t *testing.T) { r.CheckRemoteItems(t, file1) // Because we don't know what the current tier options here are, let's - // just get the current tier, and re-use that + // just get the current tier, and reuse that o, err := r.Fremote.NewObject(ctx, file1.Path) require.NoError(t, err) trr, ok := o.(fs.GetTierer) diff --git a/vfs/vfscache/downloaders/downloaders.go b/vfs/vfscache/downloaders/downloaders.go index 8d6e90fb2..b12c107b5 100644 --- a/vfs/vfscache/downloaders/downloaders.go +++ b/vfs/vfscache/downloaders/downloaders.go @@ -345,7 +345,7 @@ func (dls *Downloaders) _ensureDownloader(r ranges.Range) (err error) { start, offset := dl.getRange() // The downloader's offset to offset+window is the gap - // in which we would like to re-use this + // in which we would like to reuse this // downloader. 
The downloader will never reach before // start and offset+windows is too far away - we'd // rather start another downloader.