backend: fix typos found by codespell

Dimitri Papadopoulos 2022-09-20 08:54:47 +02:00 committed by Nick Craig-Wood
parent cce8936802
commit bfe272bf67
10 changed files with 14 additions and 15 deletions

View File

@@ -1787,7 +1787,7 @@ func (f *Fs) CleanUpCache(ignoreLastTs bool) {
}
}
-// StopBackgroundRunners will signall all the runners to stop their work
+// StopBackgroundRunners will signal all the runners to stop their work
// can be triggered from a terminate signal or from testing between runs
func (f *Fs) StopBackgroundRunners() {
f.cleanupChan <- false
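The hunk above touches the cache backend's pattern of signalling its background runners to stop over a channel (`f.cleanupChan <- false`). As a rough, self-contained sketch of that stop-signal pattern (not rclone's actual code, and the boolean flag's meaning is illustrative only):

```go
package main

import (
	"fmt"
	"time"
)

// runner does periodic cleanup work until it receives a value on stop.
func runner(stop <-chan bool) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("cleanup pass")
		case v := <-stop:
			// A terminate signal or a test harness can trigger this.
			fmt.Println("stop requested, flag:", v)
			return
		}
	}
}

func main() {
	stop := make(chan bool)
	go runner(stop)
	time.Sleep(300 * time.Millisecond)
	stop <- false // signal the runner to stop, as StopBackgroundRunners does
	time.Sleep(50 * time.Millisecond)
}
```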

View File

@@ -1,4 +1,4 @@
-// Package combine implents a backend to combine multiple remotes in a directory tree
+// Package combine implements a backend to combine multiple remotes in a directory tree
package combine
/*
@@ -351,7 +351,7 @@ func (f *Fs) multithread(ctx context.Context, fn func(context.Context, *upstream
return g.Wait()
}
-// join the elements together but unline path.Join return empty string
+// join the elements together but unlike path.Join return empty string
func join(elem ...string) string {
result := path.Join(elem...)
if result == "." {

View File

@@ -209,7 +209,7 @@ func (c *Cipher) setPassBadBlocks(passBadBlocks bool) {
// scrypt.
//
// If salt is "" we use a fixed salt just to make attackers lives
-// slighty harder than using no salt.
+// slightly harder than using no salt.
//
// Note that empty password makes all 0x00 keys which is used in the
// tests.
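The comment being corrected here describes deriving keys with scrypt, falling back to a fixed salt when none is supplied. Purely as an illustration (the salt value, cost parameters and function names below are assumptions, not taken from the source), that pattern looks roughly like:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/scrypt"
)

// defaultSalt is a placeholder fixed salt used when the user provides none.
// As the comment says, it only makes attacks slightly harder than no salt.
var defaultSalt = []byte("illustrative-fixed-salt")

// deriveKey stretches the password into keyLen bytes of key material.
// The cost parameters are typical scrypt values, not rclone's actual ones.
func deriveKey(password, salt string, keyLen int) ([]byte, error) {
	saltBytes := []byte(salt)
	if salt == "" {
		saltBytes = defaultSalt
	}
	return scrypt.Key([]byte(password), saltBytes, 1<<14, 8, 1, keyLen)
}

func main() {
	key, err := deriveKey("correct horse battery staple", "", 32)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", key)
}
```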
@@ -914,9 +914,8 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
return n, nil
}
-// calculateUnderlying converts an (offset, limit) in a crypted file
-// into an (underlyingOffset, underlyingLimit) for the underlying
-// file.
+// calculateUnderlying converts an (offset, limit) in an encrypted file
+// into an (underlyingOffset, underlyingLimit) for the underlying file.
//
// It also returns number of bytes to discard after reading the first
// block and number of blocks this is from the start so the nonce can
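The corrected doc comment describes mapping an offset in the decrypted stream onto an offset in the underlying encrypted file. A simplified sketch of that arithmetic, assuming a 32-byte file header, 64 KiB plaintext blocks and 16 bytes of per-block overhead (assumed constants, not read from the source; limit handling is omitted):

```go
package main

import "fmt"

// Assumed layout constants, for illustration only.
const (
	fileHeaderSize  = 32        // magic + nonce at the start of the encrypted file
	blockDataSize   = 64 * 1024 // plaintext bytes per block
	blockHeaderSize = 16        // per-block authentication overhead
	blockSize       = blockHeaderSize + blockDataSize
)

// calculateUnderlying maps a plaintext offset to the start of the enclosing
// encrypted block, the number of plaintext bytes to discard after decrypting
// that block, and how many blocks in it is (so a counter nonce can be advanced).
func calculateUnderlying(offset int64) (underlyingOffset, discard, blocks int64) {
	blocks = offset / blockDataSize
	discard = offset % blockDataSize
	underlyingOffset = fileHeaderSize + blocks*blockSize
	return underlyingOffset, discard, blocks
}

func main() {
	for _, off := range []int64{0, 1, 65536, 200000} {
		u, d, b := calculateUnderlying(off)
		fmt.Printf("plaintext offset %7d -> underlying %7d, discard %5d, block %d\n", off, u, d, b)
	}
}
```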

View File

@@ -465,7 +465,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options [
if err != nil {
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
}
return nil, fmt.Errorf("corrupted on transfer: %v crypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
return nil, fmt.Errorf("corrupted on transfer: %v encrypted hash differ src %q vs dst %q", ht, srcHash, dstHash)
}
fs.Debugf(src, "%v = %s OK", ht, srcHash)
}

View File

@@ -536,7 +536,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
default:
return nil, err
}
-// if the moint failed we have to abort here
+// if the mount failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the users root namespace
// we disable shared folder mode and proceed normally

View File

@@ -473,7 +473,7 @@ func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse
return shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, fmt.Errorf("didnt got an upload node: %w", err)
return nil, fmt.Errorf("didn't get an upload node: %w", err)
}
// fs.Debugf(f, "Got Upload node")

View File

@@ -333,7 +333,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
if size > int64(300e9) {
return nil, errors.New("File too big, cant upload")
return nil, errors.New("File too big, can't upload")
} else if size == 0 {
return nil, fs.ErrorCantUploadEmptyFiles
}

View File

@@ -2,7 +2,7 @@
package hidrive
// FIXME HiDrive only supports file or folder names of 255 characters or less.
-// Operations that create files oder folder with longer names will throw a HTTP error:
+// Operations that create files or folders with longer names will throw an HTTP error:
// - 422 Unprocessable Entity
// A more graceful way for rclone to handle this may be desirable.
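The FIXME notes that HiDrive rejects over-long names with a 422 and that more graceful handling may be desirable. One possible pre-check, purely illustrative and not rclone's code (it also assumes the limit counts characters rather than bytes), would validate each path segment before sending the request:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
	"unicode/utf8"
)

const maxNameLength = 255 // per-name limit mentioned in the FIXME

var errNameTooLong = errors.New("file or folder name exceeds 255 characters")

// checkPathSegments rejects a remote path early if any of its components is
// too long, instead of waiting for the server's 422 Unprocessable Entity.
func checkPathSegments(remote string) error {
	for _, segment := range strings.Split(remote, "/") {
		if utf8.RuneCountInString(segment) > maxNameLength {
			return fmt.Errorf("%q: %w", segment, errNameTooLong)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkPathSegments("dir/ok.txt"))                      // <nil>
	fmt.Println(checkPathSegments("dir/" + strings.Repeat("x", 300))) // error
}
```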

View File

@@ -1930,7 +1930,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
o.md5 = result.Md5
o.modTime = time.Unix(result.Modified/1000, 0)
} else {
-// If the file state is COMPLETE we don't need to upload it because the file was already found but we still ned to update our metadata
+// If the file state is COMPLETE we don't need to upload it because the file was already found but we still need to update our metadata
return o.readMetaData(ctx, true)
}

View File

@@ -1,4 +1,4 @@
-// Package s3 provides an interface to Amazon S3 oject storage
+// Package s3 provides an interface to Amazon S3 object storage
package s3
//go:generate go run gen_setfrom.go -o setfrom.go
@@ -2998,7 +2998,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
return nil, fmt.Errorf("s3: upload cutoff: %w", err)
}
if opt.Versions && opt.VersionAt.IsSet() {
return nil, errors.New("s3: cant use --s3-versions and --s3-version-at at the same time")
return nil, errors.New("s3: can't use --s3-versions and --s3-version-at at the same time")
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL