diff --git a/backend/azureblob/azureblob.go b/backend/azureblob/azureblob.go index 4c7f2791e..4ce27db2c 100644 --- a/backend/azureblob/azureblob.go +++ b/backend/azureblob/azureblob.go @@ -19,6 +19,7 @@ import ( "net/url" "os" "path" + "slices" "sort" "strconv" "strings" @@ -681,10 +682,8 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) { return true, err } statusCode := storageErr.StatusCode - for _, e := range retryErrorCodes { - if statusCode == e { - return true, err - } + if slices.Contains(retryErrorCodes, statusCode) { + return true, err } } return fserrors.ShouldRetry(err), err diff --git a/backend/azurefiles/azurefiles_internal_test.go b/backend/azurefiles/azurefiles_internal_test.go index 77e0156cc..4996267a1 100644 --- a/backend/azurefiles/azurefiles_internal_test.go +++ b/backend/azurefiles/azurefiles_internal_test.go @@ -61,7 +61,7 @@ const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX" func randomString(charCount int) string { strBldr := strings.Builder{} - for i := 0; i < charCount; i++ { + for range charCount { randPos := rand.Int63n(52) strBldr.WriteByte(chars[randPos]) } diff --git a/backend/b2/api/types.go b/backend/b2/api/types.go index 74c098542..b221d4c2d 100644 --- a/backend/b2/api/types.go +++ b/backend/b2/api/types.go @@ -130,10 +130,10 @@ type AuthorizeAccountResponse struct { AbsoluteMinimumPartSize int `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file. AccountID string `json:"accountId"` // The identifier for the account. Allowed struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it. - BucketID string `json:"bucketId"` // When present, access is restricted to one bucket. - BucketName string `json:"bucketName"` // When present, name of bucket - may be empty - Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has. - NamePrefix interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix + BucketID string `json:"bucketId"` // When present, access is restricted to one bucket. + BucketName string `json:"bucketName"` // When present, name of bucket - may be empty + Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has. + NamePrefix any `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix } `json:"allowed"` APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files. AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header. 
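The hunks above swap three hand-written patterns for their modern Go equivalents: a linear membership scan becomes slices.Contains (Go 1.21), counted loops that never use the index become integer for range (Go 1.22), and interface{} becomes its alias any (Go 1.18). A minimal standalone sketch of the same three idioms, reusing the retryErrorCodes name from the patch but with otherwise assumed values:

package main

import (
	"fmt"
	"slices"
)

var retryErrorCodes = []int{429, 500, 502, 503, 504}

// shouldRetry replaces the removed for/if scan with slices.Contains.
func shouldRetry(statusCode int) bool {
	return slices.Contains(retryErrorCodes, statusCode)
}

func main() {
	for range 3 { // counted loop; no index variable needed
		fmt.Println(shouldRetry(503))
	}
	var v any = "any is an alias for interface{}" // same type, new spelling
	fmt.Println(v)
}
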
diff --git a/backend/b2/b2.go b/backend/b2/b2.go index 357b6c6c8..92e7675f0 100644 --- a/backend/b2/b2.go +++ b/backend/b2/b2.go @@ -16,6 +16,7 @@ import ( "io" "net/http" "path" + "slices" "strconv" "strings" "sync" @@ -589,12 +590,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error { // hasPermission returns if the current AuthorizationToken has the selected permission func (f *Fs) hasPermission(permission string) bool { - for _, capability := range f.info.Allowed.Capabilities { - if capability == permission { - return true - } - } - return false + return slices.Contains(f.info.Allowed.Capabilities, permission) } // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken @@ -1275,7 +1271,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b toBeDeleted := make(chan *api.File, f.ci.Transfers) var wg sync.WaitGroup wg.Add(f.ci.Transfers) - for i := 0; i < f.ci.Transfers; i++ { + for range f.ci.Transfers { go func() { defer wg.Done() for object := range toBeDeleted { @@ -1939,7 +1935,7 @@ func init() { // urlEncode encodes in with % encoding func urlEncode(in string) string { var out bytes.Buffer - for i := 0; i < len(in); i++ { + for i := range len(in) { c := in[i] if noNeedToEncode[c] { _ = out.WriteByte(c) @@ -2260,7 +2256,7 @@ See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules }, } -func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { var newRule api.LifecycleRule if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" { days, err := strconv.Atoi(daysStr) @@ -2349,7 +2345,7 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc. }, } -func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { maxAge := defaultMaxAge if opt["max-age"] != "" { maxAge, err = fs.ParseDuration(opt["max-age"]) @@ -2372,7 +2368,7 @@ it would do. 
`, } -func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { return nil, f.cleanUp(ctx, true, false, 0) } @@ -2391,7 +2387,7 @@ var commandHelp = []fs.CommandHelp{ // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that -func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { switch name { case "lifecycle": return f.lifecycleCommand(ctx, name, arg, opt) diff --git a/backend/b2/upload.go b/backend/b2/upload.go index 287466eaa..e2cd4a1c2 100644 --- a/backend/b2/upload.go +++ b/backend/b2/upload.go @@ -478,17 +478,14 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) { remaining = up.size ) g.SetLimit(up.f.opt.UploadConcurrency) - for part := 0; part < up.parts; part++ { + for part := range up.parts { // Fail fast, in case an errgroup managed function returns an error // gCtx is cancelled. There is no point in copying all the other parts. if gCtx.Err() != nil { break } - reqSize := remaining - if reqSize >= up.chunkSize { - reqSize = up.chunkSize - } + reqSize := min(remaining, up.chunkSize) part := part // for the closure g.Go(func() (err error) { diff --git a/backend/box/box.go b/backend/box/box.go index 8c5903578..c21376837 100644 --- a/backend/box/box.go +++ b/backend/box/box.go @@ -237,8 +237,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC return claims, nil } -func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} { - signingHeaders := map[string]interface{}{ +func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any { + signingHeaders := map[string]any{ "kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID, } return signingHeaders @@ -1343,12 +1343,8 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs. 
nextStreamPosition = streamPosition for { - limit := f.opt.ListChunk - // box only allows a max of 500 events - if limit > 500 { - limit = 500 - } + limit := min(f.opt.ListChunk, 500) opts := rest.Opts{ Method: "GET", diff --git a/backend/box/upload.go b/backend/box/upload.go index c664cfbf5..a9b3a9f9e 100644 --- a/backend/box/upload.go +++ b/backend/box/upload.go @@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api const defaultDelay = 10 var tries int outer: - for tries = 0; tries < maxTries; tries++ { + for tries = range maxTries { err = o.fs.pacer.Call(func() (bool, error) { resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil) if err != nil { @@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct errs := make(chan error, 1) var wg sync.WaitGroup outer: - for part := 0; part < session.TotalParts; part++ { + for part := range session.TotalParts { // Check any errors select { case err = <-errs: @@ -211,10 +211,7 @@ outer: default: } - reqSize := remaining - if reqSize >= chunkSize { - reqSize = chunkSize - } + reqSize := min(remaining, chunkSize) // Make a block of memory buf := make([]byte, reqSize) diff --git a/backend/cache/cache.go b/backend/cache/cache.go index cb4e0e62b..e7a61e922 100644 --- a/backend/cache/cache.go +++ b/backend/cache/cache.go @@ -1092,7 +1092,7 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er return err } - for i := 0; i < len(entries); i++ { + for i := range entries { innerDir, ok := entries[i].(fs.Directory) if ok { err := f.recurse(ctx, innerDir.Remote(), list) @@ -1428,7 +1428,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i }() // wait until both are done - for c := 0; c < 2; c++ { + for range 2 { <-done } } @@ -1753,7 +1753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { } // Stats returns stats about the cache storage -func (f *Fs) Stats() (map[string]map[string]interface{}, error) { +func (f *Fs) Stats() (map[string]map[string]any, error) { return f.cache.Stats() } @@ -1933,7 +1933,7 @@ var commandHelp = []fs.CommandHelp{ // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that -func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) { +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) { switch name { case "stats": return f.Stats() diff --git a/backend/cache/cache_internal_test.go b/backend/cache/cache_internal_test.go index 6092a1a1d..0138d8d83 100644 --- a/backend/cache/cache_internal_test.go +++ b/backend/cache/cache_internal_test.go @@ -360,7 +360,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(len(checkSample)), o.Size()) - for i := 0; i < len(checkSample); i++ { + for i := range checkSample { require.Equal(t, testData[i], checkSample[i]) } } @@ -387,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) { readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false) require.NoError(t, err) - for i := 0; i < len(readData); i++ { + for i := range readData { require.Equalf(t, testData[i], readData[i], "at byte %v", i) } } @@ -688,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) { co, ok := o.(*cache.Object) 
require.True(t, ok) - for i := 0; i < 4; i++ { // read first 4 + for i := range 4 { // read first 4 _ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false) } cfs.CleanUpCache(true) @@ -971,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser { f, err := os.CreateTemp("", "rclonecache-tempfile") require.NoError(t, err) - for i := 0; i < int(cnt); i++ { + for range int(cnt) { data := randStringBytes(int(chunk)) _, _ = f.Write(data) } @@ -1085,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error { return err } -func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) { +func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) { var err error - var l []interface{} + var l []any var list fs.DirEntries list, err = f.List(context.Background(), remote) for _, ll := range list { @@ -1215,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch var err error var state cache.BackgroundUploadState - for i := 0; i < 2; i++ { + for range 2 { select { case state = <-buCh: // continue @@ -1293,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error { var err error - for i := 0; i < maxRetries; i++ { + for range maxRetries { err = block() if err == nil { return nil diff --git a/backend/cache/cache_upload_test.go b/backend/cache/cache_upload_test.go index 0a3e5d297..a47a1792a 100644 --- a/backend/cache/cache_upload_test.go +++ b/backend/cache/cache_upload_test.go @@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) { randInstance := rand.New(rand.NewSource(time.Now().Unix())) lastFile := "" - for i := 0; i < totalFiles; i++ { + for i := range totalFiles { size := int64(randInstance.Intn(maxSize-minSize) + minSize) testReader := runInstance.randomReader(t, size) remote := "test/" + strconv.Itoa(i) + ".bin" diff --git a/backend/cache/handle.go b/backend/cache/handle.go index 11772eb6e..c12080043 100644 --- a/backend/cache/handle.go +++ b/backend/cache/handle.go @@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) { } } - for i := 0; i < r.workers; i++ { + for i := range r.workers { o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i) if o < 0 || o >= r.cachedObject.Size() { continue @@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) { if !found { // we're gonna give the workers a chance to pickup the chunk // and retry a couple of times - for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ { + for i := range r.cacheFs().opt.ReadRetries * 8 { data, err = r.storage().GetChunk(r.cachedObject, chunkStart) if err == nil { found = true diff --git a/backend/cache/plex.go b/backend/cache/plex.go index 040a665ef..ff686f793 100644 --- a/backend/cache/plex.go +++ b/backend/cache/plex.go @@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error { if err != nil { return err } - var data map[string]interface{} + var data map[string]any err = json.NewDecoder(resp.Body).Decode(&data) if err != nil { return fmt.Errorf("failed to obtain token: %w", err) @@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool { } // adapted from: https://stackoverflow.com/a/28878037 (credit) -func get(m interface{}, path ...interface{}) (interface{}, bool) { +func get(m any, path ...any) (any, bool) { for _, p := range path { switch idx := p.(type) { case string: - if mm, ok := 
m.(map[string]interface{}); ok { + if mm, ok := m.(map[string]any); ok { if val, found := mm[idx]; found { m = val continue @@ -285,7 +285,7 @@ func get(m interface{}, path ...interface{}) (interface{}, bool) { } return nil, false case int: - if mm, ok := m.([]interface{}); ok { + if mm, ok := m.([]any); ok { if len(mm) > idx { m = mm[idx] continue diff --git a/backend/cache/storage_persistent.go b/backend/cache/storage_persistent.go index 35566eeea..737b2e4ae 100644 --- a/backend/cache/storage_persistent.go +++ b/backend/cache/storage_persistent.go @@ -607,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) { } // Stats returns a go map with the stats key values -func (b *Persistent) Stats() (map[string]map[string]interface{}, error) { - r := make(map[string]map[string]interface{}) - r["data"] = make(map[string]interface{}) +func (b *Persistent) Stats() (map[string]map[string]any, error) { + r := make(map[string]map[string]any) + r["data"] = make(map[string]any) r["data"]["oldest-ts"] = time.Now() r["data"]["oldest-file"] = "" r["data"]["newest-ts"] = time.Now() r["data"]["newest-file"] = "" r["data"]["total-chunks"] = 0 r["data"]["total-size"] = int64(0) - r["files"] = make(map[string]interface{}) + r["files"] = make(map[string]any) r["files"]["oldest-ts"] = time.Now() r["files"]["oldest-name"] = "" r["files"]["newest-ts"] = time.Now() diff --git a/backend/chunker/chunker.go b/backend/chunker/chunker.go index 3cc4b1c89..a2f904d7b 100644 --- a/backend/chunker/chunker.go +++ b/backend/chunker/chunker.go @@ -632,7 +632,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct // forbidChunk prints error message or raises error if file is chunk. // First argument sets log prefix, use `false` to suppress message. 
-func (f *Fs) forbidChunk(o interface{}, filePath string) error { +func (f *Fs) forbidChunk(o any, filePath string) error { if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" { if f.opt.FailHard { return fmt.Errorf("chunk overlap with %q", parentPath) @@ -680,7 +680,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err circleSec := unixSec % closestPrimeZzzzSeconds first4chars := strconv.FormatInt(circleSec, 36) - for tries := 0; tries < maxTransactionProbes; tries++ { + for range maxTransactionProbes { f.xactIDMutex.Lock() randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1) f.xactIDMutex.Unlock() @@ -1189,10 +1189,7 @@ func (f *Fs) put( } tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID) - size := c.sizeLeft - if size > c.chunkSize { - size = c.chunkSize - } + size := min(c.sizeLeft, c.chunkSize) savedReadCount := c.readCount // If a single chunk is expected, avoid the extra rename operation @@ -1477,10 +1474,7 @@ func (c *chunkingReader) dummyRead(in io.Reader, size int64) error { const bufLen = 1048576 // 1 MiB buf := make([]byte, bufLen) for size > 0 { - n := size - if n > bufLen { - n = bufLen - } + n := min(size, bufLen) if _, err := io.ReadFull(in, buf[0:n]); err != nil { return err } diff --git a/backend/chunker/chunker_internal_test.go b/backend/chunker/chunker_internal_test.go index fc3e958b0..19f665fd8 100644 --- a/backend/chunker/chunker_internal_test.go +++ b/backend/chunker/chunker_internal_test.go @@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) { }) } -type settings map[string]interface{} +type settings map[string]any func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs { fsName := strings.Split(f.Name(), "{")[0] // strip off hash diff --git a/backend/crypt/cipher.go b/backend/crypt/cipher.go index 4fd529175..c9a3786ef 100644 --- a/backend/crypt/cipher.go +++ b/backend/crypt/cipher.go @@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo dirNameEncrypt: dirNameEncrypt, encryptedSuffix: ".bin", } - c.buffers.New = func() interface{} { + c.buffers.New = func() any { return new([blockSize]byte) } err := c.Key(password, salt) @@ -336,7 +336,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string { _, _ = result.WriteString(strconv.Itoa(dir) + ".") // but we'll augment it with the nameKey for real calculation - for i := 0; i < len(c.nameKey); i++ { + for i := range len(c.nameKey) { dir += int(c.nameKey[i]) } @@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) { } // add the nameKey to get the real rotate distance - for i := 0; i < len(c.nameKey); i++ { + for i := range len(c.nameKey) { dir += int(c.nameKey[i]) } @@ -664,7 +664,7 @@ func (n *nonce) increment() { // add a uint64 to the nonce func (n *nonce) add(x uint64) { carry := uint16(0) - for i := 0; i < 8; i++ { + for i := range 8 { digit := (*n)[i] xDigit := byte(x) x >>= 8 diff --git a/backend/crypt/cipher_test.go b/backend/crypt/cipher_test.go index 559a6f549..fe2edc6f9 100644 --- a/backend/crypt/cipher_test.go +++ b/backend/crypt/cipher_test.go @@ -1307,10 +1307,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) { open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { end := len(ciphertext) if underlyingLimit >= 0 { - end = int(underlyingOffset + underlyingLimit) - if end > len(ciphertext) { - end = len(ciphertext) - } + end = 
min(int(underlyingOffset+underlyingLimit), len(ciphertext)) } reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end])) return reader, nil @@ -1490,7 +1487,7 @@ func TestDecrypterRead(t *testing.T) { assert.NoError(t, err) // Test truncating the file at each possible point - for i := 0; i < len(file16)-1; i++ { + for i := range len(file16) - 1 { what := fmt.Sprintf("truncating to %d/%d", i, len(file16)) cd := newCloseDetector(bytes.NewBuffer(file16[:i])) fh, err := c.newDecrypter(cd) diff --git a/backend/crypt/crypt.go b/backend/crypt/crypt.go index 2303c0851..705416cc2 100644 --- a/backend/crypt/crypt.go +++ b/backend/crypt/crypt.go @@ -924,7 +924,7 @@ Usage Example: // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that -func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { switch name { case "decode": out := make([]string, 0, len(arg)) diff --git a/backend/crypt/pkcs7/pkcs7.go b/backend/crypt/pkcs7/pkcs7.go index d2a547042..b92c0073c 100644 --- a/backend/crypt/pkcs7/pkcs7.go +++ b/backend/crypt/pkcs7/pkcs7.go @@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte { } length := len(buf) padding := n - (length % n) - for i := 0; i < padding; i++ { + for range padding { buf = append(buf, byte(padding)) } if (len(buf) % n) != 0 { @@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) { if padding == 0 { return nil, ErrorPaddingTooShort } - for i := 0; i < padding; i++ { + for i := range padding { if buf[length-1-i] != byte(padding) { return nil, ErrorPaddingNotAllTheSame } diff --git a/backend/drive/drive.go b/backend/drive/drive.go index 5ccea32d3..6cbb1d8d9 100644 --- a/backend/drive/drive.go +++ b/backend/drive/drive.go @@ -18,6 +18,7 @@ import ( "net/http" "os" "path" + "slices" "sort" "strconv" "strings" @@ -199,12 +200,7 @@ func driveScopes(scopesString string) (scopes []string) { // Returns true if one of the scopes was "drive.appfolder" func driveScopesContainsAppFolder(scopes []string) bool { - for _, scope := range scopes { - if scope == scopePrefix+"drive.appfolder" { - return true - } - } - return false + return slices.Contains(scopes, scopePrefix+"drive.appfolder") } func driveOAuthOptions() []fs.Option { @@ -958,12 +954,7 @@ func parseDrivePath(path string) (root string, err error) { type listFn func(*drive.File) bool func containsString(slice []string, s string) bool { - for _, e := range slice { - if e == s { - return true - } - } - return false + return slices.Contains(slice, s) } // getFile returns drive.File for the ID passed and fields passed in @@ -1152,13 +1143,7 @@ OUTER: // Check the case of items is correct since // the `=` operator is case insensitive. 
if title != "" && title != item.Name { - found := false - for _, stem := range stems { - if stem == item.Name { - found = true - break - } - } + found := slices.Contains(stems, item.Name) if !found { continue } @@ -1561,13 +1546,10 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) { func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) { // wipe checksum if SkipChecksumGphotos and file is type Photo or Video if f.opt.SkipChecksumGphotos { - for _, space := range info.Spaces { - if space == "photos" { - info.Md5Checksum = "" - info.Sha1Checksum = "" - info.Sha256Checksum = "" - break - } + if slices.Contains(info.Spaces, "photos") { + info.Md5Checksum = "" + info.Sha1Checksum = "" + info.Sha256Checksum = "" } } o := &Object{ @@ -2245,7 +2227,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( wg.Add(1) in <- listREntry{directoryID, dir} - for i := 0; i < f.ci.Checkers; i++ { + for range f.ci.Checkers { go f.listRRunner(ctx, &wg, in, out, cb, sendJob) } go func() { @@ -2254,11 +2236,8 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( // if the input channel overflowed add the collected entries to the channel now for len(overflow) > 0 { mu.Lock() - l := len(overflow) // only fill half of the channel to prevent entries being put into overflow again - if l > listRInputBuffer/2 { - l = listRInputBuffer / 2 - } + l := min(len(overflow), listRInputBuffer/2) wg.Add(l) for _, d := range overflow[:l] { in <- d @@ -2278,7 +2257,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) ( mu.Unlock() }() // wait until the all workers to finish - for i := 0; i < f.ci.Checkers; i++ { + for range f.ci.Checkers { e := <-out mu.Lock() // if one worker returns an error early, close the input so all other workers exit @@ -3914,7 +3893,7 @@ Third delete all orphaned files to the trash // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that -func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { switch name { case "get": out := make(map[string]string) diff --git a/backend/drive/metadata.go b/backend/drive/metadata.go index c8afa7f8d..be8e83b85 100644 --- a/backend/drive/metadata.go +++ b/backend/drive/metadata.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "maps" "strconv" "strings" "sync" @@ -324,9 +325,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e metadata := make(fs.Metadata, 16) // Dump user metadata first as it overrides system metadata - for k, v := range info.Properties { - metadata[k] = v - } + maps.Copy(metadata, info.Properties) // System metadata metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission) diff --git a/backend/drive/upload.go b/backend/drive/upload.go index df6454429..10b3ab506 100644 --- a/backend/drive/upload.go +++ b/backend/drive/upload.go @@ -177,10 +177,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) { if start >= rx.ContentLength { break } - reqSize = rx.ContentLength - start - if reqSize >= int64(rx.f.opt.ChunkSize) { - reqSize = int64(rx.f.opt.ChunkSize) - } + reqSize = 
min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize)) chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize) } else { // If size unknown read into buffer diff --git a/backend/dropbox/dbhash/dbhash.go b/backend/dropbox/dbhash/dbhash.go index 3cdc5d7b6..9a1ab3c83 100644 --- a/backend/dropbox/dbhash/dbhash.go +++ b/backend/dropbox/dbhash/dbhash.go @@ -55,10 +55,7 @@ func (d *digest) Write(p []byte) (n int, err error) { n = len(p) for len(p) > 0 { d.writtenMore = true - toWrite := bytesPerBlock - d.n - if toWrite > len(p) { - toWrite = len(p) - } + toWrite := min(bytesPerBlock-d.n, len(p)) _, err = d.blockHash.Write(p[:toWrite]) if err != nil { panic(hashReturnedError) diff --git a/backend/dropbox/dbhash/dbhash_test.go b/backend/dropbox/dbhash/dbhash_test.go index 6db2bca2e..3990628a5 100644 --- a/backend/dropbox/dbhash/dbhash_test.go +++ b/backend/dropbox/dbhash/dbhash_test.go @@ -11,7 +11,7 @@ import ( func testChunk(t *testing.T, chunk int) { data := make([]byte, chunk) - for i := 0; i < chunk; i++ { + for i := range chunk { data[i] = 'A' } for _, test := range []struct { diff --git a/backend/filefabric/api/types.go b/backend/filefabric/api/types.go index 9f4fb5984..4199bc3fd 100644 --- a/backend/filefabric/api/types.go +++ b/backend/filefabric/api/types.go @@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{}) // fields returns the JSON fields in use by opt as a | separated // string. -func fields(opt interface{}) (pipeTags string, err error) { +func fields(opt any) (pipeTags string, err error) { var tags []string def := reflect.ValueOf(opt) defType := def.Type() - for i := 0; i < def.NumField(); i++ { + for i := range def.NumField() { field := defType.Field(i) tag, ok := field.Tag.Lookup("json") if !ok { @@ -239,7 +239,7 @@ func fields(opt interface{}) (pipeTags string, err error) { // mustFields returns the JSON fields in use by opt as a | separated // string. It panics on failure. 
-func mustFields(opt interface{}) string { +func mustFields(opt any) string { tags, err := fields(opt) if err != nil { panic(err) @@ -351,12 +351,12 @@ type SpaceInfo struct { // DeleteResponse is returned from doDeleteFile type DeleteResponse struct { Status - Deleted []string `json:"deleted"` - Errors []interface{} `json:"errors"` - ID string `json:"fi_id"` - BackgroundTask int `json:"backgroundtask"` - UsSize string `json:"us_size"` - PaSize string `json:"pa_size"` + Deleted []string `json:"deleted"` + Errors []any `json:"errors"` + ID string `json:"fi_id"` + BackgroundTask int `json:"backgroundtask"` + UsSize string `json:"us_size"` + PaSize string `json:"pa_size"` //SpaceInfo SpaceInfo `json:"spaceinfo"` } diff --git a/backend/filefabric/filefabric.go b/backend/filefabric/filefabric.go index 454dc3c95..9df530f15 100644 --- a/backend/filefabric/filefabric.go +++ b/backend/filefabric/filefabric.go @@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) { } // params for rpc -type params map[string]interface{} +type params map[string]any // rpc calls the rpc.php method of the SME file fabric // diff --git a/backend/filescom/filescom.go b/backend/filescom/filescom.go index 8eb32b320..38807d03a 100644 --- a/backend/filescom/filescom.go +++ b/backend/filescom/filescom.go @@ -10,6 +10,7 @@ import ( "net/http" "net/url" "path" + "slices" "strings" "time" @@ -169,11 +170,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) { } if apiErr, ok := err.(files_sdk.ResponseError); ok { - for _, e := range retryErrorCodes { - if apiErr.HttpCode == e { - fs.Debugf(nil, "Retrying API error %v", err) - return true, err - } + if slices.Contains(retryErrorCodes, apiErr.HttpCode) { + fs.Debugf(nil, "Retrying API error %v", err) + return true, err } } diff --git a/backend/ftp/ftp_internal_test.go b/backend/ftp/ftp_internal_test.go index 3e06bb9b4..6040551ed 100644 --- a/backend/ftp/ftp_internal_test.go +++ b/backend/ftp/ftp_internal_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" ) -type settings map[string]interface{} +type settings map[string]any func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs { fsName := strings.Split(f.Name(), "{")[0] // strip off hash diff --git a/backend/googlephotos/albums.go b/backend/googlephotos/albums.go index 00cdbb914..bfb6404db 100644 --- a/backend/googlephotos/albums.go +++ b/backend/googlephotos/albums.go @@ -4,6 +4,7 @@ package googlephotos import ( "path" + "slices" "strings" "sync" @@ -119,7 +120,7 @@ func (as *albums) _del(album *api.Album) { dirs := as.path[dir] for i, dir := range dirs { if dir == leaf { - dirs = append(dirs[:i], dirs[i+1:]...) 
+ dirs = slices.Delete(dirs, i, i+1) break } } diff --git a/backend/googlephotos/googlephotos.go b/backend/googlephotos/googlephotos.go index 2a2c7eb49..60f3caab0 100644 --- a/backend/googlephotos/googlephotos.go +++ b/backend/googlephotos/googlephotos.go @@ -388,7 +388,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e Method: "GET", RootURL: "https://accounts.google.com/.well-known/openid-configuration", } - var openIDconfig map[string]interface{} + var openIDconfig map[string]any err = f.pacer.Call(func() (bool, error) { resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig) return shouldRetry(ctx, resp, err) @@ -448,7 +448,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) { "token_type_hint": []string{"access_token"}, }, } - var res interface{} + var res any err = f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, &opts, nil, &res) return shouldRetry(ctx, resp, err) diff --git a/backend/hasher/commands.go b/backend/hasher/commands.go index a6eff7efd..2797e5d4f 100644 --- a/backend/hasher/commands.go +++ b/backend/hasher/commands.go @@ -24,7 +24,7 @@ import ( // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that -func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { switch name { case "drop": return nil, f.db.Stop(true) diff --git a/backend/hasher/kv.go b/backend/hasher/kv.go index f67119eb6..c51c13b0e 100644 --- a/backend/hasher/kv.go +++ b/backend/hasher/kv.go @@ -6,6 +6,7 @@ import ( "encoding/gob" "errors" "fmt" + "maps" "strings" "time" @@ -195,9 +196,7 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) { r.Fp = op.fp } - for hashType, hashVal := range op.hashes { - r.Hashes[hashType] = hashVal - } + maps.Copy(r.Hashes, op.hashes) if data, err = r.encode(op.key); err != nil { return fmt.Errorf("marshal failed: %w", err) } diff --git a/backend/hidrive/hidrivehash/hidrivehash.go b/backend/hidrive/hidrivehash/hidrivehash.go index 092663d42..887cda5bf 100644 --- a/backend/hidrive/hidrivehash/hidrivehash.go +++ b/backend/hidrive/hidrivehash/hidrivehash.go @@ -52,10 +52,7 @@ func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *ui total := len(p) nullBytes := make([]byte, blockSize) for len(p) > 0 { - toWrite := int(blockSize - *bytesInBlock) - if toWrite > len(p) { - toWrite = len(p) - } + toWrite := min(int(blockSize-*bytesInBlock), len(p)) c, err := writer.Write(p[:toWrite]) *bytesInBlock += uint32(c) *onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite]) @@ -276,7 +273,7 @@ func (h *hidriveHash) Sum(b []byte) []byte { } checksum := zeroSum - for i := 0; i < len(h.levels); i++ { + for i := range h.levels { level := h.levels[i] if i < len(h.levels)-1 { // Aggregate non-empty non-final levels. 
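The other mechanical rewrites running through the hunks above are the min builtin (Go 1.21) replacing the clamp-with-if pattern, maps.Copy replacing key-by-key map copies, and slices.Delete replacing element removal via append(s[:i], s[i+1:]...). A self-contained sketch with assumed values:

package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	// min replaces: reqSize := remaining; if reqSize >= chunkSize { reqSize = chunkSize }
	remaining, chunkSize := int64(100), int64(64)
	fmt.Println(min(remaining, chunkSize)) // 64

	// maps.Copy replaces: for k, v := range overwrite { headers[k] = v }
	headers := map[string]string{"Accept": "*/*", "Cookie": "a=1"}
	maps.Copy(headers, map[string]string{"Accept": "application/json"})
	fmt.Println(headers) // Accept overwritten, Cookie kept

	// slices.Delete replaces: dirs = append(dirs[:i], dirs[i+1:]...)
	dirs := []string{"a", "b", "c"}
	dirs = slices.Delete(dirs, 1, 2) // delete the element at index 1
	fmt.Println(dirs)                // [a c]
}
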
diff --git a/backend/hidrive/hidrivehash/hidrivehash_test.go b/backend/hidrive/hidrivehash/hidrivehash_test.go index 07f2435b4..817f0a8dd 100644 --- a/backend/hidrive/hidrivehash/hidrivehash_test.go +++ b/backend/hidrive/hidrivehash/hidrivehash_test.go @@ -216,7 +216,7 @@ func TestLevelWrite(t *testing.T) { func TestLevelIsFull(t *testing.T) { content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19} l := hidrivehash.NewLevel() - for i := 0; i < 256; i++ { + for range 256 { assert.False(t, l.(internal.LevelHash).IsFull()) written, err := l.Write(content[:]) assert.Equal(t, len(content), written) diff --git a/backend/http/http.go b/backend/http/http.go index 60c6cb99c..4b64ec304 100644 --- a/backend/http/http.go +++ b/backend/http/http.go @@ -505,7 +505,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e entries = append(entries, entry) entriesMu.Unlock() } - for i := 0; i < checkers; i++ { + for range checkers { wg.Add(1) go func() { defer wg.Done() @@ -740,7 +740,7 @@ It doesn't return anything. // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that -func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { switch name { case "set": newOpt := f.opt diff --git a/backend/iclouddrive/api/client.go b/backend/iclouddrive/api/client.go index 7cdf74baf..a0c8af228 100644 --- a/backend/iclouddrive/api/client.go +++ b/backend/iclouddrive/api/client.go @@ -76,7 +76,7 @@ func (c *Client) DriveService() (*DriveService, error) { // This function is the main entry point for making requests to the iCloud // API. If the initial request returns a 401 (Unauthorized), it will try to // reauthenticate and retry the request. -func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) { +func (c *Client) Request(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) { resp, err = c.Session.Request(ctx, opts, request, response) if err != nil && resp != nil { // try to reauth @@ -100,7 +100,7 @@ func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{ // This function is useful when you have a session that is already // authenticated, but you need to make a request without triggering // a re-authentication. -func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) { +func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) { // Make the request without re-authenticating resp, err = c.Session.Request(ctx, opts, request, response) return resp, err @@ -161,6 +161,6 @@ func newRequestError(Status string, Text string) *RequestError { } // newErr orf makes a new error from sprintf parameters. 
-func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError { +func newRequestErrorf(Status string, Text string, Parameters ...any) *RequestError { return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...)) } diff --git a/backend/iclouddrive/api/drive.go b/backend/iclouddrive/api/drive.go index 2a8c7e375..653fad597 100644 --- a/backend/iclouddrive/api/drive.go +++ b/backend/iclouddrive/api/drive.go @@ -733,8 +733,8 @@ type DocumentUpdateResponse struct { StatusCode int `json:"status_code"` ErrorMessage string `json:"error_message"` } `json:"status"` - OperationID interface{} `json:"operation_id"` - Document *Document `json:"document"` + OperationID any `json:"operation_id"` + Document *Document `json:"document"` } `json:"results"` } @@ -765,9 +765,9 @@ type Document struct { IsWritable bool `json:"is_writable"` IsHidden bool `json:"is_hidden"` } `json:"file_flags"` - LastOpenedTime int64 `json:"lastOpenedTime"` - RestorePath interface{} `json:"restorePath"` - HasChainedParent bool `json:"hasChainedParent"` + LastOpenedTime int64 `json:"lastOpenedTime"` + RestorePath any `json:"restorePath"` + HasChainedParent bool `json:"hasChainedParent"` } // DriveID returns the drive ID of the Document. diff --git a/backend/iclouddrive/api/session.go b/backend/iclouddrive/api/session.go index 7ee350675..0fd9072de 100644 --- a/backend/iclouddrive/api/session.go +++ b/backend/iclouddrive/api/session.go @@ -3,13 +3,13 @@ package api import ( "context" "fmt" + "maps" "net/http" "net/url" "slices" "strings" "github.com/oracle/oci-go-sdk/v65/common" - "github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/lib/rest" ) @@ -35,7 +35,7 @@ type Session struct { // } // Request makes a request -func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) { +func (s *Session) Request(ctx context.Context, opts rest.Opts, request any, response any) (*http.Response, error) { resp, err := s.srv.CallJSON(ctx, &opts, &request, &response) if err != nil { @@ -129,7 +129,7 @@ func (s *Session) AuthWithToken(ctx context.Context) error { // Validate2FACode validates the 2FA code func (s *Session) Validate2FACode(ctx context.Context, code string) error { - values := map[string]interface{}{"securityCode": map[string]string{"code": code}} + values := map[string]any{"securityCode": map[string]string{"code": code}} body, err := IntoReader(values) if err != nil { return err @@ -220,9 +220,7 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string "Referer": fmt.Sprintf("%s/", homeEndpoint), "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0", } - for k, v := range overwrite { - headers[k] = v - } + maps.Copy(headers, overwrite) return headers } @@ -230,9 +228,7 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string func (s *Session) GetHeaders(overwrite map[string]string) map[string]string { headers := GetCommonHeaders(map[string]string{}) headers["Cookie"] = s.GetCookieString() - for k, v := range overwrite { - headers[k] = v - } + maps.Copy(headers, overwrite) return headers } @@ -254,9 +250,7 @@ func GetCommonHeaders(overwrite map[string]string) map[string]string { "Referer": fmt.Sprintf("%s/", baseEndpoint), "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0", } - for k, v := range overwrite { - headers[k] = v - } + maps.Copy(headers, overwrite) 
return headers } @@ -338,33 +332,33 @@ type AccountInfo struct { // ValidateDataDsInfo represents an validation info type ValidateDataDsInfo struct { - HsaVersion int `json:"hsaVersion"` - LastName string `json:"lastName"` - ICDPEnabled bool `json:"iCDPEnabled"` - TantorMigrated bool `json:"tantorMigrated"` - Dsid string `json:"dsid"` - HsaEnabled bool `json:"hsaEnabled"` - IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"` - IroncadeMigrated bool `json:"ironcadeMigrated"` - Locale string `json:"locale"` - BrZoneConsolidated bool `json:"brZoneConsolidated"` - ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"` - IsManagedAppleID bool `json:"isManagedAppleID"` - IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"` - IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"` - ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"` - Gilligvited bool `json:"gilligvited"` - AppleIDAliases []interface{} `json:"appleIdAliases"` - UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"` - IsPaidDeveloper bool `json:"isPaidDeveloper"` - CountryCode string `json:"countryCode"` - NotificationID string `json:"notificationId"` - PrimaryEmailVerified bool `json:"primaryEmailVerified"` - ADsID string `json:"aDsID"` - Locked bool `json:"locked"` - ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"` - HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"` - PrimaryEmail string `json:"primaryEmail"` + HsaVersion int `json:"hsaVersion"` + LastName string `json:"lastName"` + ICDPEnabled bool `json:"iCDPEnabled"` + TantorMigrated bool `json:"tantorMigrated"` + Dsid string `json:"dsid"` + HsaEnabled bool `json:"hsaEnabled"` + IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"` + IroncadeMigrated bool `json:"ironcadeMigrated"` + Locale string `json:"locale"` + BrZoneConsolidated bool `json:"brZoneConsolidated"` + ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"` + IsManagedAppleID bool `json:"isManagedAppleID"` + IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"` + IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"` + ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"` + Gilligvited bool `json:"gilligvited"` + AppleIDAliases []any `json:"appleIdAliases"` + UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"` + IsPaidDeveloper bool `json:"isPaidDeveloper"` + CountryCode string `json:"countryCode"` + NotificationID string `json:"notificationId"` + PrimaryEmailVerified bool `json:"primaryEmailVerified"` + ADsID string `json:"aDsID"` + Locked bool `json:"locked"` + ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"` + HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"` + PrimaryEmail string `json:"primaryEmail"` AppleIDEntries []struct { IsPrimary bool `json:"isPrimary"` Type string `json:"type"` diff --git a/backend/imagekit/util.go b/backend/imagekit/util.go index fea67f3ac..d16eee9a5 100644 --- a/backend/imagekit/util.go +++ b/backend/imagekit/util.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "slices" "strconv" "time" @@ -142,12 +143,7 @@ func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool { if resp == nil { return false } - for _, e := range retryErrorCodes { - if resp.StatusCode == e { - return true - } - } - return false + return slices.Contains(retryErrorCodes, resp.StatusCode) } func (f *Fs) shouldRetry(ctx 
context.Context, resp *http.Response, err error) (bool, error) { diff --git a/backend/internetarchive/internetarchive.go b/backend/internetarchive/internetarchive.go index ec6db9684..dc8ca6b84 100644 --- a/backend/internetarchive/internetarchive.go +++ b/backend/internetarchive/internetarchive.go @@ -13,6 +13,7 @@ import ( "net/url" "path" "regexp" + "slices" "strconv" "strings" "time" @@ -200,7 +201,7 @@ Only enable if you need to be guaranteed to be reflected after write operations. const iaItemMaxSize int64 = 1099511627776 // metadata keys that are not writeable -var roMetadataKey = map[string]interface{}{ +var roMetadataKey = map[string]any{ // do not add mtime here, it's a documented exception "name": nil, "source": nil, "size": nil, "md5": nil, "crc32": nil, "sha1": nil, "format": nil, "old_version": nil, @@ -991,10 +992,8 @@ func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) { func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) { if resp != nil { - for _, e := range retryErrorCodes { - if resp.StatusCode == e { - return true, err - } + if slices.Contains(retryErrorCodes, resp.StatusCode) { + return true, err } } // Ok, not an awserr, check for generic failure conditions @@ -1147,13 +1146,7 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz } fileTrackers, _ := listOrString(iaFile.UpdateTrack) - trackerMatch := false - for _, v := range fileTrackers { - if v == tracker { - trackerMatch = true - break - } - } + trackerMatch := slices.Contains(fileTrackers, tracker) if !trackerMatch { continue } diff --git a/backend/jottacloud/api/types.go b/backend/jottacloud/api/types.go index 6d192bd4e..dea7ec8ba 100644 --- a/backend/jottacloud/api/types.go +++ b/backend/jottacloud/api/types.go @@ -70,7 +70,7 @@ func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error { // MarshalJSON turns a Rfc3339Time into JSON func (t *Rfc3339Time) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf("\"%s\"", t.String())), nil + return fmt.Appendf(nil, "\"%s\"", t.String()), nil } // LoginToken is struct representing the login token generated in the WebUI @@ -165,25 +165,25 @@ type DeviceRegistrationResponse struct { // CustomerInfo provides general information about the account. Required for finding the correct internal username. 
type CustomerInfo struct { - Username string `json:"username"` - Email string `json:"email"` - Name string `json:"name"` - CountryCode string `json:"country_code"` - LanguageCode string `json:"language_code"` - CustomerGroupCode string `json:"customer_group_code"` - BrandCode string `json:"brand_code"` - AccountType string `json:"account_type"` - SubscriptionType string `json:"subscription_type"` - Usage int64 `json:"usage"` - Quota int64 `json:"quota"` - BusinessUsage int64 `json:"business_usage"` - BusinessQuota int64 `json:"business_quota"` - WriteLocked bool `json:"write_locked"` - ReadLocked bool `json:"read_locked"` - LockedCause interface{} `json:"locked_cause"` - WebHash string `json:"web_hash"` - AndroidHash string `json:"android_hash"` - IOSHash string `json:"ios_hash"` + Username string `json:"username"` + Email string `json:"email"` + Name string `json:"name"` + CountryCode string `json:"country_code"` + LanguageCode string `json:"language_code"` + CustomerGroupCode string `json:"customer_group_code"` + BrandCode string `json:"brand_code"` + AccountType string `json:"account_type"` + SubscriptionType string `json:"subscription_type"` + Usage int64 `json:"usage"` + Quota int64 `json:"quota"` + BusinessUsage int64 `json:"business_usage"` + BusinessQuota int64 `json:"business_quota"` + WriteLocked bool `json:"write_locked"` + ReadLocked bool `json:"read_locked"` + LockedCause any `json:"locked_cause"` + WebHash string `json:"web_hash"` + AndroidHash string `json:"android_hash"` + IOSHash string `json:"ios_hash"` } // TrashResponse is returned when emptying the Trash diff --git a/backend/linkbox/linkbox.go b/backend/linkbox/linkbox.go index cc2ac3d16..fd849ef59 100644 --- a/backend/linkbox/linkbox.go +++ b/backend/linkbox/linkbox.go @@ -193,7 +193,7 @@ func (o *Object) set(e *entity) { // Call linkbox with the query in opts and return result // // This will be checked for error and an error will be returned if Status != 1 -func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error { +func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result any) error { err := f.pacer.Call(func() (bool, error) { resp, err := f.srv.CallJSON(ctx, opts, nil, &result) return f.shouldRetry(ctx, resp, err) diff --git a/backend/local/local.go b/backend/local/local.go index b955688d1..9df4d454b 100644 --- a/backend/local/local.go +++ b/backend/local/local.go @@ -1046,7 +1046,7 @@ you can try to change the output.`, // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that -func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) { +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) { switch name { case "noop": if txt, ok := opt["error"]; ok { @@ -1056,7 +1056,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str return nil, errors.New(txt) } if _, ok := opt["echo"]; ok { - out := map[string]interface{}{} + out := map[string]any{} out["name"] = name out["arg"] = arg out["opt"] = opt diff --git a/backend/local/local_internal_test.go b/backend/local/local_internal_test.go index 4c769535e..03a30c218 100644 --- a/backend/local/local_internal_test.go +++ b/backend/local/local_internal_test.go @@ -86,7 +86,7 @@ func TestVerifyCopy(t *testing.T) { require.NoError(t, err) 
src.(*Object).fs.opt.NoCheckUpdated = true - for i := 0; i < 100; i++ { + for i := range 100 { go r.WriteFile(src.Remote(), fmt.Sprintf("some new content %d", i), src.ModTime(context.Background())) } _, err = operations.Copy(context.Background(), r.Fremote, nil, filePath+"2", src) diff --git a/backend/mailru/api/m1.go b/backend/mailru/api/m1.go index c9d3e9c9f..e4babfecc 100644 --- a/backend/mailru/api/m1.go +++ b/backend/mailru/api/m1.go @@ -63,8 +63,8 @@ type UserInfoResponse struct { Prolong bool `json:"prolong"` Promocodes struct { } `json:"promocodes"` - Subscription []interface{} `json:"subscription"` - Version string `json:"version"` + Subscription []any `json:"subscription"` + Version string `json:"version"` } `json:"billing"` Bonuses struct { CameraUpload bool `json:"camera_upload"` diff --git a/backend/mailru/mailru.go b/backend/mailru/mailru.go index c0a89939a..3f2cfc372 100644 --- a/backend/mailru/mailru.go +++ b/backend/mailru/mailru.go @@ -901,7 +901,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) { return nil, nil case api.ListParseUnknown15: skip := int(r.ReadPu32()) - for i := 0; i < skip; i++ { + for range skip { r.ReadPu32() r.ReadPu32() } @@ -1768,7 +1768,7 @@ func (f *Fs) eligibleForSpeedup(remote string, size int64, options ...fs.OpenOpt func (f *Fs) parseSpeedupPatterns(patternString string) (err error) { f.speedupGlobs = nil f.speedupAny = false - uniqueValidPatterns := make(map[string]interface{}) + uniqueValidPatterns := make(map[string]any) for _, pattern := range strings.Split(patternString, ",") { pattern = strings.ToLower(strings.TrimSpace(pattern)) @@ -2131,10 +2131,7 @@ func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end in if limit < 0 { limit = size - offset } - end = offset + limit - if end > size { - end = size - } + end = min(offset+limit, size) partial = !(offset == 0 && end == size) return offset, end, partial } diff --git a/backend/mailru/mrhash/mrhash_test.go b/backend/mailru/mrhash/mrhash_test.go index 3c5815652..a143fcec0 100644 --- a/backend/mailru/mrhash/mrhash_test.go +++ b/backend/mailru/mrhash/mrhash_test.go @@ -11,7 +11,7 @@ import ( func testChunk(t *testing.T, chunk int) { data := make([]byte, chunk) - for i := 0; i < chunk; i++ { + for i := range chunk { data[i] = 'A' } for _, test := range []struct { diff --git a/backend/mega/mega.go b/backend/mega/mega.go index 6f974b779..99b66e7e9 100644 --- a/backend/mega/mega.go +++ b/backend/mega/mega.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "path" + "slices" "strings" "sync" "time" @@ -218,11 +219,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e srv = mega.New().SetClient(fshttp.NewClient(ctx)) srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries srv.SetHTTPS(opt.UseHTTPS) - srv.SetLogger(func(format string, v ...interface{}) { + srv.SetLogger(func(format string, v ...any) { fs.Infof("*go-mega*", format, v...) }) if opt.Debug { - srv.SetDebugger(func(format string, v ...interface{}) { + srv.SetDebugger(func(format string, v ...any) { fs.Debugf("*go-mega*", format, v...) 
}) } @@ -498,11 +499,8 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e if err != nil { return false, fmt.Errorf("list failed: %w", err) } - for _, item := range nodes { - if fn(item) { - found = true - break - } + if slices.ContainsFunc(nodes, fn) { + found = true } return } @@ -1156,7 +1154,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op // Upload the chunks // FIXME do this in parallel - for id := 0; id < u.Chunks(); id++ { + for id := range u.Chunks() { _, chunkSize, err := u.ChunkLocation(id) if err != nil { return fmt.Errorf("upload failed to read chunk location: %w", err) diff --git a/backend/memory/memory_internal_test.go b/backend/memory/memory_internal_test.go index fe8db1f57..f79c20f8c 100644 --- a/backend/memory/memory_internal_test.go +++ b/backend/memory/memory_internal_test.go @@ -29,7 +29,7 @@ func testPurgeListDeadlock(t *testing.T) { r.Fremote.Features().Disable("Purge") // force fallback-purge // make a lot of files to prevent it from finishing too quickly - for i := 0; i < 100; i++ { + for i := range 100 { dst := "file" + fmt.Sprint(i) + ".txt" r.WriteObject(ctx, dst, "hello", t1) } diff --git a/backend/netstorage/netstorage.go b/backend/netstorage/netstorage.go index 8e0426171..4bada7f66 100755 --- a/backend/netstorage/netstorage.go +++ b/backend/netstorage/netstorage.go @@ -274,7 +274,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e } // Command the backend to run a named commands: du and symlink -func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { switch name { case "du": // No arg parsing needed, the path is passed in the fs @@ -858,7 +858,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err // callBackend calls NetStorage API using either rest.Call or rest.CallXML function, // depending on whether the response is required -func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response interface{}, options []fs.OpenOption) (io.ReadCloser, error) { +func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response any, options []fs.OpenOption) (io.ReadCloser, error) { opts := rest.Opts{ Method: method, RootURL: URL, @@ -1080,7 +1080,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope } // netStorageDuRequest performs a NetStorage du request -func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) { +func (f *Fs) netStorageDuRequest(ctx context.Context) (any, error) { URL := f.url("") const actionHeader = "version=1&action=du&format=xml&encoding=utf-8" duResp := &Du{} @@ -1100,7 +1100,7 @@ func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) { } // netStorageDuRequest performs a NetStorage symlink request -func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (interface{}, error) { +func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (any, error) { target := url.QueryEscape(strings.TrimSuffix(dst, "/")) actionHeader := "version=1&action=symlink&target=" + target if modTime != nil { diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go index 046f0605e..0a9aad7f4 100644 --- 
diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go
index 046f0605e..0a9aad7f4 100644
--- a/backend/onedrive/onedrive.go
+++ b/backend/onedrive/onedrive.go
@@ -2532,10 +2532,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
 	remaining := size
 	position := int64(0)
 	for remaining > 0 {
-		n := int64(o.fs.opt.ChunkSize)
-		if remaining < n {
-			n = remaining
-		}
+		n := min(remaining, int64(o.fs.opt.ChunkSize))
 		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
 		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
 		info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
diff --git a/backend/onedrive/quickxorhash/quickxorhash.go b/backend/onedrive/quickxorhash/quickxorhash.go
index a598a5a1f..890469716 100644
--- a/backend/onedrive/quickxorhash/quickxorhash.go
+++ b/backend/onedrive/quickxorhash/quickxorhash.go
@@ -86,7 +86,7 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
 // Calculate the current checksum
 func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
-	for i := 0; i < dataSize; i++ {
+	for i := range dataSize {
 		shift := (i * 11) % 160
 		shiftBytes := shift / 8
 		shiftBits := shift % 8
diff --git a/backend/onedrive/quickxorhash/quickxorhash_test.go b/backend/onedrive/quickxorhash/quickxorhash_test.go
index c38c4d88b..a239ed02f 100644
--- a/backend/onedrive/quickxorhash/quickxorhash_test.go
+++ b/backend/onedrive/quickxorhash/quickxorhash_test.go
@@ -130,10 +130,7 @@ func TestQuickXorHashByBlock(t *testing.T) {
 			require.NoError(t, err, what)
 			h := New()
 			for i := 0; i < len(in); i += blockSize {
-				end := i + blockSize
-				if end > len(in) {
-					end = len(in)
-				}
+				end := min(i+blockSize, len(in))
 				n, err := h.Write(in[i:end])
 				require.Equal(t, end-i, n, what)
 				require.NoError(t, err, what)
diff --git a/backend/opendrive/opendrive.go b/backend/opendrive/opendrive.go
index 6fd0f464a..4fa80f351 100644
--- a/backend/opendrive/opendrive.go
+++ b/backend/opendrive/opendrive.go
@@ -491,7 +491,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		Method: "POST",
 		Path:   "/file/move_copy.json",
 	}
-	var request interface{} = moveCopyFileData
+	var request any = moveCopyFileData
 
 	// use /file/rename.json if moving within the same directory
 	_, srcDirID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
@@ -564,7 +564,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		Method: "POST",
 		Path:   "/folder/move_copy.json",
 	}
-	var request interface{} = moveFolderData
+	var request any = moveFolderData
 
 	// use /folder/rename.json if moving within the same parent directory
 	if srcDirectoryID == dstDirectoryID {
@@ -1042,10 +1042,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	chunkCounter := 0
 
 	for remainingBytes > 0 {
-		currentChunkSize := int64(o.fs.opt.ChunkSize)
-		if currentChunkSize > remainingBytes {
-			currentChunkSize = remainingBytes
-		}
+		currentChunkSize := min(int64(o.fs.opt.ChunkSize), remainingBytes)
 		remainingBytes -= currentChunkSize
 		fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)
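The onedrive and opendrive hunks are all the same clamp: the final chunk is the smaller of the bytes remaining and the configured chunk size. Since Go 1.21 `min` (and `max`) are builtins, so no helper or if-statement is needed. A sketch of the pattern:

package main

import "fmt"

// nextChunk clamps a chunk to the bytes remaining, the shape used by the
// uploadMultipart and Update rewrites above. min is a builtin in Go 1.21+.
func nextChunk(remaining, chunkSize int64) int64 {
	return min(remaining, chunkSize)
}

func main() {
	fmt.Println(nextChunk(100, 64)) // 64: a full chunk
	fmt.Println(nextChunk(10, 64))  // 10: the final partial chunk
}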
diff --git a/backend/oracleobjectstorage/command.go b/backend/oracleobjectstorage/command.go
index 13c288caa..fa687fef8 100644
--- a/backend/oracleobjectstorage/command.go
+++ b/backend/oracleobjectstorage/command.go
@@ -131,7 +131,7 @@ If it is a string or a []string it will be shown to the user
 otherwise it will be JSON encoded and shown to the user like that
 */
 func (f *Fs) Command(ctx context.Context, commandName string, args []string,
-	opt map[string]string) (result interface{}, err error) {
+	opt map[string]string) (result any, err error) {
 	// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
 	switch commandName {
 	case operationRename:
@@ -159,7 +159,7 @@ func (f *Fs) Command(ctx context.Context, commandName string, args []string,
 	}
 }
 
-func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
+func (f *Fs) rename(ctx context.Context, remote, newName string) (any, error) {
 	if remote == "" {
 		return nil, fmt.Errorf("path to object file cannot be empty")
 	}
@@ -332,7 +332,7 @@ func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPat
 	return uploadedParts, nil
 }
 
-func (f *Fs) restore(ctx context.Context, opt map[string]string) (interface{}, error) {
+func (f *Fs) restore(ctx context.Context, opt map[string]string) (any, error) {
 	req := objectstorage.RestoreObjectsRequest{
 		NamespaceName:         common.String(f.opt.Namespace),
 		RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},
diff --git a/backend/oracleobjectstorage/copy.go b/backend/oracleobjectstorage/copy.go
index 06fed4c7a..76b5d939c 100644
--- a/backend/oracleobjectstorage/copy.go
+++ b/backend/oracleobjectstorage/copy.go
@@ -112,7 +112,7 @@ func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType s
 			string(objectstorage.WorkRequestSummaryStatusCanceled),
 			string(objectstorage.WorkRequestStatusFailed),
 		},
-		Refresh: func() (interface{}, string, error) {
+		Refresh: func() (any, string, error) {
 			getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
 			getWorkRequestRequest.WorkRequestId = wID
 			workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
diff --git a/backend/oracleobjectstorage/object.go b/backend/oracleobjectstorage/object.go
index 834ec66c5..d9cc20f84 100644
--- a/backend/oracleobjectstorage/object.go
+++ b/backend/oracleobjectstorage/object.go
@@ -131,7 +131,7 @@ func (o *Object) setMetaData(
 	contentMd5 *string,
 	contentType *string,
 	lastModified *common.SDKTime,
-	storageTier interface{},
+	storageTier any,
 	meta map[string]string) error {
 
 	if contentLength != nil {
diff --git a/backend/oracleobjectstorage/waiter.go b/backend/oracleobjectstorage/waiter.go
index 9981a1ebd..d09f00033 100644
--- a/backend/oracleobjectstorage/waiter.go
+++ b/backend/oracleobjectstorage/waiter.go
@@ -5,6 +5,7 @@ package oracleobjectstorage
 import (
 	"context"
 	"fmt"
+	"slices"
 	"strings"
 	"time"
@@ -23,7 +24,7 @@ var refreshGracePeriod = 30 * time.Second
 //
 // `state` is the latest state of that object. And `err` is any error that
 // may have happened while refreshing the state.
-type StateRefreshFunc func() (result interface{}, state string, err error)
+type StateRefreshFunc func() (result any, state string, err error)
 
 // StateChangeConf is the configuration struct used for `WaitForState`.
 type StateChangeConf struct {
@@ -56,7 +57,7 @@ type StateChangeConf struct {
 // reach the target state.
 //
 // Cancellation from the passed in context will cancel the refresh loop
-func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
+func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
 	// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
 
 	notfoundTick := 0
@@ -72,7 +73,7 @@
 	}
 
 	type Result struct {
-		Result interface{}
+		Result any
 		State  string
 		Error  error
 		Done   bool
@@ -165,12 +166,9 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
 					}
 				}
-				for _, allowed := range conf.Pending {
-					if currentState == allowed {
-						found = true
-						targetOccurrence = 0
-						break
-					}
+				if slices.Contains(conf.Pending, currentState) {
+					found = true
+					targetOccurrence = 0
 				}
 
 				if !found && len(conf.Pending) > 0 {
@@ -278,8 +276,8 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
 // NotFoundError resource not found error
 type NotFoundError struct {
 	LastError    error
-	LastRequest  interface{}
-	LastResponse interface{}
+	LastRequest  any
+	LastResponse any
 	Message      string
 	Retries      int
 }
diff --git a/backend/pcloud/pcloud.go b/backend/pcloud/pcloud.go
index 763784645..0fe34d56e 100644
--- a/backend/pcloud/pcloud.go
+++ b/backend/pcloud/pcloud.go
@@ -990,10 +990,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	if err != nil {
 		return nil, err
 	}
-	free := q.Quota - q.UsedQuota
-	if free < 0 {
-		free = 0
-	}
+	free := max(q.Quota-q.UsedQuota, 0)
 	usage = &fs.Usage{
 		Total: fs.NewUsageValue(q.Quota),     // quota of bytes that can be used
 		Used:  fs.NewUsageValue(q.UsedQuota), // bytes in use
@@ -1324,7 +1321,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	if err != nil {
 		// sometimes pcloud leaves a half complete file on
 		// error, so delete it if it exists, trying a few times
-		for i := 0; i < 5; i++ {
+		for range 5 {
 			delObj, delErr := o.fs.NewObject(ctx, o.remote)
 			if delErr == nil && delObj != nil {
 				_ = delObj.Remove(ctx)
diff --git a/backend/pcloud/writer_at.go b/backend/pcloud/writer_at.go
index adf6a6b44..db91349db 100644
--- a/backend/pcloud/writer_at.go
+++ b/backend/pcloud/writer_at.go
@@ -37,7 +37,7 @@ func (c *writerAt) Close() error {
 	}
 	sizeOk := false
 	sizeLastSeen := int64(0)
-	for retry := 0; retry < 5; retry++ {
+	for retry := range 5 {
 		fs.Debugf(c.remote, "checking file size: try %d/5", retry)
 		obj, err := c.fs.NewObject(c.ctx, c.remote)
 		if err != nil {
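Most of this patch is the mechanical `interface{}` to `any` rename. `any` has been an alias for `interface{}` since Go 1.18, so the two are the same type and interchangeable in every position: struct fields, type assertions, variadic parameters, and map or slice element types. For illustration:

package main

import "fmt"

func describe(v any) string { // identical to describe(v interface{})
	switch v := v.(type) {
	case string:
		return fmt.Sprintf("string %q", v)
	case int:
		return fmt.Sprintf("int %d", v)
	default:
		return fmt.Sprintf("%T", v)
	}
}

func main() {
	var x interface{} = 42
	var y any = x // assignable both ways: the types are identical
	fmt.Println(describe(y))
}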
"en" - Message string `json:"message,omitempty"` - StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet - Detail string `json:"detail,omitempty"` + Type string `json:"@type,omitempty"` + Reason string `json:"reason,omitempty"` + Domain string `json:"domain,omitempty"` + Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet + Locale string `json:"locale,omitempty"` // e.g. "en" + Message string `json:"message,omitempty"` + StackEntries []any `json:"stack_entries,omitempty"` // TODO: undiscovered yet + Detail string `json:"detail,omitempty"` } // Error returns a string for the error and satisfies the error interface @@ -168,44 +168,44 @@ type FileList struct { // for a single file, i.e. supports for higher `--multi-thread-streams=N`. // However, it is not generally applicable as it is only for media. type File struct { - Apps []*FileApp `json:"apps,omitempty"` - Audit *FileAudit `json:"audit,omitempty"` - Collection string `json:"collection,omitempty"` // TODO - CreatedTime Time `json:"created_time,omitempty"` - DeleteTime Time `json:"delete_time,omitempty"` - FileCategory string `json:"file_category,omitempty"` // "AUDIO", "VIDEO" - FileExtension string `json:"file_extension,omitempty"` - FolderType string `json:"folder_type,omitempty"` - Hash string `json:"hash,omitempty"` // custom hash with a form of sha1sum - IconLink string `json:"icon_link,omitempty"` - ID string `json:"id,omitempty"` - Kind string `json:"kind,omitempty"` // "drive#file" - Links *FileLinks `json:"links,omitempty"` - Md5Checksum string `json:"md5_checksum,omitempty"` - Medias []*Media `json:"medias,omitempty"` - MimeType string `json:"mime_type,omitempty"` - ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved - Name string `json:"name,omitempty"` - OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO - OriginalURL string `json:"original_url,omitempty"` - Params *FileParams `json:"params,omitempty"` - ParentID string `json:"parent_id,omitempty"` - Phase string `json:"phase,omitempty"` - Revision int `json:"revision,omitempty,string"` - ReferenceEvents []interface{} `json:"reference_events"` - ReferenceResource interface{} `json:"reference_resource"` - Size int64 `json:"size,omitempty,string"` - SortName string `json:"sort_name,omitempty"` - Space string `json:"space,omitempty"` - SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something? 
-	Starred           bool          `json:"starred,omitempty"`
-	Tags              []interface{} `json:"tags"`
-	ThumbnailLink     string        `json:"thumbnail_link,omitempty"`
-	Trashed           bool          `json:"trashed,omitempty"`
-	UserID            string        `json:"user_id,omitempty"`
-	UserModifiedTime  Time          `json:"user_modified_time,omitempty"`
-	WebContentLink    string        `json:"web_content_link,omitempty"`
-	Writable          bool          `json:"writable,omitempty"`
+	Apps              []*FileApp  `json:"apps,omitempty"`
+	Audit             *FileAudit  `json:"audit,omitempty"`
+	Collection        string      `json:"collection,omitempty"` // TODO
+	CreatedTime       Time        `json:"created_time,omitempty"`
+	DeleteTime        Time        `json:"delete_time,omitempty"`
+	FileCategory      string      `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
+	FileExtension     string      `json:"file_extension,omitempty"`
+	FolderType        string      `json:"folder_type,omitempty"`
+	Hash              string      `json:"hash,omitempty"` // custom hash with a form of sha1sum
+	IconLink          string      `json:"icon_link,omitempty"`
+	ID                string      `json:"id,omitempty"`
+	Kind              string      `json:"kind,omitempty"` // "drive#file"
+	Links             *FileLinks  `json:"links,omitempty"`
+	Md5Checksum       string      `json:"md5_checksum,omitempty"`
+	Medias            []*Media    `json:"medias,omitempty"`
+	MimeType          string      `json:"mime_type,omitempty"`
+	ModifiedTime      Time        `json:"modified_time,omitempty"` // updated when renamed or moved
+	Name              string      `json:"name,omitempty"`
+	OriginalFileIndex int         `json:"original_file_index,omitempty"` // TODO
+	OriginalURL       string      `json:"original_url,omitempty"`
+	Params            *FileParams `json:"params,omitempty"`
+	ParentID          string      `json:"parent_id,omitempty"`
+	Phase             string      `json:"phase,omitempty"`
+	Revision          int         `json:"revision,omitempty,string"`
+	ReferenceEvents   []any       `json:"reference_events"`
+	ReferenceResource any         `json:"reference_resource"`
+	Size              int64       `json:"size,omitempty,string"`
+	SortName          string      `json:"sort_name,omitempty"`
+	Space             string      `json:"space,omitempty"`
+	SpellName         []any       `json:"spell_name,omitempty"` // TODO maybe list of something?
+	Starred           bool        `json:"starred,omitempty"`
+	Tags              []any       `json:"tags"`
+	ThumbnailLink     string      `json:"thumbnail_link,omitempty"`
+	Trashed           bool        `json:"trashed,omitempty"`
+	UserID            string      `json:"user_id,omitempty"`
+	UserModifiedTime  Time        `json:"user_modified_time,omitempty"`
+	WebContentLink    string      `json:"web_content_link,omitempty"`
+	Writable          bool        `json:"writable,omitempty"`
 }
 
 // FileLinks includes links to file at backend
@@ -235,18 +235,18 @@ type Media struct {
 		VideoType  string `json:"video_type,omitempty"` // "mpegts"
 		HdrType    string `json:"hdr_type,omitempty"`
 	} `json:"video,omitempty"`
-	Link           *Link         `json:"link,omitempty"`
-	NeedMoreQuota  bool          `json:"need_more_quota,omitempty"`
-	VipTypes       []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
-	RedirectLink   string        `json:"redirect_link,omitempty"`
-	IconLink       string        `json:"icon_link,omitempty"`
-	IsDefault      bool          `json:"is_default,omitempty"`
-	Priority       int           `json:"priority,omitempty"`
-	IsOrigin       bool          `json:"is_origin,omitempty"`
-	ResolutionName string        `json:"resolution_name,omitempty"`
-	IsVisible      bool          `json:"is_visible,omitempty"`
-	Category       string        `json:"category,omitempty"` // "category_origin"
-	Audio          interface{}   `json:"audio"`              // TODO: undiscovered yet
+	Link           *Link  `json:"link,omitempty"`
+	NeedMoreQuota  bool   `json:"need_more_quota,omitempty"`
+	VipTypes       []any  `json:"vip_types,omitempty"` // TODO maybe list of something?
+	RedirectLink   string `json:"redirect_link,omitempty"`
+	IconLink       string `json:"icon_link,omitempty"`
+	IsDefault      bool   `json:"is_default,omitempty"`
+	Priority       int    `json:"priority,omitempty"`
+	IsOrigin       bool   `json:"is_origin,omitempty"`
+	ResolutionName string `json:"resolution_name,omitempty"`
+	IsVisible      bool   `json:"is_visible,omitempty"`
+	Category       string `json:"category,omitempty"` // "category_origin"
+	Audio          any    `json:"audio"`              // TODO: undiscovered yet
 }
 
 // FileParams includes parameters for instant open
@@ -263,20 +263,20 @@ type FileParams struct {
 
 // FileApp includes parameters for instant open
 type FileApp struct {
-	ID            string        `json:"id,omitempty"`   // "decompress" for rar files
-	Name          string        `json:"name,omitempty"` // decompress" for rar files
-	Access        []interface{} `json:"access,omitempty"`
-	Link          string        `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
-	RedirectLink  string        `json:"redirect_link,omitempty"`
-	VipTypes      []interface{} `json:"vip_types,omitempty"`
-	NeedMoreQuota bool          `json:"need_more_quota,omitempty"`
-	IconLink      string        `json:"icon_link,omitempty"`
-	IsDefault     bool          `json:"is_default,omitempty"`
-	Params        struct{}      `json:"params,omitempty"` // TODO
-	CategoryIDs   []interface{} `json:"category_ids,omitempty"`
-	AdSceneType   int           `json:"ad_scene_type,omitempty"`
-	Space         string        `json:"space,omitempty"`
-	Links         struct{}      `json:"links,omitempty"` // TODO
+	ID            string   `json:"id,omitempty"`   // "decompress" for rar files
+	Name          string   `json:"name,omitempty"` // decompress" for rar files
+	Access        []any    `json:"access,omitempty"`
+	Link          string   `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
+	RedirectLink  string   `json:"redirect_link,omitempty"`
+	VipTypes      []any    `json:"vip_types,omitempty"`
+	NeedMoreQuota bool     `json:"need_more_quota,omitempty"`
+	IconLink      string   `json:"icon_link,omitempty"`
+	IsDefault     bool     `json:"is_default,omitempty"`
+	Params        struct{} `json:"params,omitempty"` // TODO
+	CategoryIDs   []any    `json:"category_ids,omitempty"`
+	AdSceneType   int      `json:"ad_scene_type,omitempty"`
+	Space         string   `json:"space,omitempty"`
+	Links         struct{} `json:"links,omitempty"` // TODO
 }
 
 // ------------------------------------------------------------
@@ -290,27 +290,27 @@ type TaskList struct {
 
 // Task is a basic element representing a single task such as offline download and upload
 type Task struct {
-	Kind              string        `json:"kind,omitempty"` // "drive#task"
-	ID                string        `json:"id,omitempty"`   // task id?
-	Name              string        `json:"name,omitempty"` // torrent name?
-	Type              string        `json:"type,omitempty"` // "offline"
-	UserID            string        `json:"user_id,omitempty"`
-	Statuses          []interface{} `json:"statuses,omitempty"`    // TODO
-	StatusSize        int           `json:"status_size,omitempty"` // TODO
-	Params            *TaskParams   `json:"params,omitempty"`      // TODO
-	FileID            string        `json:"file_id,omitempty"`
-	FileName          string        `json:"file_name,omitempty"`
-	FileSize          string        `json:"file_size,omitempty"`
-	Message           string        `json:"message,omitempty"` // e.g. "Saving"
-	CreatedTime       Time          `json:"created_time,omitempty"`
-	UpdatedTime       Time          `json:"updated_time,omitempty"`
-	ThirdTaskID       string        `json:"third_task_id,omitempty"` // TODO
-	Phase             string        `json:"phase,omitempty"`         // e.g. "PHASE_TYPE_RUNNING"
"PHASE_TYPE_RUNNING" - Progress int `json:"progress,omitempty"` - IconLink string `json:"icon_link,omitempty"` - Callback string `json:"callback,omitempty"` - ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO - Space string `json:"space,omitempty"` + Kind string `json:"kind,omitempty"` // "drive#task" + ID string `json:"id,omitempty"` // task id? + Name string `json:"name,omitempty"` // torrent name? + Type string `json:"type,omitempty"` // "offline" + UserID string `json:"user_id,omitempty"` + Statuses []any `json:"statuses,omitempty"` // TODO + StatusSize int `json:"status_size,omitempty"` // TODO + Params *TaskParams `json:"params,omitempty"` // TODO + FileID string `json:"file_id,omitempty"` + FileName string `json:"file_name,omitempty"` + FileSize string `json:"file_size,omitempty"` + Message string `json:"message,omitempty"` // e.g. "Saving" + CreatedTime Time `json:"created_time,omitempty"` + UpdatedTime Time `json:"updated_time,omitempty"` + ThirdTaskID string `json:"third_task_id,omitempty"` // TODO + Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING" + Progress int `json:"progress,omitempty"` + IconLink string `json:"icon_link,omitempty"` + Callback string `json:"callback,omitempty"` + ReferenceResource any `json:"reference_resource,omitempty"` // TODO + Space string `json:"space,omitempty"` } // TaskParams includes parameters informing status of Task diff --git a/backend/pikpak/helper.go b/backend/pikpak/helper.go index e6f779da9..a5ff0ca00 100644 --- a/backend/pikpak/helper.go +++ b/backend/pikpak/helper.go @@ -638,7 +638,7 @@ func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper return c } -func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) { +func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request any, response any) (resp *http.Response, err error) { if c.captcha != nil { token, err := c.captcha.Token(opts) if err != nil || token == "" { diff --git a/backend/pikpak/pikpak.go b/backend/pikpak/pikpak.go index befcf2580..bda10993a 100644 --- a/backend/pikpak/pikpak.go +++ b/backend/pikpak/pikpak.go @@ -1232,7 +1232,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i params := url.Values{} iVal := reflect.ValueOf(&form.MultiParts).Elem() iTyp := iVal.Type() - for i := 0; i < iVal.NumField(); i++ { + for i := range iVal.NumField() { params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String()) } formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name) @@ -1520,7 +1520,7 @@ Result: // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that -func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) { switch name { case "addurl": if len(arg) != 1 { diff --git a/backend/putio/error.go b/backend/putio/error.go index ff3fed1d5..194315f53 100644 --- a/backend/putio/error.go +++ b/backend/putio/error.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/http" + "slices" "strconv" "time" @@ -13,10 +14,8 @@ import ( ) func checkStatusCode(resp *http.Response, expected ...int) error { - for _, code := range expected { - if resp.StatusCode 
diff --git a/backend/putio/error.go b/backend/putio/error.go
index ff3fed1d5..194315f53 100644
--- a/backend/putio/error.go
+++ b/backend/putio/error.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"slices"
 	"strconv"
 	"time"
@@ -13,10 +14,8 @@ import (
 )
 
 func checkStatusCode(resp *http.Response, expected ...int) error {
-	for _, code := range expected {
-		if resp.StatusCode == code {
-			return nil
-		}
+	if slices.Contains(expected, resp.StatusCode) {
+		return nil
 	}
 	return &statusCodeError{response: resp}
 }
diff --git a/backend/putio/fs.go b/backend/putio/fs.go
index cdd89ceba..a0570c971 100644
--- a/backend/putio/fs.go
+++ b/backend/putio/fs.go
@@ -332,10 +332,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
 	var offsetMismatch bool
 	buf := make([]byte, defaultChunkSize)
 	for clientOffset < size {
-		chunkSize := size - clientOffset
-		if chunkSize >= int64(defaultChunkSize) {
-			chunkSize = int64(defaultChunkSize)
-		}
+		chunkSize := min(size-clientOffset, int64(defaultChunkSize))
 		chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
 		chunkStart := clientOffset
 		reqSize := chunkSize
diff --git a/backend/qingstor/upload.go b/backend/qingstor/upload.go
index f0029f674..14ea24aa9 100644
--- a/backend/qingstor/upload.go
+++ b/backend/qingstor/upload.go
@@ -358,7 +358,7 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
 	})()
 
 	ch := make(chan chunk, mu.cfg.concurrency)
-	for i := 0; i < mu.cfg.concurrency; i++ {
+	for range mu.cfg.concurrency {
 		mu.wg.Add(1)
 		go mu.readChunk(ch)
 	}
diff --git a/backend/quatrix/quatrix.go b/backend/quatrix/quatrix.go
index 34c5db8c0..19d6f0119 100644
--- a/backend/quatrix/quatrix.go
+++ b/backend/quatrix/quatrix.go
@@ -15,6 +15,7 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -643,10 +644,8 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
 		return err
 	}
 
-	for _, removedID := range result.IDs {
-		if removedID == id {
-			return nil
-		}
+	if slices.Contains(result.IDs, id) {
+		return nil
 	}
 
 	return fmt.Errorf("file %s was not deleted successfully", id)
diff --git a/backend/quatrix/upload_memory.go b/backend/quatrix/upload_memory.go
index 615b78540..290c525da 100644
--- a/backend/quatrix/upload_memory.go
+++ b/backend/quatrix/upload_memory.go
@@ -59,11 +59,7 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f
 	defer func() { u.fileUsage[fileID] = borrowed }()
 
-	effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
-
-	if effectiveChunkSize < u.reserved {
-		effectiveChunkSize = u.reserved
-	}
+	effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
 
 	if neededMemory < effectiveChunkSize {
 		effectiveChunkSize = neededMemory
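The quatrix and pcloud hunks use the matching `max` builtin (also Go 1.21) to clamp a value to a floor without an if-statement:

package main

import "fmt"

func main() {
	quota, used := int64(100), int64(120)
	// An over-quota account would otherwise report negative free space;
	// max clamps the result to zero, as in the pcloud About rewrite.
	free := max(quota-used, 0)
	fmt.Println(free) // 0
}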
diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 914b2280d..89faffcc2 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -19,6 +19,7 @@ import (
 	"net/url"
 	"path"
 	"regexp"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -3097,10 +3098,8 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 			return true, err
 		}
 	}
-	for _, e := range retryErrorCodes {
-		if httpStatusCode == e {
-			return true, err
-		}
+	if slices.Contains(retryErrorCodes, httpStatusCode) {
+		return true, err
 	}
 	// Ok, not an awserr, check for generic failure conditions
@@ -3230,7 +3229,7 @@ func fixupRequest(o *s3.Options, opt *Options) {
 type s3logger struct{}
 
 // Logf is expected to support the standard fmt package "verbs".
-func (s3logger) Logf(classification logging.Classification, format string, v ...interface{}) {
+func (s3logger) Logf(classification logging.Classification, format string, v ...any) {
 	switch classification {
 	default:
 	case logging.Debug:
@@ -5253,7 +5252,7 @@ It doesn't return anything.
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "restore":
 		req := s3.RestoreObjectInput{
diff --git a/backend/seafile/renew.go b/backend/seafile/renew.go
index a67159d52..166f0f5db 100644
--- a/backend/seafile/renew.go
+++ b/backend/seafile/renew.go
@@ -9,9 +9,9 @@ import (
 // Renew allows tokens to be renewed on expiry.
 type Renew struct {
-	ts       *time.Ticker     // timer indicating when it's time to renew the token
-	run      func() error     // the callback to do the renewal
-	done     chan interface{} // channel to end the go routine
+	ts       *time.Ticker // timer indicating when it's time to renew the token
+	run      func() error // the callback to do the renewal
+	done     chan any     // channel to end the go routine
 	shutdown *sync.Once
 }
@@ -22,7 +22,7 @@ func NewRenew(every time.Duration, run func() error) *Renew {
 	r := &Renew{
 		ts:       time.NewTicker(every),
 		run:      run,
-		done:     make(chan interface{}),
+		done:     make(chan any),
 		shutdown: &sync.Once{},
 	}
 	go r.renewOnExpiry()
diff --git a/backend/seafile/seafile.go b/backend/seafile/seafile.go
index b38dfa2b5..918b57b95 100644
--- a/backend/seafile/seafile.go
+++ b/backend/seafile/seafile.go
@@ -1313,7 +1313,7 @@ func (f *Fs) getCachedLibraries(ctx context.Context) ([]api.Library, error) {
 	f.librariesMutex.Lock()
 	defer f.librariesMutex.Unlock()
 
-	libraries, err := f.libraries.Get(librariesCacheKey, func(key string) (value interface{}, ok bool, error error) {
+	libraries, err := f.libraries.Get(librariesCacheKey, func(key string) (value any, ok bool, error error) {
 		// Load the libraries if not present in the cache
 		libraries, err := f.getLibraries(ctx)
 		if err != nil {
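The seafile Renew change only respells the channel's element type; `done` is a pure close-to-signal channel, so `chan any` and `chan struct{}` behave identically here. A trimmed sketch of the ticker-plus-done pattern with hypothetical names, not the rclone implementation:

package main

import (
	"fmt"
	"sync"
	"time"
)

// renewer drives a callback from a ticker until the done channel is closed.
type renewer struct {
	ts       *time.Ticker
	run      func() error
	done     chan any
	shutdown *sync.Once
}

func (r *renewer) loop() {
	for {
		select {
		case <-r.ts.C:
			_ = r.run()
		case <-r.done:
			return
		}
	}
}

// Stop is safe to call more than once thanks to sync.Once.
func (r *renewer) Stop() {
	r.shutdown.Do(func() {
		r.ts.Stop()
		close(r.done)
	})
}

func main() {
	r := &renewer{
		ts:       time.NewTicker(10 * time.Millisecond),
		run:      func() error { fmt.Println("renewed"); return nil },
		done:     make(chan any),
		shutdown: &sync.Once{},
	}
	go r.loop()
	time.Sleep(35 * time.Millisecond)
	r.Stop()
}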
diff --git a/backend/sftp/ssh_external.go b/backend/sftp/ssh_external.go
index 6c52acd8f..a42cdd7cc 100644
--- a/backend/sftp/ssh_external.go
+++ b/backend/sftp/ssh_external.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"io"
 	"os/exec"
+	"slices"
 	"strings"
 	"time"
@@ -89,7 +90,7 @@ func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
 	// Connect to a remote host and request the sftp subsystem via
 	// the 'ssh' command. This assumes that passwordless login is
 	// correctly configured.
-	ssh := append([]string(nil), s.f.opt.SSH...)
+	ssh := slices.Clone(s.f.opt.SSH)
 	s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)
 
 	// Allow the command a short time only to shut down
diff --git a/backend/sftp/stringlock_test.go b/backend/sftp/stringlock_test.go
index bc89a0147..ded8f3cfd 100644
--- a/backend/sftp/stringlock_test.go
+++ b/backend/sftp/stringlock_test.go
@@ -20,13 +20,13 @@ func TestStringLock(t *testing.T) {
 		inner = 100
 		total = outer * inner
 	)
-	for k := 0; k < outer; k++ {
+	for range outer {
 		for j := range counter {
 			wg.Add(1)
 			go func(j int) {
 				defer wg.Done()
 				ID := fmt.Sprintf("%d", j)
-				for i := 0; i < inner; i++ {
+				for range inner {
 					lock.Lock(ID)
 					n := counter[j]
 					time.Sleep(1 * time.Millisecond)
diff --git a/backend/sharefile/sharefile.go b/backend/sharefile/sharefile.go
index d35468e0c..e470a795a 100644
--- a/backend/sharefile/sharefile.go
+++ b/backend/sharefile/sharefile.go
@@ -537,7 +537,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 // Fill up (or reset) the buffer tokens
 func (f *Fs) fillBufferTokens() {
 	f.bufferTokens = make(chan []byte, f.ci.Transfers)
-	for i := 0; i < f.ci.Transfers; i++ {
+	for range f.ci.Transfers {
 		f.bufferTokens <- nil
 	}
 }
diff --git a/backend/sharefile/upload.go b/backend/sharefile/upload.go
index eb8358b65..0e9691972 100644
--- a/backend/sharefile/upload.go
+++ b/backend/sharefile/upload.go
@@ -57,10 +57,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		return nil, fmt.Errorf("can't use method %q with newLargeUpload", info.Method)
 	}
 
-	threads := f.ci.Transfers
-	if threads > info.MaxNumberOfThreads {
-		threads = info.MaxNumberOfThreads
-	}
+	threads := min(f.ci.Transfers, info.MaxNumberOfThreads)
 
 	// unwrap the accounting from the input, we use wrap to put it
 	// back on after the buffering
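`slices.Clone` (used in the sftp hunk above and again later for ncdu's DirEntries) is the stdlib spelling of the old `append([]T(nil), s...)` shallow-copy idiom:

package main

import (
	"fmt"
	"slices"
)

func main() {
	ssh := []string{"ssh", "-p", "2222", "example.com"}
	argv := slices.Clone(ssh) // same result as append([]string(nil), ssh...)
	argv[0] = "autossh"
	fmt.Println(ssh[0], argv[0]) // "ssh autossh": the backing arrays are independent
}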
 		}
 		// Cleanup stray files left after failed upload
-		for i := 0; i < 5; i++ {
+		for range 5 {
 			cleanObj, cleanErr := f.NewObject(ctx, src.Remote())
 			if cleanErr == nil {
 				cleanErr = cleanObj.Remove(ctx)
diff --git a/backend/sugarsync/sugarsync.go b/backend/sugarsync/sugarsync.go
index 04dc31fd8..72a13710f 100644
--- a/backend/sugarsync/sugarsync.go
+++ b/backend/sugarsync/sugarsync.go
@@ -574,7 +574,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		RootURL:    pathID,
 		NoResponse: true,
 	}
-	var mkdir interface{}
+	var mkdir any
 	if pathID == f.opt.RootID {
 		// folders at the root are syncFolders
 		mkdir = &api.CreateSyncFolder{
diff --git a/backend/swift/swift.go b/backend/swift/swift.go
index 452d5f11a..b7b227bc7 100644
--- a/backend/swift/swift.go
+++ b/backend/swift/swift.go
@@ -8,8 +8,10 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"path"
 	"regexp"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -417,10 +419,8 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 	}
 	// If this is a swift.Error object extract the HTTP error code
 	if swiftError, ok := err.(*swift.Error); ok {
-		for _, e := range retryErrorCodes {
-			if swiftError.StatusCode == e {
-				return true, err
-			}
+		if slices.Contains(retryErrorCodes, swiftError.StatusCode) {
+			return true, err
 		}
 	}
 	// Check for generic failure conditions
@@ -701,7 +701,7 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
 	if !recurse {
 		opts.Delimiter = '/'
 	}
-	return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
+	return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (any, error) {
 		var objects []swift.Object
 		var err error
 		err = f.pacer.Call(func() (bool, error) {
@@ -1378,9 +1378,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	meta := o.headers.ObjectMetadata()
 	meta.SetModTime(modTime)
 	newHeaders := meta.ObjectHeaders()
-	for k, v := range newHeaders {
-		o.headers[k] = v
-	}
+	maps.Copy(o.headers, newHeaders)
 	// Include any other metadata from request
 	for k, v := range o.headers {
 		if strings.HasPrefix(k, "X-Object-") {
@@ -1450,7 +1448,7 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, container string
 // encoded but we need '&' encoded.
 func urlEncode(str string) string {
 	var buf bytes.Buffer
-	for i := 0; i < len(str); i++ {
+	for i := range len(str) {
 		c := str[i]
 		if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' || c == '_' || c == '-' {
 			_ = buf.WriteByte(c)
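The swift `SetModTime` hunk (and the s3 server backend later) replaces a manual key/value loop with `maps.Copy` (Go 1.21), which writes every entry of the source map into the destination, overwriting duplicate keys:

package main

import (
	"fmt"
	"maps"
)

func main() {
	headers := map[string]string{"X-Object-Meta-A": "1"}
	newHeaders := map[string]string{"X-Object-Meta-Mtime": "1700000000"}
	// maps.Copy inserts every key/value of the source into the destination,
	// overwriting existing keys, exactly what the old k/v loop did.
	maps.Copy(headers, newHeaders)
	fmt.Println(headers)
}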
diff --git a/backend/ulozto/api/types.go b/backend/ulozto/api/types.go
index e45abcf82..f649f1146 100644
--- a/backend/ulozto/api/types.go
+++ b/backend/ulozto/api/types.go
@@ -82,8 +82,8 @@ type File struct {
 	ContentType string `json:"content_type"`
 	Format      struct {
 	} `json:"format"`
-	DownloadTypes []interface{} `json:"download_types"`
-	ThumbnailInfo []interface{} `json:"thumbnail_info"`
+	DownloadTypes []any `json:"download_types"`
+	ThumbnailInfo []any `json:"thumbnail_info"`
 	PreviewInfo   struct {
 	} `json:"preview_info"`
 	Privacy string `json:"privacy"`
diff --git a/backend/ulozto/ulozto.go b/backend/ulozto/ulozto.go
index d39435aef..ec2f2f6b7 100644
--- a/backend/ulozto/ulozto.go
+++ b/backend/ulozto/ulozto.go
@@ -729,7 +729,7 @@ func (o *Object) Storable() bool {
 	return true
 }
 
-func (o *Object) updateFileProperties(ctx context.Context, req interface{}) (err error) {
+func (o *Object) updateFileProperties(ctx context.Context, req any) (err error) {
 	var resp *api.File
 
 	opts := rest.Opts{
@@ -887,7 +887,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 // Remove implements the mandatory method fs.Object.Remove
 func (o *Object) Remove(ctx context.Context) error {
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		// First call moves the item to recycle bin, second deletes it for good
 		var err error
 		opts := rest.Opts{
diff --git a/backend/union/union.go b/backend/union/union.go
index f1922eda7..9c976bac0 100644
--- a/backend/union/union.go
+++ b/backend/union/union.go
@@ -902,7 +902,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	// Backward compatible to old config
 	if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
-		for i := 0; i < len(opt.Remotes)-1; i++ {
+		for i := range len(opt.Remotes) - 1 {
 			opt.Remotes[i] += ":ro"
 		}
 		opt.Upstreams = opt.Remotes
@@ -1045,7 +1045,7 @@ func parentDir(absPath string) string {
 func multithread(num int, fn func(int)) {
 	var wg sync.WaitGroup
-	for i := 0; i < num; i++ {
+	for i := range num {
 		wg.Add(1)
 		i := i
 		go func() {
diff --git a/backend/uptobox/uptobox.go b/backend/uptobox/uptobox.go
index 1db823ef4..ee13d0c67 100644
--- a/backend/uptobox/uptobox.go
+++ b/backend/uptobox/uptobox.go
@@ -246,7 +246,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
 	return f, nil
 }
 
-func (f *Fs) decodeError(resp *http.Response, response interface{}) (err error) {
+func (f *Fs) decodeError(resp *http.Response, response any) (err error) {
 	defer fs.CheckClose(resp.Body, &err)
 
 	body, err := io.ReadAll(resp.Body)
diff --git a/backend/webdav/chunking.go b/backend/webdav/chunking.go
index 379079cf9..ce29507c9 100644
--- a/backend/webdav/chunking.go
+++ b/backend/webdav/chunking.go
@@ -112,12 +112,8 @@ func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, pa
 		return err
 	}
 
-	contentLength := chunkSize
-	// Last chunk may be smaller
-	if size-offset < contentLength {
-		contentLength = size - offset
-	}
+	contentLength := min(size-offset, chunkSize)
 
 	endOffset := offset + contentLength - 1
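Worth noting for the union `multithread` hunk above: it keeps the `i := i` shadow copy before launching the goroutine. Under Go 1.22 loop-variable semantics each iteration already gets a fresh `i`, so the copy is redundant, though harmless:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := range 3 {
		wg.Add(1)
		// Go 1.22+: i is a new variable each iteration, so capturing it
		// in the goroutine is safe without the old `i := i` shadow copy.
		go func() {
			defer wg.Done()
			fmt.Println("worker", i)
		}()
	}
	wg.Wait()
}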
"Password": ca.pass, "Address": ca.endpoint, diff --git a/backend/yandex/api/types.go b/backend/yandex/api/types.go index 614ced7d3..b1da038da 100644 --- a/backend/yandex/api/types.go +++ b/backend/yandex/api/types.go @@ -23,20 +23,20 @@ type ResourceInfoRequestOptions struct { // ResourceInfoResponse struct is returned by the API for metadata requests. type ResourceInfoResponse struct { - PublicKey string `json:"public_key"` - Name string `json:"name"` - Created string `json:"created"` - CustomProperties map[string]interface{} `json:"custom_properties"` - Preview string `json:"preview"` - PublicURL string `json:"public_url"` - OriginPath string `json:"origin_path"` - Modified string `json:"modified"` - Path string `json:"path"` - Md5 string `json:"md5"` - ResourceType string `json:"type"` - MimeType string `json:"mime_type"` - Size int64 `json:"size"` - Embedded *ResourceListResponse `json:"_embedded"` + PublicKey string `json:"public_key"` + Name string `json:"name"` + Created string `json:"created"` + CustomProperties map[string]any `json:"custom_properties"` + Preview string `json:"preview"` + PublicURL string `json:"public_url"` + OriginPath string `json:"origin_path"` + Modified string `json:"modified"` + Path string `json:"path"` + Md5 string `json:"md5"` + ResourceType string `json:"type"` + MimeType string `json:"mime_type"` + Size int64 `json:"size"` + Embedded *ResourceListResponse `json:"_embedded"` } // ResourceListResponse struct @@ -64,7 +64,7 @@ type AsyncStatus struct { // CustomPropertyResponse struct we send and is returned by the API for CustomProperty request. type CustomPropertyResponse struct { - CustomProperties map[string]interface{} `json:"custom_properties"` + CustomProperties map[string]any `json:"custom_properties"` } // SortMode struct - sort mode diff --git a/backend/yandex/yandex.go b/backend/yandex/yandex.go index 9d201c268..fc40aa88b 100644 --- a/backend/yandex/yandex.go +++ b/backend/yandex/yandex.go @@ -1024,7 +1024,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s } opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath())) - rcm := map[string]interface{}{ + rcm := map[string]any{ property: value, } cpr := api.CustomPropertyResponse{CustomProperties: rcm} diff --git a/cmd/backend/backend.go b/cmd/backend/backend.go index 0c2bffd31..c2a05e1a6 100644 --- a/cmd/backend/backend.go +++ b/cmd/backend/backend.go @@ -82,7 +82,7 @@ Note to run these commands on a running backend then see return err } // Run the command - var out interface{} + var out any switch name { case "help": return showHelp(fsInfo) diff --git a/cmd/bisync/bilib/names.go b/cmd/bisync/bilib/names.go index 70d3d61be..d8951a0b5 100644 --- a/cmd/bisync/bilib/names.go +++ b/cmd/bisync/bilib/names.go @@ -10,7 +10,7 @@ import ( ) // Names comprises a set of file names -type Names map[string]interface{} +type Names map[string]any // ToNames converts string slice to a set of names func ToNames(list []string) Names { diff --git a/cmd/bisync/bisync_test.go b/cmd/bisync/bisync_test.go index 694377430..bf6e949ee 100644 --- a/cmd/bisync/bisync_test.go +++ b/cmd/bisync/bisync_test.go @@ -627,7 +627,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) { testFunc := func() { src := filepath.Join(b.dataDir, "file7.txt") - for i := 0; i < 50; i++ { + for i := range 50 { dst := "file" + fmt.Sprint(i) + ".txt" err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst) if err != nil { @@ -1606,7 +1606,7 @@ func (b *bisyncTest) 
diff --git a/cmd/bisync/bisync_test.go b/cmd/bisync/bisync_test.go
index 694377430..bf6e949ee 100644
--- a/cmd/bisync/bisync_test.go
+++ b/cmd/bisync/bisync_test.go
@@ -627,7 +627,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
 		testFunc := func() {
 			src := filepath.Join(b.dataDir, "file7.txt")
-			for i := 0; i < 50; i++ {
+			for i := range 50 {
 				dst := "file" + fmt.Sprint(i) + ".txt"
 				err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
 				if err != nil {
@@ -1606,7 +1606,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
 		s = pathReplacer.Replace(strings.TrimSpace(s))
 
 		// Apply regular expression replacements
-		for i := 0; i < len(repFrom); i++ {
+		for i := range repFrom {
 			s = repFrom[i].ReplaceAllString(s, repTo[i])
 		}
 		s = strings.TrimSpace(s)
@@ -1621,7 +1621,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
 		// Sort consecutive groups of naturally unordered lines.
 		// Any such group must end before the log ends or it might be lost.
 		absorbed := false
-		for i := 0; i < len(dampers); i++ {
+		for i := range dampers {
 			match := false
 			if s != "" && !absorbed {
 				match = hoppers[i].MatchString(s)
@@ -1869,7 +1869,7 @@ func fileType(fileName string) string {
 }
 
 // logPrintf prints a message to stdout and to the test log
-func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
+func (b *bisyncTest) logPrintf(text string, args ...any) {
 	line := fmt.Sprintf(text, args...)
 	fs.Log(nil, line)
 	if b.logFile != nil {
@@ -1936,7 +1936,7 @@ func ctxNoDsStore(ctx context.Context, t *testing.T) (context.Context, *filter.F
 	return ctxNoDsStore, fi
 }
 
-func checkError(t *testing.T, err error, msgAndArgs ...interface{}) {
+func checkError(t *testing.T, err error, msgAndArgs ...any) {
 	if errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
 		t.Skipf("Skip test because remote cannot upload empty files")
 	}
diff --git a/cmd/bisync/log.go b/cmd/bisync/log.go
index a7c131b40..36d3ce6b8 100644
--- a/cmd/bisync/log.go
+++ b/cmd/bisync/log.go
@@ -12,7 +12,7 @@ import (
 	"github.com/rclone/rclone/lib/terminal"
 )
 
-func (b *bisyncRun) indentf(tag, file, format string, args ...interface{}) {
+func (b *bisyncRun) indentf(tag, file, format string, args ...any) {
 	b.indent(tag, file, fmt.Sprintf(format, args...))
 }
diff --git a/cmd/bisync/operations.go b/cmd/bisync/operations.go
index 75b36e8b0..2fde727ca 100644
--- a/cmd/bisync/operations.go
+++ b/cmd/bisync/operations.go
@@ -524,7 +524,7 @@ func (b *bisyncRun) testFn() {
 	}
 }
 
-func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, retryable bool) {
+func (b *bisyncRun) handleErr(o any, msg string, err error, critical, retryable bool) {
 	if err != nil {
 		if retryable {
 			b.retryable = true
@@ -624,7 +624,7 @@ func (b *bisyncRun) debugFn(nametocheck string, fn func()) {
 // waitFor runs fn() until it returns true or the timeout expires
 func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
 	const individualWait = 1 * time.Second
-	for i := 0; i < int(totalWait/individualWait); i++ {
+	for i := range int(totalWait / individualWait) {
 		ok = fn()
 		if ok {
 			return ok
diff --git a/cmd/gitannex/gitannex.go b/cmd/gitannex/gitannex.go
index d71393b05..e716cfe34 100644
--- a/cmd/gitannex/gitannex.go
+++ b/cmd/gitannex/gitannex.go
@@ -28,6 +28,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"slices"
 	"strings"
 
 	"github.com/rclone/rclone/cmd"
@@ -282,11 +283,8 @@ func (s *server) handleInitRemote() error {
 	if s.configRcloneRemoteName != ":local" {
 		var remoteExists bool
-		for _, remoteName := range config.FileSections() {
-			if remoteName == trimmedName {
-				remoteExists = true
-				break
-			}
+		if slices.Contains(config.FileSections(), trimmedName) {
+			remoteExists = true
 		}
 		if !remoteExists {
 			s.sendMsg("INITREMOTE-FAILURE remote does not exist: " + s.configRcloneRemoteName)
diff --git a/cmd/help.go b/cmd/help.go
index 619a974aa..f65ff6d53 100644
--- a/cmd/help.go
+++ b/cmd/help.go
@@ -273,7 +273,7 @@ func showBackends() {
 	fmt.Printf("  rclone help backend <name>\n")
 }
 
-func quoteString(v interface{}) string {
+func quoteString(v any) string {
 	switch v.(type) {
 	case string:
 		return fmt.Sprintf("%q", v)
diff --git a/cmd/mount/mount.go b/cmd/mount/mount.go
index 2431d5df4..0a1f0f4b3 100644
--- a/cmd/mount/mount.go
+++ b/cmd/mount/mount.go
@@ -78,7 +78,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
 	fs.Debugf(f, "Mounting on %q", mountpoint)
 
 	if opt.DebugFUSE {
-		fuse.Debug = func(msg interface{}) {
+		fuse.Debug = func(msg any) {
 			fs.Debugf("fuse", "%v", msg)
 		}
 	}
diff --git a/cmd/ncdu/ncdu.go b/cmd/ncdu/ncdu.go
index 42117ab0d..dfe994a63 100644
--- a/cmd/ncdu/ncdu.go
+++ b/cmd/ncdu/ncdu.go
@@ -185,7 +185,7 @@ func (u *UI) Print(x, y int, style tcell.Style, msg string) {
 }
 
 // Printf a string
-func (u *UI) Printf(x, y int, style tcell.Style, format string, args ...interface{}) {
+func (u *UI) Printf(x, y int, style tcell.Style, format string, args ...any) {
 	s := fmt.Sprintf(format, args...)
 	u.Print(x, y, style, s)
 }
@@ -207,7 +207,7 @@ func (u *UI) Line(x, y, xmax int, style tcell.Style, spacer rune, msg string) {
 }
 
 // Linef a string
-func (u *UI) Linef(x, y, xmax int, style tcell.Style, spacer rune, format string, args ...interface{}) {
+func (u *UI) Linef(x, y, xmax int, style tcell.Style, spacer rune, format string, args ...any) {
 	s := fmt.Sprintf(format, args...)
 	u.Line(x, y, xmax, style, spacer, s)
 }
@@ -273,11 +273,7 @@ func (u *UI) Box() {
 	xmax := x + boxWidth
 	if len(u.boxMenu) != 0 {
 		count := lineOptionLength(u.boxMenu)
-		if x+boxWidth > x+count {
-			xmax = x + boxWidth
-		} else {
-			xmax = x + count
-		}
+		xmax = max(x+boxWidth, x+count)
 	}
 	ymax := y + len(u.boxText)
diff --git a/cmd/ncdu/scan/scan.go b/cmd/ncdu/scan/scan.go
index fb47b335c..97018b3eb 100644
--- a/cmd/ncdu/scan/scan.go
+++ b/cmd/ncdu/scan/scan.go
@@ -5,6 +5,7 @@ import (
 	"context"
 	"fmt"
 	"path"
+	"slices"
 	"sync"
 	"time"
@@ -111,7 +112,7 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
 // Entries returns a copy of the entries in the directory
 func (d *Dir) Entries() fs.DirEntries {
-	return append(fs.DirEntries(nil), d.entries...)
+	return slices.Clone(d.entries)
 }
 
 // Remove removes the i-th entry from the
@@ -146,7 +147,7 @@ func (d *Dir) remove(i int) {
 	d.size -= size
 	d.count -= count
 	d.countUnknownSize -= countUnknownSize
-	d.entries = append(d.entries[:i], d.entries[i+1:]...)
+	d.entries = slices.Delete(d.entries, i, i+1)
 	dir := d
 	// populate changed size and count to parent(s)
diff --git a/cmd/progress.go b/cmd/progress.go
index 679c1d30d..2963ad396 100644
--- a/cmd/progress.go
+++ b/cmd/progress.go
@@ -40,7 +40,7 @@ func startProgress() func() {
 	}
 
 	// Intercept output from functions such as HashLister to stdout
-	operations.SyncPrintf = func(format string, a ...interface{}) {
+	operations.SyncPrintf = func(format string, a ...any) {
 		printProgress(fmt.Sprintf(format, a...))
 	}
@@ -97,7 +97,7 @@ func printProgress(logMessage string) {
 		out(terminal.MoveUp)
 	}
 	// Move to the start of the block we wrote erasing all the previous lines
-	for i := 0; i < nlines-1; i++ {
+	for range nlines - 1 {
 		out(terminal.EraseLine)
 		out(terminal.MoveUp)
 	}
diff --git a/cmd/rc/rc.go b/cmd/rc/rc.go
index a3bf22615..af78d1485 100644
--- a/cmd/rc/rc.go
+++ b/cmd/rc/rc.go
@@ -312,12 +312,12 @@ func list(ctx context.Context) error {
 	if err != nil {
 		return fmt.Errorf("failed to list: %w", err)
 	}
-	commands, ok := list["commands"].([]interface{})
+	commands, ok := list["commands"].([]any)
 	if !ok {
 		return errors.New("bad JSON")
 	}
 	for _, command := range commands {
-		info, ok := command.(map[string]interface{})
+		info, ok := command.(map[string]any)
 		if !ok {
 			return errors.New("bad JSON")
 		}
diff --git a/cmd/selfupdate/selfupdate.go b/cmd/selfupdate/selfupdate.go
index cca72a81b..a68e0f9e0 100644
--- a/cmd/selfupdate/selfupdate.go
+++ b/cmd/selfupdate/selfupdate.go
@@ -327,7 +327,7 @@ func makeRandomExeName(baseName, extension string) (string, error) {
 		extension += ".exe"
 	}
 
-	for attempt := 0; attempt < maxAttempts; attempt++ {
+	for range maxAttempts {
 		filename := fmt.Sprintf("%s.%s.%s", baseName, random.String(4), extension)
 		if _, err := os.Stat(filename); os.IsNotExist(err) {
 			return filename, nil
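The scan.go hunk swaps the append-based element removal for `slices.Delete`. The results are equivalent, but since Go 1.22 `slices.Delete` also zeroes the vacated tail slots, which lets the garbage collector reclaim pointer-containing entries that the append idiom would leave reachable:

package main

import (
	"fmt"
	"slices"
)

func main() {
	entries := []string{"a", "b", "c", "d"}
	i := 1
	// Equivalent to append(entries[:i], entries[i+1:]...), with the vacated
	// tail element additionally cleared (Go 1.22+).
	entries = slices.Delete(entries, i, i+1)
	fmt.Println(entries) // [a c d]
}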
diff --git a/cmd/serve/dlna/cds.go b/cmd/serve/dlna/cds.go
index adcc580df..22d9d4d32 100644
--- a/cmd/serve/dlna/cds.go
+++ b/cmd/serve/dlna/cds.go
@@ -34,7 +34,7 @@ var mediaMimeTypeRegexp = regexp.MustCompile("^(video|audio|image)/")
 // Turns the given entry and DMS host into a UPnP object. A nil object is
 // returned if the entry is not of interest.
-func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, resources vfs.Nodes, host string) (ret interface{}, err error) {
+func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, resources vfs.Nodes, host string) (ret any, err error) {
 	obj := upnpav.Object{
 		ID:         cdsObject.ID(),
 		Restricted: 1,
@@ -127,7 +127,7 @@ func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fi
 }
 
 // Returns all the upnpav objects in a directory.
-func (cds *contentDirectoryService) readContainer(o object, host string) (ret []interface{}, err error) {
+func (cds *contentDirectoryService) readContainer(o object, host string) (ret []any, err error) {
 	node, err := cds.vfs.Stat(o.Path)
 	if err != nil {
 		return
@@ -295,10 +295,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
 		}
 		totalMatches := len(objs)
 		objs = objs[func() (low int) {
-			low = browse.StartingIndex
-			if low > len(objs) {
-				low = len(objs)
-			}
+			low = min(browse.StartingIndex, len(objs))
 			return
 		}():]
 		if browse.RequestedCount != 0 && browse.RequestedCount < len(objs) {
diff --git a/cmd/serve/dlna/dlna_util.go b/cmd/serve/dlna/dlna_util.go
index c8d931009..7eabdd50e 100644
--- a/cmd/serve/dlna/dlna_util.go
+++ b/cmd/serve/dlna/dlna_util.go
@@ -6,6 +6,7 @@ import (
 	"encoding/xml"
 	"fmt"
 	"io"
+	"maps"
 	"net"
 	"net/http"
 	"net/http/httptest"
@@ -68,7 +69,7 @@ func didlLite(chardata string) string {
 		chardata +
 		`</DIDL-Lite>`
 }
 
-func mustMarshalXML(value interface{}) []byte {
+func mustMarshalXML(value any) []byte {
 	ret, err := xml.MarshalIndent(value, "", "  ")
 	if err != nil {
 		fs.Panicf(nil, "mustMarshalXML failed to marshal %v: %s", value, err)
@@ -85,8 +86,8 @@ func marshalSOAPResponse(sa upnp.SoapAction, args map[string]string) []byte {
 			Value: value,
 		})
 	}
-	return []byte(fmt.Sprintf(`<u:%[1]sResponse xmlns:u="%[2]s">%[3]s</u:%[1]sResponse>`,
-		sa.Action, sa.ServiceURN.String(), mustMarshalXML(soapArgs)))
+	return fmt.Appendf(nil, `<u:%[1]sResponse xmlns:u="%[2]s">%[3]s</u:%[1]sResponse>`,
+		sa.Action, sa.ServiceURN.String(), mustMarshalXML(soapArgs))
 }
 
 type loggingResponseWriter struct {
 	committed bool
 }
 
-func (lrw *loggingResponseWriter) logRequest(code int, err interface{}) {
+func (lrw *loggingResponseWriter) logRequest(code int, err any) {
 	// Choose appropriate log level based on response status code.
 	var level fs.LogLevel
 	if code < 400 && err == nil {
@@ -163,9 +164,7 @@ func traceLogging(next http.Handler) http.Handler {
 		}
 		// copy from recorder to the real response writer
-		for k, v := range recorder.Header() {
-			w.Header()[k] = v
-		}
+		maps.Copy(w.Header(), recorder.Header())
 		w.WriteHeader(recorder.Code)
 		_, err = recorder.Body.WriteTo(w)
 		if err != nil {
@@ -184,7 +183,7 @@ func withHeader(name string, value string, next http.Handler) http.Handler {
 }
 
 // serveError returns an http.StatusInternalServerError and logs the error
-func serveError(ctx context.Context, what interface{}, w http.ResponseWriter, text string, err error) {
+func serveError(ctx context.Context, what any, w http.ResponseWriter, text string, err error) {
 	err = fs.CountError(ctx, err)
 	fs.Errorf(what, "%s: %v", text, err)
 	http.Error(w, text+".", http.StatusInternalServerError)
diff --git a/cmd/serve/docker/api.go b/cmd/serve/docker/api.go
index b4c85a1fb..394e61281 100644
--- a/cmd/serve/docker/api.go
+++ b/cmd/serve/docker/api.go
@@ -92,7 +92,7 @@ type ErrorResponse struct {
 func newRouter(drv *Driver) http.Handler {
 	r := chi.NewRouter()
 	r.Post(activatePath, func(w http.ResponseWriter, r *http.Request) {
-		res := map[string]interface{}{
+		res := map[string]any{
 			"Implements": []string{"VolumeDriver"},
 		}
 		encodeResponse(w, res, nil, activatePath)
@@ -152,7 +152,7 @@ func newRouter(drv *Driver) http.Handler {
 	return r
 }
 
-func decodeRequest(w http.ResponseWriter, r *http.Request, req interface{}) bool {
+func decodeRequest(w http.ResponseWriter, r *http.Request, req any) bool {
 	if err := json.NewDecoder(r.Body).Decode(req); err != nil {
 		http.Error(w, err.Error(), http.StatusBadRequest)
 		return false
@@ -160,7 +160,7 @@ func decodeRequest(w http.ResponseWriter, r *http.Request, req interface{}) bool
 	return true
 }
 
-func encodeResponse(w http.ResponseWriter, res interface{}, err error, path string) {
+func encodeResponse(w http.ResponseWriter, res any, err error, path string) {
 	w.Header().Set("Content-Type", contentType)
 	if err != nil {
 		fs.Debugf(path, "Request returned error: %v", err)
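The dlna `marshalSOAPResponse` hunk uses `fmt.Appendf` (Go 1.19), which formats directly into a byte slice and avoids the string round-trip of `[]byte(fmt.Sprintf(...))`:

package main

import "fmt"

func main() {
	// fmt.Appendf(nil, ...) produces the same bytes as []byte(fmt.Sprintf(...))
	// without the intermediate string; passing an existing slice appends to it.
	buf := fmt.Appendf(nil, "<resp code=%q/>", "ok")
	buf = fmt.Appendf(buf, "<!-- %d bytes so far -->", len(buf))
	fmt.Println(string(buf))
}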
diff --git a/cmd/serve/docker/docker_test.go b/cmd/serve/docker/docker_test.go
index a4b1cd320..4c0940a7c 100644
--- a/cmd/serve/docker/docker_test.go
+++ b/cmd/serve/docker/docker_test.go
@@ -49,7 +49,7 @@ func initialise(ctx context.Context, t *testing.T) (string, fs.Fs) {
 	return testDir, testFs
 }
 
-func assertErrorContains(t *testing.T, err error, errString string, msgAndArgs ...interface{}) {
+func assertErrorContains(t *testing.T, err error, errString string, msgAndArgs ...any) {
 	assert.Error(t, err)
 	if err != nil {
 		assert.Contains(t, err.Error(), errString, msgAndArgs...)
@@ -244,7 +244,7 @@ func newAPIClient(t *testing.T, host, unixPath string) *APIClient {
 	}
 }
 
-func (a *APIClient) request(path string, in, out interface{}, wantErr bool) {
+func (a *APIClient) request(path string, in, out any, wantErr bool) {
 	t := a.t
 	var (
 		dataIn []byte
diff --git a/cmd/serve/docker/options_test.go b/cmd/serve/docker/options_test.go
index f1d1d73dc..cdbe08f67 100644
--- a/cmd/serve/docker/options_test.go
+++ b/cmd/serve/docker/options_test.go
@@ -22,7 +22,7 @@ func TestApplyOptions(t *testing.T) {
 		mnt: &mountlib.MountPoint{
 			MountPoint: "testPath",
 		},
-		mountReqs: make(map[string]interface{}),
+		mountReqs: make(map[string]any),
 	}
 
 	// Happy path
diff --git a/cmd/serve/docker/volume.go b/cmd/serve/docker/volume.go
index f774eef8d..70414d076 100644
--- a/cmd/serve/docker/volume.go
+++ b/cmd/serve/docker/volume.go
@@ -35,7 +35,7 @@ type Volume struct {
 	Path       string   `json:"path,omitempty"` // for "remote:path" or ":backend:path"
 	Options    VolOpts  `json:"options"`        // all options together
 	Mounts     []string `json:"mounts"`         // mountReqs as a string list
-	mountReqs  map[string]interface{}
+	mountReqs  map[string]any
 	fsString   string // result of merging Fs, Type and Options
 	persist    bool
 	mountType  string
@@ -49,9 +49,9 @@ type VolOpts map[string]string
 // VolInfo represents a volume for Get and List requests
 type VolInfo struct {
 	Name       string
-	Mountpoint string                 `json:",omitempty"`
-	CreatedAt  string                 `json:",omitempty"`
-	Status     map[string]interface{} `json:",omitempty"`
+	Mountpoint string         `json:",omitempty"`
+	CreatedAt  string         `json:",omitempty"`
+	Status     map[string]any `json:",omitempty"`
 }
 
 func newVolume(ctx context.Context, name string, volOpt VolOpts, drv *Driver) (*Volume, error) {
@@ -65,7 +65,7 @@ func newVolume(ctx context.Context, name string, volOpt VolOpts, drv *Driver) (*
 		CreatedAt: time.Now(),
 		drv:       drv,
 		mnt:       mnt,
-		mountReqs: make(map[string]interface{}),
+		mountReqs: make(map[string]any),
 	}
 	err := vol.applyOptions(volOpt)
 	if err == nil {
@@ -141,7 +141,7 @@ func (vol *Volume) validate() error {
 		return errors.New("mount point is required")
 	}
 	if vol.mountReqs == nil {
-		vol.mountReqs = make(map[string]interface{})
+		vol.mountReqs = make(map[string]any)
 	}
 	return nil
 }
diff --git a/cmd/serve/ftp/ftp.go b/cmd/serve/ftp/ftp.go
index ce50a2e03..99af1aef4 100644
--- a/cmd/serve/ftp/ftp.go
+++ b/cmd/serve/ftp/ftp.go
@@ -226,12 +226,12 @@ func (d *driver) close() error {
 type Logger struct{}
 
 // Print log simple text message
-func (l *Logger) Print(sessionID string, message interface{}) {
+func (l *Logger) Print(sessionID string, message any) {
 	fs.Infof(sessionID, "%s", message)
 }
 
 // Printf log formatted text message
-func (l *Logger) Printf(sessionID string, format string, v ...interface{}) {
+func (l *Logger) Printf(sessionID string, format string, v ...any) {
 	fs.Infof(sessionID, format, v...)
 }
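The ftp Logger change (and the long run of nfs logger methods below) is signature-only: a method declared with `...any` still satisfies an interface declared with `...interface{}`, because the parameter types are identical. A compile-checkable illustration:

package main

import "fmt"

type logger struct{}

// Printf's ...any parameter is the same type as ...interface{}; either
// spelling satisfies an interface declared with the other.
func (logger) Printf(sessionID string, format string, v ...any) {
	fmt.Printf("[%s] "+format+"\n", append([]any{sessionID}, v...)...)
}

func main() {
	var l interface {
		Printf(sessionID string, format string, v ...interface{})
	} = logger{} // compiles: any == interface{}
	l.Printf("sess1", "transferred %d bytes", 4096)
}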
diff --git a/cmd/serve/http/http.go b/cmd/serve/http/http.go
index 7aba1e598..cb3ac6ac3 100644
--- a/cmd/serve/http/http.go
+++ b/cmd/serve/http/http.go
@@ -128,7 +128,7 @@ func (s *HTTP) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) {
 }
 
 // auth does proxy authorization
-func (s *HTTP) auth(user, pass string) (value interface{}, err error) {
+func (s *HTTP) auth(user, pass string) (value any, err error) {
 	VFS, _, err := s.proxy.Call(user, pass, false)
 	if err != nil {
 		return nil, err
diff --git a/cmd/serve/http/http_test.go b/cmd/serve/http/http_test.go
index 497e9732b..a336ec429 100644
--- a/cmd/serve/http/http_test.go
+++ b/cmd/serve/http/http_test.go
@@ -54,7 +54,7 @@ func start(ctx context.Context, t *testing.T, f fs.Fs) (s *HTTP, testURL string)
 	// try to connect to the test server
 	pause := time.Millisecond
-	for i := 0; i < 10; i++ {
+	for range 10 {
 		resp, err := http.Head(testURL)
 		if err == nil {
 			_ = resp.Body.Close()
diff --git a/cmd/serve/nfs/cache_test.go b/cmd/serve/nfs/cache_test.go
index 3daf8124b..03100d34d 100644
--- a/cmd/serve/nfs/cache_test.go
+++ b/cmd/serve/nfs/cache_test.go
@@ -50,7 +50,7 @@ func testCacheCRUD(t *testing.T, h *Handler, c Cache, fileName string) {
 // Thrash the cache operations in parallel on different files
 func testCacheThrashDifferent(t *testing.T, h *Handler, c Cache) {
 	var wg sync.WaitGroup
-	for i := 0; i < 100; i++ {
+	for i := range 100 {
 		i := i
 		wg.Add(1)
 		go func() {
@@ -64,7 +64,7 @@ func testCacheThrashDifferent(t *testing.T, h *Handler, c Cache) {
 // Thrash the cache operations in parallel on the same file
 func testCacheThrashSame(t *testing.T, h *Handler, c Cache) {
 	var wg sync.WaitGroup
-	for i := 0; i < 100; i++ {
+	for range 100 {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
diff --git a/cmd/serve/nfs/handler.go b/cmd/serve/nfs/handler.go
index 921bd89dc..894f573d7 100644
--- a/cmd/serve/nfs/handler.go
+++ b/cmd/serve/nfs/handler.go
@@ -128,17 +128,17 @@ type logger struct {
 }
 
 // logPrint intercepts go-nfs logs and calls rclone's log system instead
-func (l *logger) logPrint(level fs.LogLevel, args ...interface{}) {
+func (l *logger) logPrint(level fs.LogLevel, args ...any) {
 	fs.LogPrintf(level, "nfs", "%s", fmt.Sprint(args...))
 }
 
 // logPrintf intercepts go-nfs logs and calls rclone's log system instead
-func (l *logger) logPrintf(level fs.LogLevel, format string, args ...interface{}) {
+func (l *logger) logPrintf(level fs.LogLevel, format string, args ...any) {
 	fs.LogPrintf(level, "nfs", format, args...)
} // Debug reroutes go-nfs Debug messages to Intercept -func (l *logger) Debug(args ...interface{}) { +func (l *logger) Debug(args ...any) { if l.level < nfs.DebugLevel { return } @@ -146,7 +146,7 @@ func (l *logger) Debug(args ...interface{}) { } // Debugf reroutes go-nfs Debugf messages to logPrintf -func (l *logger) Debugf(format string, args ...interface{}) { +func (l *logger) Debugf(format string, args ...any) { if l.level < nfs.DebugLevel { return } @@ -154,7 +154,7 @@ func (l *logger) Debugf(format string, args ...interface{}) { } // Error reroutes go-nfs Error messages to Intercept -func (l *logger) Error(args ...interface{}) { +func (l *logger) Error(args ...any) { if l.level < nfs.ErrorLevel { return } @@ -162,7 +162,7 @@ func (l *logger) Error(args ...interface{}) { } // Errorf reroutes go-nfs Errorf messages to logPrintf -func (l *logger) Errorf(format string, args ...interface{}) { +func (l *logger) Errorf(format string, args ...any) { if l.level < nfs.ErrorLevel { return } @@ -170,7 +170,7 @@ func (l *logger) Errorf(format string, args ...interface{}) { } // Fatal reroutes go-nfs Fatal messages to Intercept -func (l *logger) Fatal(args ...interface{}) { +func (l *logger) Fatal(args ...any) { if l.level < nfs.FatalLevel { return } @@ -178,7 +178,7 @@ func (l *logger) Fatal(args ...interface{}) { } // Fatalf reroutes go-nfs Fatalf messages to logPrintf -func (l *logger) Fatalf(format string, args ...interface{}) { +func (l *logger) Fatalf(format string, args ...any) { if l.level < nfs.FatalLevel { return } @@ -191,7 +191,7 @@ func (l *logger) GetLevel() nfs.LogLevel { } // Info reroutes go-nfs Info messages to Intercept -func (l *logger) Info(args ...interface{}) { +func (l *logger) Info(args ...any) { if l.level < nfs.InfoLevel { return } @@ -199,7 +199,7 @@ func (l *logger) Info(args ...interface{}) { } // Infof reroutes go-nfs Infof messages to logPrintf -func (l *logger) Infof(format string, args ...interface{}) { +func (l *logger) Infof(format string, args ...any) { if l.level < nfs.InfoLevel { return } @@ -207,7 +207,7 @@ func (l *logger) Infof(format string, args ...interface{}) { } // Panic reroutes go-nfs Panic messages to Intercept -func (l *logger) Panic(args ...interface{}) { +func (l *logger) Panic(args ...any) { if l.level < nfs.PanicLevel { return } @@ -215,7 +215,7 @@ func (l *logger) Panic(args ...interface{}) { } // Panicf reroutes go-nfs Panicf messages to logPrintf -func (l *logger) Panicf(format string, args ...interface{}) { +func (l *logger) Panicf(format string, args ...any) { if l.level < nfs.PanicLevel { return } @@ -228,7 +228,7 @@ func (l *logger) ParseLevel(level string) (nfs.LogLevel, error) { } // Print reroutes go-nfs Print messages to Intercept -func (l *logger) Print(args ...interface{}) { +func (l *logger) Print(args ...any) { if l.level < nfs.InfoLevel { return } @@ -236,7 +236,7 @@ func (l *logger) Print(args ...interface{}) { } // Printf reroutes go-nfs Printf messages to Intercept -func (l *logger) Printf(format string, args ...interface{}) { +func (l *logger) Printf(format string, args ...any) { if l.level < nfs.InfoLevel { return } @@ -249,7 +249,7 @@ func (l *logger) SetLevel(level nfs.LogLevel) { } // Trace reroutes go-nfs Trace messages to Intercept -func (l *logger) Trace(args ...interface{}) { +func (l *logger) Trace(args ...any) { if l.level < nfs.DebugLevel { return } @@ -257,7 +257,7 @@ func (l *logger) Trace(args ...interface{}) { } // Tracef reroutes go-nfs Tracef messages to logPrintf -func (l *logger) Tracef(format string, 
args ...interface{}) { +func (l *logger) Tracef(format string, args ...any) { // FIXME BODGE ... the real fix is probably https://github.com/willscott/go-nfs/pull/28 // This comes from `Log.Tracef("request: %v", w.req)` in conn.go // DEBUG : nfs: request: RPC #3285799202 (mount.Umnt) @@ -272,7 +272,7 @@ func (l *logger) Tracef(format string, args ...interface{}) { } // Warn reroutes go-nfs Warn messages to Intercept -func (l *logger) Warn(args ...interface{}) { +func (l *logger) Warn(args ...any) { if l.level < nfs.WarnLevel { return } @@ -280,7 +280,7 @@ func (l *logger) Warn(args ...interface{}) { } // Warnf reroutes go-nfs Warnf messages to logPrintf -func (l *logger) Warnf(format string, args ...interface{}) { +func (l *logger) Warnf(format string, args ...any) { if l.level < nfs.WarnLevel { return } diff --git a/cmd/serve/proxy/proxy.go b/cmd/serve/proxy/proxy.go index 08ffa8b53..c8dcd6f17 100644 --- a/cmd/serve/proxy/proxy.go +++ b/cmd/serve/proxy/proxy.go @@ -182,7 +182,7 @@ func (p *Proxy) run(in map[string]string) (config configmap.Simple, err error) { } // call runs the auth proxy and returns a cacheEntry and an error -func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, err error) { +func (p *Proxy) call(user, auth string, isPublicKey bool) (value any, err error) { var config configmap.Simple // Contact the proxy if isPublicKey { @@ -222,7 +222,7 @@ func (p *Proxy) call(user, auth string, isPublicKey bool) (value interface{}, er fsString := name + ":" + root // Look for fs in the VFS cache - value, err = p.vfsCache.Get(user, func(key string) (value interface{}, ok bool, err error) { + value, err = p.vfsCache.Get(user, func(key string) (value any, ok bool, err error) { // Create the Fs from the cache f, err := cache.GetFn(p.ctx, fsString, func(ctx context.Context, fsString string) (fs.Fs, error) { // Update the config with the default values diff --git a/cmd/serve/s3/backend.go b/cmd/serve/s3/backend.go index 33ff6387b..90974cd43 100644 --- a/cmd/serve/s3/backend.go +++ b/cmd/serve/s3/backend.go @@ -5,6 +5,7 @@ import ( "context" "encoding/hex" "io" + "maps" "os" "path" "strings" @@ -144,9 +145,7 @@ func (b *s3Backend) HeadObject(ctx context.Context, bucketName, objectName strin if val, ok := b.meta.Load(fp); ok { metaMap := val.(map[string]string) - for k, v := range metaMap { - meta[k] = v - } + maps.Copy(meta, metaMap) } return &gofakes3.Object{ @@ -221,9 +220,7 @@ func (b *s3Backend) GetObject(ctx context.Context, bucketName, objectName string if val, ok := b.meta.Load(fp); ok { metaMap := val.(map[string]string) - for k, v := range metaMap { - meta[k] = v - } + maps.Copy(meta, metaMap) } return &gofakes3.Object{ diff --git a/cmd/serve/s3/logger.go b/cmd/serve/s3/logger.go index 4638d6e91..d99e27df7 100644 --- a/cmd/serve/s3/logger.go +++ b/cmd/serve/s3/logger.go @@ -11,7 +11,7 @@ import ( type logger struct{} // print log message -func (l logger) Print(level gofakes3.LogLevel, v ...interface{}) { +func (l logger) Print(level gofakes3.LogLevel, v ...any) { var s string if len(v) == 0 { s = "" diff --git a/cmd/serve/s3/s3_test.go b/cmd/serve/s3/s3_test.go index 5043179fb..aaece7e5b 100644 --- a/cmd/serve/s3/s3_test.go +++ b/cmd/serve/s3/s3_test.go @@ -11,19 +11,19 @@ import ( "net/url" "path" "path/filepath" + "slices" "testing" "time" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/rclone/rclone/fs/object" - _ "github.com/rclone/rclone/backend/local" "github.com/rclone/rclone/cmd/serve/proxy/proxyflags" 
"github.com/rclone/rclone/cmd/serve/servetest" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fstest" httplib "github.com/rclone/rclone/lib/http" "github.com/rclone/rclone/lib/random" @@ -226,13 +226,7 @@ func testListBuckets(t *testing.T, cases []TestCase, useProxy bool) { for _, tt := range tt.files { file := path.Join(tt.path, tt.filename) - found := false - for _, fname := range objects { - if file == fname { - found = true - break - } - } + found := slices.Contains(objects, file) require.Equal(t, true, found, "Object not found: "+file) } } diff --git a/cmd/serve/s3/server.go b/cmd/serve/s3/server.go index abc8ba4a9..88252d2cb 100644 --- a/cmd/serve/s3/server.go +++ b/cmd/serve/s3/server.go @@ -122,7 +122,7 @@ func (w *Server) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) { } // auth does proxy authorization -func (w *Server) auth(accessKeyID string) (value interface{}, err error) { +func (w *Server) auth(accessKeyID string) (value any, err error) { VFS, _, err := w.proxy.Call(stringToMd5Hash(accessKeyID), accessKeyID, false) if err != nil { return nil, err diff --git a/cmd/serve/s3/utils.go b/cmd/serve/s3/utils.go index 53f79908b..1e83a5f0b 100644 --- a/cmd/serve/s3/utils.go +++ b/cmd/serve/s3/utils.go @@ -36,7 +36,7 @@ func getDirEntries(prefix string, VFS *vfs.VFS) (vfs.Nodes, error) { return dirEntries, nil } -func getFileHashByte(node interface{}) []byte { +func getFileHashByte(node any) []byte { b, err := hex.DecodeString(getFileHash(node)) if err != nil { return nil @@ -44,7 +44,7 @@ func getFileHashByte(node interface{}) []byte { return b } -func getFileHash(node interface{}) string { +func getFileHash(node any) string { var o fs.Object switch b := node.(type) { diff --git a/cmd/serve/webdav/webdav.go b/cmd/serve/webdav/webdav.go index 0c5c16416..348a97c35 100644 --- a/cmd/serve/webdav/webdav.go +++ b/cmd/serve/webdav/webdav.go @@ -273,7 +273,7 @@ func (w *WebDAV) getVFS(ctx context.Context) (VFS *vfs.VFS, err error) { } // auth does proxy authorization -func (w *WebDAV) auth(user, pass string) (value interface{}, err error) { +func (w *WebDAV) auth(user, pass string) (value any, err error) { VFS, _, err := w.proxy.Call(user, pass, false) if err != nil { return nil, err diff --git a/cmd/test/info/info.go b/cmd/test/info/info.go index 83bcdfee8..df6a51c6e 100644 --- a/cmd/test/info/info.go +++ b/cmd/test/info/info.go @@ -289,7 +289,7 @@ func (r *results) checkControls() { // Concurrency control tokens := make(chan struct{}, ci.Checkers) - for i := 0; i < ci.Checkers; i++ { + for range ci.Checkers { tokens <- struct{}{} } var wg sync.WaitGroup diff --git a/cmd/test/makefiles/makefiles.go b/cmd/test/makefiles/makefiles.go index 957dca020..40390a663 100644 --- a/cmd/test/makefiles/makefiles.go +++ b/cmd/test/makefiles/makefiles.go @@ -95,7 +95,7 @@ var makefilesCmd = &cobra.Command{ } dirs := root.list("", []string{}) totalBytes := int64(0) - for i := 0; i < numberOfFiles; i++ { + for range numberOfFiles { dir := dirs[randSource.Intn(len(dirs))] size := int64(minFileSize) if maxFileSize > minFileSize { diff --git a/cmd/tree/tree.go b/cmd/tree/tree.go index 88d0a0af9..5d882bc15 100644 --- a/cmd/tree/tree.go +++ b/cmd/tree/tree.go @@ -192,7 +192,7 @@ func (to *FileInfo) IsDir() bool { } // Sys is underlying data source (can return nil) -func (to *FileInfo) Sys() interface{} { +func (to *FileInfo) Sys() any { return nil } diff --git 
a/fs/accounting/inprogress.go b/fs/accounting/inprogress.go index 7fcbad781..1eded676b 100644 --- a/fs/accounting/inprogress.go +++ b/fs/accounting/inprogress.go @@ -2,6 +2,7 @@ package accounting import ( "context" + "maps" "sync" "github.com/rclone/rclone/fs" @@ -48,7 +49,5 @@ func (ip *inProgress) merge(m *inProgress) { defer ip.mu.Unlock() m.mu.Lock() defer m.mu.Unlock() - for key, val := range m.m { - ip.m[key] = val - } + maps.Copy(ip.m, m.m) } diff --git a/fs/accounting/stats.go b/fs/accounting/stats.go index 8a4526fab..88686e6c5 100644 --- a/fs/accounting/stats.go +++ b/fs/accounting/stats.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "slices" "sort" "strings" "sync" @@ -899,7 +900,7 @@ func (s *StatsInfo) _removeTransfer(transfer *Transfer, i int) { s.oldTimeRanges.merge() // remove the found entry - s.startedTransfers = append(s.startedTransfers[:i], s.startedTransfers[i+1:]...) + s.startedTransfers = slices.Delete(s.startedTransfers, i, i+1) // Find youngest active transfer oldestStart := now diff --git a/fs/accounting/stats_groups_test.go b/fs/accounting/stats_groups_test.go index b7a817824..4a0cd8628 100644 --- a/fs/accounting/stats_groups_test.go +++ b/fs/accounting/stats_groups_test.go @@ -104,11 +104,11 @@ func TestStatsGroupOperations(t *testing.T) { runtime.GC() runtime.ReadMemStats(&start) - for i := 0; i < count; i++ { + for i := range count { sg.set(ctx, fmt.Sprintf("test-%d", i), NewStats(ctx)) } - for i := 0; i < count; i++ { + for i := range count { sg.delete(fmt.Sprintf("test-%d", i)) } @@ -124,7 +124,7 @@ func TestStatsGroupOperations(t *testing.T) { testGroupStatsInfo := NewStatsGroup(ctx, "test-group") require.NoError(t, testGroupStatsInfo.DeleteFile(ctx, 0)) - for i := 0; i < 41; i++ { + for range 41 { require.NoError(t, GlobalStats().DeleteFile(ctx, 0)) } diff --git a/fs/accounting/stats_test.go b/fs/accounting/stats_test.go index 35c22037f..a6378d065 100644 --- a/fs/accounting/stats_test.go +++ b/fs/accounting/stats_test.go @@ -453,7 +453,7 @@ func TestPruneTransfers(t *testing.T) { assert.Equal(t, test.Transfers, len(s.startedTransfers)) s.mu.Unlock() - for i := 0; i < test.Transfers; i++ { + for range test.Transfers { s.PruneTransfers() } diff --git a/fs/accounting/token_bucket.go b/fs/accounting/token_bucket.go index a8b4bcd2d..00b36d344 100644 --- a/fs/accounting/token_bucket.go +++ b/fs/accounting/token_bucket.go @@ -65,10 +65,7 @@ func newEmptyTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter { // Relate maxBurstSize to bandwidth limit // 4M gives 2.5 Gb/s on Windows // Use defaultMaxBurstSize up to 2GBit/s (256MiB/s) then scale - maxBurstSize := (bandwidth * defaultMaxBurstSize) / (256 * 1024 * 1024) - if maxBurstSize < defaultMaxBurstSize { - maxBurstSize = defaultMaxBurstSize - } + maxBurstSize := max((bandwidth*defaultMaxBurstSize)/(256*1024*1024), defaultMaxBurstSize) // fs.Debugf(nil, "bandwidth=%v maxBurstSize=%v", bandwidth, maxBurstSize) tb := rate.NewLimiter(rate.Limit(bandwidth), int(maxBurstSize)) if tb != nil { diff --git a/fs/accounting/tpslimit.go b/fs/accounting/tpslimit.go index b4b834ae0..9d6c73d5a 100644 --- a/fs/accounting/tpslimit.go +++ b/fs/accounting/tpslimit.go @@ -16,10 +16,7 @@ var ( func StartLimitTPS(ctx context.Context) { ci := fs.GetConfig(ctx) if ci.TPSLimit > 0 { - tpsBurst := ci.TPSLimitBurst - if tpsBurst < 1 { - tpsBurst = 1 - } + tpsBurst := max(ci.TPSLimitBurst, 1) tpsBucket = rate.NewLimiter(rate.Limit(ci.TPSLimit), tpsBurst) fs.Infof(nil, "Starting transaction limiter: max %g transactions/s with burst 
%d", ci.TPSLimit, tpsBurst) } diff --git a/fs/accounting/tpslimit_test.go b/fs/accounting/tpslimit_test.go index 4e9a6c1ee..a1bde981b 100644 --- a/fs/accounting/tpslimit_test.go +++ b/fs/accounting/tpslimit_test.go @@ -12,7 +12,7 @@ import ( func TestLimitTPS(t *testing.T) { timeTransactions := func(n int, minTime, maxTime time.Duration) { start := time.Now() - for i := 0; i < n; i++ { + for range n { LimitTPS(context.Background()) } dt := time.Since(start) diff --git a/fs/accounting/transfermap.go b/fs/accounting/transfermap.go index 41409eece..1f8f6c752 100644 --- a/fs/accounting/transfermap.go +++ b/fs/accounting/transfermap.go @@ -3,6 +3,7 @@ package accounting import ( "context" "fmt" + "maps" "sort" "strings" "sync" @@ -47,9 +48,7 @@ func (tm *transferMap) del(remote string) bool { func (tm *transferMap) merge(m *transferMap) { tm.mu.Lock() m.mu.Lock() - for name, tr := range m.items { - tm.items[name] = tr - } + maps.Copy(tm.items, m.items) m.mu.Unlock() tm.mu.Unlock() } diff --git a/fs/asyncreader/asyncreader.go b/fs/asyncreader/asyncreader.go index a39ae8807..b645fa46f 100644 --- a/fs/asyncreader/asyncreader.go +++ b/fs/asyncreader/asyncreader.go @@ -75,7 +75,7 @@ func (a *AsyncReader) init(rd io.ReadCloser, buffers int) { a.size = softStartInitial // Create tokens - for i := 0; i < buffers; i++ { + for range buffers { a.token <- struct{}{} } @@ -249,10 +249,7 @@ func (a *AsyncReader) SkipBytes(skip int) (ok bool) { } } - n := len(a.cur.buffer()) - if n > skip { - n = skip - } + n := min(len(a.cur.buffer()), skip) a.cur.increment(n) skip -= n if skip == 0 { diff --git a/fs/asyncreader/asyncreader_test.go b/fs/asyncreader/asyncreader_test.go index 2cc3a8389..eae23b160 100644 --- a/fs/asyncreader/asyncreader_test.go +++ b/fs/asyncreader/asyncreader_test.go @@ -151,18 +151,18 @@ func TestAsyncReaderSizes(t *testing.T) { var texts [31]string str := "" all := "" - for i := 0; i < len(texts)-1; i++ { + for i := range len(texts) - 1 { texts[i] = str + "\n" all += texts[i] str += string(rune(i)%26 + 'a') } texts[len(texts)-1] = all - for h := 0; h < len(texts); h++ { + for h := range len(texts) { text := texts[h] - for i := 0; i < len(readMakers); i++ { - for j := 0; j < len(bufreaders); j++ { - for k := 0; k < len(bufsizes); k++ { + for i := range readMakers { + for j := range bufreaders { + for k := range bufsizes { for l := 1; l < 10; l++ { readmaker := readMakers[i] bufreader := bufreaders[j] @@ -192,18 +192,18 @@ func TestAsyncReaderWriteTo(t *testing.T) { var texts [31]string str := "" all := "" - for i := 0; i < len(texts)-1; i++ { + for i := range len(texts) - 1 { texts[i] = str + "\n" all += texts[i] str += string(rune(i)%26 + 'a') } texts[len(texts)-1] = all - for h := 0; h < len(texts); h++ { + for h := range len(texts) { text := texts[h] - for i := 0; i < len(readMakers); i++ { - for j := 0; j < len(bufreaders); j++ { - for k := 0; k < len(bufsizes); k++ { + for i := range readMakers { + for j := range bufreaders { + for k := range bufsizes { for l := 1; l < 10; l++ { readmaker := readMakers[i] bufreader := bufreaders[j] diff --git a/fs/backend_config.go b/fs/backend_config.go index a65239834..c8d0e0ffe 100644 --- a/fs/backend_config.go +++ b/fs/backend_config.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "slices" "strconv" "strings" @@ -75,11 +76,11 @@ type ConfigIn struct { // Error is displayed to the user before asking a question // Result is passed to the next call to Config if Option/OAuth isn't set type ConfigOut struct { - State string // State to jump to 
after this - Option *Option // Option to query user about - OAuth interface{} `json:"-"` // Do OAuth if set - Error string // error to be displayed to the user - Result string // if Option/OAuth not set then this is passed to the next state + State string // State to jump to after this + Option *Option // Option to query user about + OAuth any `json:"-"` // Do OAuth if set + Error string // error to be displayed to the user + Result string // if Option/OAuth not set then this is passed to the next state } // ConfigInputOptional asks the user for a string which may be empty @@ -555,13 +556,7 @@ func MatchProvider(providerConfig, provider string) bool { negate = true } providers := strings.Split(providerConfig, ",") - matched := false - for _, p := range providers { - if p == provider { - matched = true - break - } - } + matched := slices.Contains(providers, provider) if negate { return !matched } diff --git a/fs/bwtimetable.go b/fs/bwtimetable.go index eb01a8d76..9a2173862 100644 --- a/fs/bwtimetable.go +++ b/fs/bwtimetable.go @@ -168,7 +168,7 @@ func (x *BwTimetable) Set(s string) error { if err := validateHour(HHMM); err != nil { return err } - for i := 0; i < 7; i++ { + for i := range 7 { hh, _ := strconv.Atoi(HHMM[0:2]) mm, _ := strconv.Atoi(HHMM[3:]) ts := BwTimeSlot{ diff --git a/fs/cache/cache.go b/fs/cache/cache.go index 63afa94e8..0ab2492ce 100644 --- a/fs/cache/cache.go +++ b/fs/cache/cache.go @@ -27,7 +27,7 @@ func createOnFirstUse() { c = cache.New() c.SetExpireDuration(ci.FsCacheExpireDuration) c.SetExpireInterval(ci.FsCacheExpireInterval) - c.SetFinalizer(func(value interface{}) { + c.SetFinalizer(func(value any) { if s, ok := value.(fs.Shutdowner); ok { _ = fs.CountError(context.Background(), s.Shutdown(context.Background())) } @@ -98,7 +98,7 @@ func GetFn(ctx context.Context, fsString string, create func(ctx context.Context createOnFirstUse() canonicalFsString := Canonicalize(fsString) created := false - value, err := c.Get(canonicalFsString, func(canonicalFsString string) (f interface{}, ok bool, err error) { + value, err := c.Get(canonicalFsString, func(canonicalFsString string) (f any, ok bool, err error) { f, err = create(ctx, fsString) // always create the backend with the original non-canonicalised string ok = err == nil || err == fs.ErrorIsFile created = ok @@ -149,9 +149,9 @@ func Pin(f fs.Fs) { // // This calls runtime.SetFinalizer on x so it shouldn't have a // finalizer already. 
-func PinUntilFinalized(f fs.Fs, x interface{}) { +func PinUntilFinalized(f fs.Fs, x any) { Pin(f) - runtime.SetFinalizer(x, func(_ interface{}) { + runtime.SetFinalizer(x, func(_ any) { Unpin(f) }) } diff --git a/fs/chunkedreader/chunkedreader_test.go b/fs/chunkedreader/chunkedreader_test.go index 62c72094e..c3d28e3d8 100644 --- a/fs/chunkedreader/chunkedreader_test.go +++ b/fs/chunkedreader/chunkedreader_test.go @@ -80,10 +80,7 @@ func testRead(content []byte, mode mockobject.SeekMode, streams int) func(*testi require.Equal(t, offset, p, what) n, err := cr.Read(buf) - end := offset + int64(bl) - if end > cl { - end = cl - } + end := min(offset+int64(bl), cl) l := int(end - offset) if l < bl { require.Equal(t, io.EOF, err, what) diff --git a/fs/chunksize/chunksize.go b/fs/chunksize/chunksize.go index 1a60dda5f..d96c780c4 100644 --- a/fs/chunksize/chunksize.go +++ b/fs/chunksize/chunksize.go @@ -17,7 +17,7 @@ import ( // Returns the default chunk size if it is sufficiently large enough // to support the given file size otherwise returns the smallest chunk // size necessary to allow the upload to proceed. -func Calculator(o interface{}, size int64, maxParts int, defaultChunkSize fs.SizeSuffix) fs.SizeSuffix { +func Calculator(o any, size int64, maxParts int, defaultChunkSize fs.SizeSuffix) fs.SizeSuffix { // If streaming then use default chunk size if size < 0 { fs.Debugf(o, "Streaming upload with chunk_size %s allows uploads of up to %s and will fail only when that limit is reached.", defaultChunkSize, fs.SizeSuffix(maxParts)*defaultChunkSize) diff --git a/fs/config/config.go b/fs/config/config.go index 494c179b1..617780c9c 100644 --- a/fs/config/config.go +++ b/fs/config/config.go @@ -386,7 +386,7 @@ func SaveConfig() { ctx := context.Background() ci := fs.GetConfig(ctx) var err error - for i := 0; i < ci.LowLevelRetries+1; i++ { + for range ci.LowLevelRetries + 1 { if err = LoadedData().Save(); err == nil { return } diff --git a/fs/config/configstruct/configstruct.go b/fs/config/configstruct/configstruct.go index 78322af9a..e9535d716 100644 --- a/fs/config/configstruct/configstruct.go +++ b/fs/config/configstruct/configstruct.go @@ -35,7 +35,7 @@ func camelToSnake(in string) string { // // Any other types are expected to be encoded by their String() // methods and decoded by their `Set(s string) error` methods. -func StringToInterface(def interface{}, in string) (newValue interface{}, err error) { +func StringToInterface(def any, in string) (newValue any, err error) { typ := reflect.TypeOf(def) o := reflect.New(typ) switch def.(type) { @@ -88,10 +88,10 @@ func StringToInterface(def interface{}, in string) (newValue interface{}, err er // Item describes a single entry in the options structure type Item struct { - Name string // snake_case - Field string // CamelCase - Set func(interface{}) // set this field - Value interface{} + Name string // snake_case + Field string // CamelCase + Set func(any) // set this field + Value any } // Items parses the opt struct and returns a slice of Item objects. @@ -105,7 +105,7 @@ type Item struct { // Nested structs are looked up too. If the parent struct has a struct // tag, this will be used as a prefix for the values in the sub // struct, otherwise they will be embedded as they are. 
-func Items(opt interface{}) (items []Item, err error) { +func Items(opt any) (items []Item, err error) { def := reflect.ValueOf(opt) if def.Kind() != reflect.Ptr { return nil, errors.New("argument must be a pointer") @@ -115,7 +115,7 @@ func Items(opt interface{}) (items []Item, err error) { return nil, errors.New("argument must be a pointer to a struct") } defType := def.Type() - for i := 0; i < def.NumField(); i++ { + for i := range def.NumField() { field := def.Field(i) fieldType := defType.Field(i) fieldName := fieldType.Name @@ -145,7 +145,7 @@ func Items(opt interface{}) (items []Item, err error) { defaultItem := Item{ Name: configName, Field: fieldName, - Set: func(newValue interface{}) { + Set: func(newValue any) { field.Set(reflect.ValueOf(newValue)) }, Value: field.Interface(), @@ -169,7 +169,7 @@ func Items(opt interface{}) (items []Item, err error) { // types and set in opt. // // All the field types in the struct must implement fmt.Scanner. -func Set(config configmap.Getter, opt interface{}) (err error) { +func Set(config configmap.Getter, opt any) (err error) { defaultItems, err := Items(opt) if err != nil { return err @@ -177,7 +177,7 @@ func Set(config configmap.Getter, opt interface{}) (err error) { for _, defaultItem := range defaultItems { newValue := defaultItem.Value if configValue, ok := config.Get(defaultItem.Name); ok { - var newNewValue interface{} + var newNewValue any newNewValue, err = StringToInterface(newValue, configValue) if err != nil { // Mask errors if setting an empty string as diff --git a/fs/config/configstruct/configstruct_test.go b/fs/config/configstruct/configstruct_test.go index fa30684be..a268553f9 100644 --- a/fs/config/configstruct/configstruct_test.go +++ b/fs/config/configstruct/configstruct_test.go @@ -180,8 +180,8 @@ func TestStringToInterface(t *testing.T) { item := struct{ A int }{2} for _, test := range []struct { in string - def interface{} - want interface{} + def any + want any err string }{ {"", string(""), "", ""}, diff --git a/fs/config/ui.go b/fs/config/ui.go index fcee469b1..6092fbae1 100644 --- a/fs/config/ui.go +++ b/fs/config/ui.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "os" + "slices" "sort" "strconv" "strings" @@ -148,10 +149,8 @@ func Choose(what string, kind string, choices, help []string, defaultValue strin result := ReadLine() i, err := strconv.Atoi(result) if err != nil { - for _, v := range choices { - if result == v { - return result - } + if slices.Contains(choices, result) { + return result } if result == "" { // If empty string is in the predefined list of choices it has already been returned above. 
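The hunks above and below repeat a small set of mechanical rewrites drawn from Go 1.21's slices and maps packages: hand-rolled membership loops become slices.Contains (or slices.ContainsFunc when a predicate such as rule.Match is involved), the append(s[:i], s[i+1:]...) element-deletion idiom becomes slices.Delete, append(Options(nil), opts...) becomes slices.Clone, and key-by-key map copies become maps.Copy. Below is a minimal, self-contained sketch of those helpers; the data and variable names are invented for illustration and are not rclone APIs.

// Sketch only: the Go 1.21+ stdlib helpers this patch substitutes for
// hand-written loops. All values below are made up.
package main

import (
	"fmt"
	"maps"
	"slices"
	"strings"
)

func main() {
	// slices.Contains replaces "for _, e := range xs { if e == x ... }".
	retryCodes := []int{429, 500, 502, 503, 504}
	fmt.Println(slices.Contains(retryCodes, 503)) // true

	// slices.ContainsFunc takes a predicate, as in rules.includeMany.
	remotes := []string{"docs/readme.md", "logs/app.log"}
	isLog := func(s string) bool { return strings.HasSuffix(s, ".log") }
	fmt.Println(slices.ContainsFunc(remotes, isLog)) // true

	// slices.Delete(s, i, i+1) replaces append(s[:i], s[i+1:]...).
	remotes = slices.Delete(remotes, 0, 1)
	fmt.Println(remotes) // [logs/app.log]

	// slices.Clone replaces append(T(nil), s...), as in fs/registry.go.
	backup := slices.Clone(remotes)
	fmt.Println(backup) // [logs/app.log]

	// maps.Copy replaces "for k, v := range src { dst[k] = v }".
	meta := map[string]string{"region": "old"}
	maps.Copy(meta, map[string]string{"region": "new", "tier": "hot"})
	fmt.Println(meta) // map[region:new tier:hot]
}

One behavioural nuance worth knowing: since Go 1.22, slices.Delete zeroes the vacated tail elements, which the old append idiom did not, so pointers held past the new length no longer keep their referents alive.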
diff --git a/fs/dirtree/dirtree_test.go b/fs/dirtree/dirtree_test.go index 81930fc1a..45cd9a3aa 100644 --- a/fs/dirtree/dirtree_test.go +++ b/fs/dirtree/dirtree_test.go @@ -207,7 +207,7 @@ func BenchmarkCheckParents(b *testing.B) { b.Run(fmt.Sprintf("%d", N), func(b *testing.B) { b.StopTimer() dt := New() - for i := 0; i < N; i++ { + for i := range N { remote := fmt.Sprintf("dir%09d/file%09d.txt", i, 1) o := mockobject.New(remote) dt.Add(o) diff --git a/fs/features.go b/fs/features.go index cb9b69649..7306f537c 100644 --- a/fs/features.go +++ b/fs/features.go @@ -191,7 +191,7 @@ type Features struct { // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that - Command func(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) + Command func(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) // Shutdown the backend, closing any background tasks and any // cached connections. @@ -209,7 +209,7 @@ func (ft *Features) Disable(name string) *Features { } v := reflect.ValueOf(ft).Elem() vType := v.Type() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { vName := vType.Field(i).Name field := v.Field(i) if strings.EqualFold(name, vName) { @@ -238,7 +238,7 @@ func (ft *Features) Disable(name string) *Features { func (ft *Features) List() (out []string) { v := reflect.ValueOf(ft).Elem() vType := v.Type() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { out = append(out, vType.Field(i).Name) } return out @@ -250,7 +250,7 @@ func (ft *Features) Enabled() (features map[string]bool) { v := reflect.ValueOf(ft).Elem() vType := v.Type() features = make(map[string]bool, v.NumField()) - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { vName := vType.Field(i).Name field := v.Field(i) if field.Kind() == reflect.Func { @@ -761,7 +761,7 @@ type Commander interface { // The result should be capable of being JSON encoded // If it is a string or a []string it will be shown to the user // otherwise it will be JSON encoded and shown to the user like that - Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) + Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) } // Shutdowner is an interface to wrap the Shutdown function diff --git a/fs/filter/filter.go b/fs/filter/filter.go index ebaa68119..ebe95e37c 100644 --- a/fs/filter/filter.go +++ b/fs/filter/filter.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "path" + "slices" "strings" "time" @@ -388,10 +389,8 @@ func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool { obj, ok := entry.(fs.Object) if ok { basename := path.Base(obj.Remote()) - for _, excludeFile := range f.Opt.ExcludeFile { - if basename == excludeFile { - return true - } + if slices.Contains(f.Opt.ExcludeFile, basename) { + return true } } } @@ -559,7 +558,7 @@ func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Conte remotes = make(chan string, checkers) g, gCtx = errgroup.WithContext(ctx) ) - for i := 0; i < checkers; i++ { + for range checkers { g.Go(func() (err error) { var entries = make(fs.DirEntries, 1) for remote := range remotes { diff --git a/fs/filter/rules.go b/fs/filter/rules.go index cffca0d20..085a02aa5 100644 --- a/fs/filter/rules.go +++ b/fs/filter/rules.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "regexp" + "slices" 
"strings" "github.com/rclone/rclone/fs" @@ -106,10 +107,8 @@ func (rs *rules) include(remote string) bool { // on. func (rs *rules) includeMany(remotes []string) bool { for _, rule := range rs.rules { - for _, remote := range remotes { - if rule.Match(remote) { - return rule.Include - } + if slices.ContainsFunc(remotes, rule.Match) { + return rule.Include } } return true diff --git a/fs/fserrors/error.go b/fs/fserrors/error.go index bb0f723e2..e65bd5be5 100644 --- a/fs/fserrors/error.go +++ b/fs/fserrors/error.go @@ -7,6 +7,7 @@ import ( "fmt" "io" "net/http" + "slices" "strings" "time" @@ -44,7 +45,7 @@ func (r retryError) Retry() bool { var _ Retrier = retryError("") // RetryErrorf makes an error which indicates it would like to be retried -func RetryErrorf(format string, a ...interface{}) error { +func RetryErrorf(format string, a ...any) error { return retryError(fmt.Sprintf(format, a...)) } @@ -417,10 +418,8 @@ func ShouldRetry(err error) bool { } // Check if it is a retriable error - for _, retriableErr := range retriableErrors { - if err == retriableErr { - return true - } + if slices.Contains(retriableErrors, err) { + return true } // Check error strings (yuch!) too @@ -441,12 +440,7 @@ func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool { if resp == nil { return false } - for _, e := range retryErrorCodes { - if resp.StatusCode == e { - return true - } - } - return false + return slices.Contains(retryErrorCodes, resp.StatusCode) } // ContextError checks to see if ctx is in error. diff --git a/fs/fshttp/http.go b/fs/fshttp/http.go index 9d405df1d..97e0728af 100644 --- a/fs/fshttp/http.go +++ b/fs/fshttp/http.go @@ -245,10 +245,7 @@ func checkServerTime(req *http.Request, resp *http.Response) { // cleanAuth gets rid of one authBuf header within the first 4k func cleanAuth(buf, authBuf []byte) []byte { // Find how much buffer to check - n := 4096 - if len(buf) < n { - n = len(buf) - } + n := min(len(buf), 4096) // See if there is an Authorization: header i := bytes.Index(buf[:n], authBuf) if i < 0 { diff --git a/fs/log.go b/fs/log.go index 5088d1600..d7efc5b61 100644 --- a/fs/log.go +++ b/fs/log.go @@ -72,7 +72,7 @@ var LogOutput = func(level LogLevel, text string) { // LogValueItem describes keyed item for a JSON log entry type LogValueItem struct { key string - value interface{} + value any render bool } @@ -80,7 +80,7 @@ type LogValueItem struct { // augment the JSON output with more structured information. // // key is the dictionary parameter used to store value. -func LogValue(key string, value interface{}) LogValueItem { +func LogValue(key string, value any) LogValueItem { return LogValueItem{key: key, value: value, render: true} } @@ -91,7 +91,7 @@ func LogValue(key string, value interface{}) LogValueItem { // // String() will return a blank string - this is useful to put items // in which don't print into the log. 
-func LogValueHide(key string, value interface{}) LogValueItem { +func LogValueHide(key string, value any) LogValueItem { return LogValueItem{key: key, value: value, render: false} } @@ -125,7 +125,7 @@ func logLogrus(level LogLevel, text string, fields logrus.Fields) { } } -func logLogrusWithObject(level LogLevel, o interface{}, text string, fields logrus.Fields) { +func logLogrusWithObject(level LogLevel, o any, text string, fields logrus.Fields) { if o != nil { if fields == nil { fields = logrus.Fields{} @@ -136,11 +136,11 @@ func logLogrusWithObject(level LogLevel, o interface{}, text string, fields logr logLogrus(level, text, fields) } -func logJSON(level LogLevel, o interface{}, text string) { +func logJSON(level LogLevel, o any, text string) { logLogrusWithObject(level, o, text, nil) } -func logJSONf(level LogLevel, o interface{}, text string, args ...interface{}) { +func logJSONf(level LogLevel, o any, text string, args ...any) { text = fmt.Sprintf(text, args...) fields := logrus.Fields{} for _, arg := range args { @@ -151,19 +151,19 @@ func logJSONf(level LogLevel, o interface{}, text string, args ...interface{}) { logLogrusWithObject(level, o, text, fields) } -func logPlain(level LogLevel, o interface{}, text string) { +func logPlain(level LogLevel, o any, text string) { if o != nil { text = fmt.Sprintf("%v: %s", o, text) } LogOutput(level, text) } -func logPlainf(level LogLevel, o interface{}, text string, args ...interface{}) { +func logPlainf(level LogLevel, o any, text string, args ...any) { logPlain(level, o, fmt.Sprintf(text, args...)) } // LogPrint produces a log string from the arguments passed in -func LogPrint(level LogLevel, o interface{}, text string) { +func LogPrint(level LogLevel, o any, text string) { if GetConfig(context.TODO()).UseJSONLog { logJSON(level, o, text) } else { @@ -172,7 +172,7 @@ func LogPrint(level LogLevel, o interface{}, text string) { } // LogPrintf produces a log string from the arguments passed in -func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) { +func LogPrintf(level LogLevel, o any, text string, args ...any) { if GetConfig(context.TODO()).UseJSONLog { logJSONf(level, o, text, args...) } else { @@ -181,14 +181,14 @@ func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) } // LogLevelPrint writes logs at the given level -func LogLevelPrint(level LogLevel, o interface{}, text string) { +func LogLevelPrint(level LogLevel, o any, text string) { if GetConfig(context.TODO()).LogLevel >= level { LogPrint(level, o, text) } } // LogLevelPrintf writes logs at the given level -func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) { +func LogLevelPrintf(level LogLevel, o any, text string, args ...any) { if GetConfig(context.TODO()).LogLevel >= level { LogPrintf(level, o, text, args...) } @@ -196,7 +196,7 @@ func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interfac // Panic writes alert log output for this Object or Fs and calls panic(). // It should always be seen by the user. -func Panic(o interface{}, text string) { +func Panic(o any, text string) { if GetConfig(context.TODO()).LogLevel >= LogLevelAlert { LogPrint(LogLevelAlert, o, text) } @@ -205,7 +205,7 @@ func Panic(o interface{}, text string) { // Panicf writes alert log output for this Object or Fs and calls panic(). // It should always be seen by the user. 
-func Panicf(o interface{}, text string, args ...interface{}) { +func Panicf(o any, text string, args ...any) { if GetConfig(context.TODO()).LogLevel >= LogLevelAlert { LogPrintf(LogLevelAlert, o, text, args...) } @@ -214,7 +214,7 @@ func Panicf(o interface{}, text string, args ...interface{}) { // Fatal writes critical log output for this Object or Fs and calls os.Exit(1). // It should always be seen by the user. -func Fatal(o interface{}, text string) { +func Fatal(o any, text string) { if GetConfig(context.TODO()).LogLevel >= LogLevelCritical { LogPrint(LogLevelCritical, o, text) } @@ -223,7 +223,7 @@ func Fatal(o interface{}, text string) { // Fatalf writes critical log output for this Object or Fs and calls os.Exit(1). // It should always be seen by the user. -func Fatalf(o interface{}, text string, args ...interface{}) { +func Fatalf(o any, text string, args ...any) { if GetConfig(context.TODO()).LogLevel >= LogLevelCritical { LogPrintf(LogLevelCritical, o, text, args...) } @@ -232,23 +232,23 @@ func Fatalf(o interface{}, text string, args ...interface{}) { // Error writes error log output for this Object or Fs. It // should always be seen by the user. -func Error(o interface{}, text string) { +func Error(o any, text string) { LogLevelPrint(LogLevelError, o, text) } // Errorf writes error log output for this Object or Fs. It // should always be seen by the user. -func Errorf(o interface{}, text string, args ...interface{}) { +func Errorf(o any, text string, args ...any) { LogLevelPrintf(LogLevelError, o, text, args...) } // Print writes log output for this Object or Fs, same as Logf. -func Print(o interface{}, text string) { +func Print(o any, text string) { LogLevelPrint(LogLevelNotice, o, text) } // Printf writes log output for this Object or Fs, same as Logf. -func Printf(o interface{}, text string, args ...interface{}) { +func Printf(o any, text string, args ...any) { LogLevelPrintf(LogLevelNotice, o, text, args...) } @@ -257,7 +257,7 @@ func Printf(o interface{}, text string, args ...interface{}) { // By default rclone should not log very much so only use this for // important things the user should see. The user can filter these // out with the -q flag. -func Log(o interface{}, text string) { +func Log(o any, text string) { LogLevelPrint(LogLevelNotice, o, text) } @@ -266,7 +266,7 @@ func Log(o interface{}, text string) { // By default rclone should not log very much so only use this for // important things the user should see. The user can filter these // out with the -q flag. -func Logf(o interface{}, text string, args ...interface{}) { +func Logf(o any, text string, args ...any) { LogLevelPrintf(LogLevelNotice, o, text, args...) } @@ -274,32 +274,32 @@ func Logf(o interface{}, text string, args ...interface{}) { // level for logging transfers, deletions and things which should // appear with the -v flag. // There is name class on "Info", hence the name "Infoc", "c" for constant. -func Infoc(o interface{}, text string) { +func Infoc(o any, text string) { LogLevelPrint(LogLevelInfo, o, text) } // Infof writes info on transfers for this Object or Fs. Use this // level for logging transfers, deletions and things which should // appear with the -v flag. -func Infof(o interface{}, text string, args ...interface{}) { +func Infof(o any, text string, args ...any) { LogLevelPrintf(LogLevelInfo, o, text, args...) } // Debug writes debugging output for this Object or Fs. Use this for // debug only. The user must have to specify -vv to see this. 
-func Debug(o interface{}, text string) { +func Debug(o any, text string) { LogLevelPrint(LogLevelDebug, o, text) } // Debugf writes debugging output for this Object or Fs. Use this for // debug only. The user must have to specify -vv to see this. -func Debugf(o interface{}, text string, args ...interface{}) { +func Debugf(o any, text string, args ...any) { LogLevelPrintf(LogLevelDebug, o, text, args...) } // LogDirName returns an object for the logger, logging a root // directory which would normally be "" as the Fs -func LogDirName(f Fs, dir string) interface{} { +func LogDirName(f Fs, dir string) any { if dir != "" { return dir } diff --git a/fs/log/caller_hook.go b/fs/log/caller_hook.go index 4d760ae30..dd2c5c470 100644 --- a/fs/log/caller_hook.go +++ b/fs/log/caller_hook.go @@ -77,7 +77,7 @@ func (h *CallerHook) Fire(entry *logrus.Entry) error { func findCaller(skip int) string { file := "" line := 0 - for i := 0; i < 10; i++ { + for i := range 10 { file, line = getCaller(skip + i) if !strings.HasPrefix(file, "logrus") && !strings.Contains(file, "log.go") { break diff --git a/fs/log/log.go b/fs/log/log.go index 1400d4e7e..2e4df791b 100644 --- a/fs/log/log.go +++ b/fs/log/log.go @@ -78,13 +78,13 @@ func fnName() string { // function that logs the exit parameters. // // Any pointers in the exit function will be dereferenced -func Trace(o interface{}, format string, a ...interface{}) func(string, ...interface{}) { +func Trace(o any, format string, a ...any) func(string, ...any) { if fs.GetConfig(context.Background()).LogLevel < fs.LogLevelDebug { - return func(format string, a ...interface{}) {} + return func(format string, a ...any) {} } name := fnName() fs.LogPrintf(fs.LogLevelDebug, o, name+": "+format, a...) - return func(format string, a ...interface{}) { + return func(format string, a ...any) { for i := range a { // read the values of the pointed to items typ := reflect.TypeOf(a[i]) @@ -103,7 +103,7 @@ func Trace(o interface{}, format string, a ...interface{}) func(string, ...inter } // Stack logs a stack trace of callers with the o and info passed in -func Stack(o interface{}, info string) { +func Stack(o any, info string) { if fs.GetConfig(context.Background()).LogLevel < fs.LogLevelDebug { return } diff --git a/fs/march/march.go b/fs/march/march.go index 1116220db..685c3c671 100644 --- a/fs/march/march.go +++ b/fs/march/march.go @@ -154,7 +154,7 @@ func (m *March) Run(ctx context.Context) error { var traversing sync.WaitGroup // running directory traversals checkers := ci.Checkers in := make(chan listDirJob, checkers) - for i := 0; i < checkers; i++ { + for range checkers { wg.Add(1) go func() { defer wg.Done() diff --git a/fs/operations/multithread.go b/fs/operations/multithread.go index 582df8445..4f27d6b43 100644 --- a/fs/operations/multithread.go +++ b/fs/operations/multithread.go @@ -73,10 +73,7 @@ func (mc *multiThreadCopyState) copyChunk(ctx context.Context, chunk int, writer if start >= mc.size { return nil } - end := start + mc.partSize - if end > mc.size { - end = mc.size - } + end := min(start+mc.partSize, mc.size) size := end - start fs.Debugf(mc.src, "multi-thread copy: chunk %d/%d (%d-%d) size %v starting", chunk+1, mc.numChunks, start, end, fs.SizeSuffix(size)) @@ -218,7 +215,7 @@ func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, mc.acc = tr.Account(gCtx, nil) fs.Debugf(src, "Starting multi-thread copy with %d chunks of size %v with %v parallel streams", mc.numChunks, fs.SizeSuffix(mc.partSize), concurrency) - for chunk := 0; chunk < 
mc.numChunks; chunk++ { + for chunk := range mc.numChunks { // Fail fast, in case an errgroup managed function returns an error if gCtx.Err() != nil { break diff --git a/fs/operations/operations.go b/fs/operations/operations.go index df9ea88da..6d3ca4f76 100644 --- a/fs/operations/operations.go +++ b/fs/operations/operations.go @@ -593,7 +593,7 @@ func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, b var errorCount atomic.Int32 var fatalErrorCount atomic.Int32 - for i := 0; i < ci.Checkers; i++ { + for range ci.Checkers { go func() { defer wg.Done() for dst := range toBeDeleted { @@ -732,7 +732,7 @@ func SameDir(fdst, fsrc fs.Info) bool { } // Retry runs fn up to maxTries times if it returns a retriable error -func Retry(ctx context.Context, o interface{}, maxTries int, fn func() error) (err error) { +func Retry(ctx context.Context, o any, maxTries int, fn func() error) (err error) { for tries := 1; tries <= maxTries; tries++ { // Call the function which might error err = fn() @@ -777,7 +777,7 @@ var StdoutMutex sync.Mutex // This writes to stdout holding the StdoutMutex. If you are going to // override it and write to os.Stdout then you should hold the // StdoutMutex too. -var SyncPrintf = func(format string, a ...interface{}) { +var SyncPrintf = func(format string, a ...any) { StdoutMutex.Lock() defer StdoutMutex.Unlock() fmt.Printf(format, a...) @@ -788,7 +788,7 @@ var SyncPrintf = func(format string, a ...interface{}) { // Ignores errors from Fprintf. // // Prints to stdout if w is nil -func SyncFprintf(w io.Writer, format string, a ...interface{}) { +func SyncFprintf(w io.Writer, format string, a ...any) { if w == nil || w == os.Stdout { SyncPrintf(format, a...) } else { @@ -2443,7 +2443,7 @@ func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err err } renames := make(chan rename, ci.Checkers) g, gCtx := errgroup.WithContext(context.Background()) - for i := 0; i < ci.Checkers; i++ { + for range ci.Checkers { g.Go(func() error { for job := range renames { dstOverwritten, _ := f.NewObject(gCtx, job.newPath) @@ -2551,7 +2551,7 @@ var ( // skipDestructiveChoose asks the user which action to take // // Call with interactiveMu held -func skipDestructiveChoose(ctx context.Context, subject interface{}, action string) (skip bool) { +func skipDestructiveChoose(ctx context.Context, subject any, action string) (skip bool) { // Lock the StdoutMutex - must not call fs.Log anything // otherwise it will deadlock with --interactive --progress StdoutMutex.Lock() @@ -2601,7 +2601,7 @@ func skipDestructiveChoose(ctx context.Context, subject interface{}, action stri // // Together they should make sense in this sentence: "Rclone is about // to action subject". 
-func SkipDestructive(ctx context.Context, subject interface{}, action string) (skip bool) { +func SkipDestructive(ctx context.Context, subject any, action string) (skip bool) { var flag string ci := fs.GetConfig(ctx) switch { diff --git a/fs/operations/operations_test.go b/fs/operations/operations_test.go index 3f02c1bf1..8e6331156 100644 --- a/fs/operations/operations_test.go +++ b/fs/operations/operations_test.go @@ -1516,7 +1516,7 @@ func TestRcat(t *testing.T) { r.CheckRemoteItems(t, file1, file2) } - for i := 0; i < 4; i++ { + for i := range 4 { withChecksum := (i & 1) != 0 ignoreChecksum := (i & 2) != 0 t.Run(fmt.Sprintf("withChecksum=%v,ignoreChecksum=%v", withChecksum, ignoreChecksum), func(t *testing.T) { diff --git a/fs/operations/rc_test.go b/fs/operations/rc_test.go index 990cb9c6e..2d8d0f2af 100644 --- a/fs/operations/rc_test.go +++ b/fs/operations/rc_test.go @@ -530,12 +530,12 @@ func TestRcFsInfo(t *testing.T) { assert.Equal(t, want.Root, got["Root"]) assert.Equal(t, want.String, got["String"]) assert.Equal(t, float64(want.Precision), got["Precision"]) - var hashes []interface{} + var hashes []any for _, hash := range want.Hashes { hashes = append(hashes, hash) } assert.Equal(t, hashes, got["Hashes"]) - var features = map[string]interface{}{} + var features = map[string]any{} for k, v := range want.Features { features[k] = v } @@ -627,7 +627,7 @@ func TestRcCommand(t *testing.T) { assert.Contains(t, err.Error(), "command not found") return } - want := rc.Params{"result": map[string]interface{}{ + want := rc.Params{"result": map[string]any{ "arg": []string{ "path1", "path2", diff --git a/fs/rc/jobs/job.go b/fs/rc/jobs/job.go index ceca2cf78..26c45514c 100644 --- a/fs/rc/jobs/job.go +++ b/fs/rc/jobs/job.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "runtime/debug" + "slices" "sync" "sync/atomic" "time" @@ -79,7 +80,7 @@ func (job *Job) removeListener(fn *func()) { defer job.mu.Unlock() for i, ln := range job.listeners { if ln == fn { - job.listeners = append(job.listeners[:i], job.listeners[i+1:]...) + job.listeners = slices.Delete(job.listeners, i, i+1) return } } diff --git a/fs/rc/params.go b/fs/rc/params.go index 392fa9f13..ce4e71370 100644 --- a/fs/rc/params.go +++ b/fs/rc/params.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "math" "net/http" "strconv" @@ -15,7 +16,7 @@ import ( ) // Params is the input and output type for the Func -type Params map[string]interface{} +type Params map[string]any // ErrParamNotFound - this is returned from the Get* functions if the // parameter isn't found along with a zero value of the requested @@ -70,7 +71,7 @@ func IsErrParamInvalid(err error) bool { // out should be a pointer type // // This isn't a very efficient way of dealing with this! -func Reshape(out interface{}, in interface{}) error { +func Reshape(out any, in any) error { b, err := json.Marshal(in) if err != nil { return fmt.Errorf("Reshape failed to Marshal: %w", err) @@ -85,9 +86,7 @@ func Reshape(out interface{}, in interface{}) error { // Copy shallow copies the Params func (p Params) Copy() (out Params) { out = make(Params, len(p)) - for k, v := range p { - out[k] = v - } + maps.Copy(out, p) return out } @@ -95,7 +94,7 @@ func (p Params) Copy() (out Params) { // // If the parameter isn't found then error will be of type // ErrParamNotFound and the returned value will be nil. 
-func (p Params) Get(key string) (interface{}, error) { +func (p Params) Get(key string) (any, error) { value, ok := p[key] if !ok { return nil, ErrParamNotFound(key) @@ -241,7 +240,7 @@ func (p Params) GetBool(key string) (bool, error) { // // If the parameter isn't found then error will be of type // ErrParamNotFound and out will be unchanged. -func (p Params) GetStruct(key string, out interface{}) error { +func (p Params) GetStruct(key string, out any) error { value, err := p.Get(key) if err != nil { return err @@ -262,7 +261,7 @@ func (p Params) GetStruct(key string, out interface{}) error { // GetStructMissingOK works like GetStruct but doesn't return an error // if the key is missing -func (p Params) GetStructMissingOK(key string, out interface{}) error { +func (p Params) GetStructMissingOK(key string, out any) error { _, ok := p[key] if !ok { return nil diff --git a/fs/rc/params_test.go b/fs/rc/params_test.go index 26f3153d9..d8b9b7e51 100644 --- a/fs/rc/params_test.go +++ b/fs/rc/params_test.go @@ -109,7 +109,7 @@ func TestParamsGetString(t *testing.T) { func TestParamsGetInt64(t *testing.T) { for _, test := range []struct { - value interface{} + value any result int64 errString string }{ @@ -152,7 +152,7 @@ func TestParamsGetInt64(t *testing.T) { func TestParamsGetFloat64(t *testing.T) { for _, test := range []struct { - value interface{} + value any result float64 errString string }{ @@ -193,7 +193,7 @@ func TestParamsGetFloat64(t *testing.T) { func TestParamsGetDuration(t *testing.T) { for _, test := range []struct { - value interface{} + value any result time.Duration errString string }{ @@ -244,7 +244,7 @@ func TestParamsGetDuration(t *testing.T) { func TestParamsGetBool(t *testing.T) { for _, test := range []struct { - value interface{} + value any result bool errString string }{ diff --git a/fs/rc/rcserver/metrics_test.go b/fs/rc/rcserver/metrics_test.go index 6141cc2a7..0c51134b6 100644 --- a/fs/rc/rcserver/metrics_test.go +++ b/fs/rc/rcserver/metrics_test.go @@ -41,7 +41,7 @@ func TestMetrics(t *testing.T) { // Test changing a couple options stats.Bytes(500) - for i := 0; i < 30; i++ { + for range 30 { require.NoError(t, stats.DeleteFile(context.Background(), 0)) } stats.Errors(2) diff --git a/fs/rc/rcserver/rcserver_test.go b/fs/rc/rcserver/rcserver_test.go index f79a926cf..d339d85b8 100644 --- a/fs/rc/rcserver/rcserver_test.go +++ b/fs/rc/rcserver/rcserver_test.go @@ -69,7 +69,7 @@ func TestRcServer(t *testing.T) { // Do the simplest possible test to check the server is alive // Do it a few times to wait for the server to start var resp *http.Response - for i := 0; i < 10; i++ { + for range 10 { resp, err = http.Get(testURL + "file.txt") if err == nil { break @@ -843,7 +843,7 @@ func TestContentTypeJSON(t *testing.T) { } func normalizeJSON(t *testing.T, jsonStr string) string { - var jsonObj map[string]interface{} + var jsonObj map[string]any err := json.Unmarshal([]byte(jsonStr), &jsonObj) require.NoError(t, err, "JSON unmarshalling failed") normalizedJSON, err := json.Marshal(jsonObj) diff --git a/fs/registry.go b/fs/registry.go index e341bd2d5..628a18afe 100644 --- a/fs/registry.go +++ b/fs/registry.go @@ -8,6 +8,7 @@ import ( "fmt" "reflect" "regexp" + "slices" "sort" "strings" "sync" @@ -195,8 +196,8 @@ type Option struct { Help string // help, start with a single sentence on a single line that will be extracted for command line help Groups string `json:",omitempty"` // groups this option belongs to - comma separated string for options classification Provider string 
`json:",omitempty"` // set to filter on provider - Default interface{} // default value, nil => "", if set (and not to nil or "") then Required does nothing - Value interface{} // value to be set by flags + Default any // default value, nil => "", if set (and not to nil or "") then Required does nothing + Value any // value to be set by flags Examples OptionExamples `json:",omitempty"` // predefined values that can be selected from list (multiple-choice option) ShortOpt string `json:",omitempty"` // the short option for this if required Hide OptionVisibility // set this to hide the config from the configurator or the command line @@ -232,7 +233,7 @@ func (o *Option) MarshalJSON() ([]byte, error) { } // GetValue gets the current value which is the default if not set -func (o *Option) GetValue() interface{} { +func (o *Option) GetValue() any { val := o.Value if val == nil { val = o.Default @@ -373,7 +374,7 @@ func Register(info *RegInfo) { aliasInfo.Name = alias aliasInfo.Prefix = alias aliasInfo.Hide = true - aliasInfo.Options = append(Options(nil), info.Options...) + aliasInfo.Options = slices.Clone(info.Options) for i := range aliasInfo.Options { aliasInfo.Options[i].Hide = OptionHideBoth } @@ -410,7 +411,7 @@ func MustFind(name string) *RegInfo { // OptionsInfo holds info about an block of options type OptionsInfo struct { Name string // name of this options block for the rc - Opt interface{} // pointer to a struct to set the options in + Opt any // pointer to a struct to set the options in Options Options // description of the options Reload func(context.Context) error // if not nil, call when options changed and on init } diff --git a/fs/sync/pipe.go b/fs/sync/pipe.go index a4a6fa621..124a07780 100644 --- a/fs/sync/pipe.go +++ b/fs/sync/pipe.go @@ -66,12 +66,12 @@ func (p *pipe) Swap(i, j int) { } // Push satisfy heap.Interface - must be called with lock held -func (p *pipe) Push(item interface{}) { +func (p *pipe) Push(item any) { p.queue = append(p.queue, item.(fs.ObjectPair)) } // Pop satisfy heap.Interface - must be called with lock held -func (p *pipe) Pop() interface{} { +func (p *pipe) Pop() any { old := p.queue n := len(old) item := old[n-1] diff --git a/fs/sync/pipe_test.go b/fs/sync/pipe_test.go index 4d9afbe26..94916aa82 100644 --- a/fs/sync/pipe_test.go +++ b/fs/sync/pipe_test.go @@ -114,11 +114,11 @@ func TestPipeConcurrent(t *testing.T) { ctx := context.Background() var count atomic.Int64 - for j := 0; j < readWriters; j++ { + for range readWriters { wg.Add(2) go func() { defer wg.Done() - for i := 0; i < N; i++ { + for range N { // Read from pipe pair2, ok := p.Get(ctx) assert.Equal(t, pair1, pair2) @@ -128,7 +128,7 @@ func TestPipeConcurrent(t *testing.T) { }() go func() { defer wg.Done() - for i := 0; i < N; i++ { + for range N { // Put an object ok := p.Put(ctx, pair1) assert.Equal(t, true, ok) diff --git a/fs/sync/sync.go b/fs/sync/sync.go index e30c86ed2..0e015769e 100644 --- a/fs/sync/sync.go +++ b/fs/sync/sync.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "path" + "slices" "sort" "strings" "sync" @@ -510,7 +511,7 @@ func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs, // This starts the background checkers. 
func (s *syncCopyMove) startCheckers() { s.checkerWg.Add(s.ci.Checkers) - for i := 0; i < s.ci.Checkers; i++ { + for i := range s.ci.Checkers { fraction := (100 * i) / s.ci.Checkers go s.pairChecker(s.toBeChecked, s.toBeUploaded, fraction, &s.checkerWg) } @@ -526,7 +527,7 @@ func (s *syncCopyMove) stopCheckers() { // This starts the background transfers func (s *syncCopyMove) startTransfers() { s.transfersWg.Add(s.ci.Transfers) - for i := 0; i < s.ci.Transfers; i++ { + for i := range s.ci.Transfers { fraction := (100 * i) / s.ci.Transfers go s.pairCopyOrMove(s.ctx, s.toBeUploaded, s.fdst, fraction, &s.transfersWg) } @@ -545,7 +546,7 @@ func (s *syncCopyMove) startRenamers() { return } s.renamerWg.Add(s.ci.Checkers) - for i := 0; i < s.ci.Checkers; i++ { + for i := range s.ci.Checkers { fraction := (100 * i) / s.ci.Checkers go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, fraction, &s.renamerWg) } @@ -827,7 +828,7 @@ func (s *syncCopyMove) popRenameMap(hash string, src fs.Object) (dst fs.Object) // Remove the entry and return it dst = dsts[i] - dsts = append(dsts[:i], dsts[i+1:]...) + dsts = slices.Delete(dsts, i, i+1) if len(dsts) > 0 { s.renameMap[hash] = dsts } else { @@ -856,7 +857,7 @@ func (s *syncCopyMove) makeRenameMap() { s.renameMap = make(map[string][]fs.Object) var wg sync.WaitGroup wg.Add(s.ci.Checkers) - for i := 0; i < s.ci.Checkers; i++ { + for range s.ci.Checkers { go func() { defer wg.Done() for obj := range in { diff --git a/fs/sync/sync_test.go b/fs/sync/sync_test.go index 9c24407b7..4c6407190 100644 --- a/fs/sync/sync_test.go +++ b/fs/sync/sync_test.go @@ -2723,7 +2723,7 @@ func testSyncConcurrent(t *testing.T, subtest string) { itemsBefore := []fstest.Item{} itemsAfter := []fstest.Item{} - for i := 0; i < NFILES; i++ { + for i := range NFILES { nameBoth := fmt.Sprintf("both%d", i) nameOnly := fmt.Sprintf("only%d", i) switch subtest { diff --git a/fs/walk/walk.go b/fs/walk/walk.go index 51e567a4f..9e54d4c7f 100644 --- a/fs/walk/walk.go +++ b/fs/walk/walk.go @@ -388,7 +388,7 @@ func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel i }() }) } - for i := 0; i < ci.Checkers; i++ { + for range ci.Checkers { wg.Add(1) go func() { defer wg.Done() diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go index c6903bc9a..5e1105cfc 100644 --- a/fstest/fstests/fstests.go +++ b/fstest/fstests/fstests.go @@ -17,6 +17,7 @@ import ( "path" "path/filepath" "reflect" + "slices" "sort" "strconv" "strings" @@ -320,12 +321,7 @@ type Opt struct { // returns true if x is found in ss func stringsContains(x string, ss []string) bool { - for _, s := range ss { - if x == s { - return true - } - } - return false + return slices.Contains(ss, x) } // toUpperASCII returns a copy of the string s with all Unicode @@ -484,7 +480,7 @@ func Run(t *testing.T, opt *Opt) { } v := reflect.ValueOf(ft).Elem() vType := v.Type() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { vName := vType.Field(i).Name if stringsContains(vName, opt.UnimplementableFsMethods) { continue @@ -1068,7 +1064,7 @@ func Run(t *testing.T, opt *Opt) { var err error var objs []fs.Object var dirs []fs.Directory - for i := 0; i < 2; i++ { + for range 2 { dir, _ := path.Split(fileName) dir = dir[:len(dir)-1] objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1) @@ -2370,18 +2366,12 @@ func Run(t *testing.T, opt *Opt) { setUploadCutoffer, _ := f.(SetUploadCutoffer) - minChunkSize := opt.ChunkedUpload.MinChunkSize - if minChunkSize < 100 { - minChunkSize = 100 - } + minChunkSize := 
diff --git a/fstest/fstests/fstests.go b/fstest/fstests/fstests.go index c6903bc9a..5e1105cfc 100644 --- a/fstest/fstests/fstests.go +++ b/fstest/fstests/fstests.go @@ -17,6 +17,7 @@ import ( "path" "path/filepath" "reflect" + "slices" "sort" "strconv" "strings" @@ -320,12 +321,7 @@ type Opt struct { // returns true if x is found in ss func stringsContains(x string, ss []string) bool { - for _, s := range ss { - if x == s { - return true - } - } - return false + return slices.Contains(ss, x) } // toUpperASCII returns a copy of the string s with all Unicode @@ -484,7 +480,7 @@ func Run(t *testing.T, opt *Opt) { } v := reflect.ValueOf(ft).Elem() vType := v.Type() - for i := 0; i < v.NumField(); i++ { + for i := range v.NumField() { vName := vType.Field(i).Name if stringsContains(vName, opt.UnimplementableFsMethods) { continue } @@ -1068,7 +1064,7 @@ func Run(t *testing.T, opt *Opt) { var err error var objs []fs.Object var dirs []fs.Directory - for i := 0; i < 2; i++ { + for range 2 { dir, _ := path.Split(fileName) dir = dir[:len(dir)-1] objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1) @@ -2370,18 +2366,12 @@ func Run(t *testing.T, opt *Opt) { setUploadCutoffer, _ := f.(SetUploadCutoffer) - minChunkSize := opt.ChunkedUpload.MinChunkSize - if minChunkSize < 100 { - minChunkSize = 100 - } + minChunkSize := max(opt.ChunkedUpload.MinChunkSize, 100) if opt.ChunkedUpload.CeilChunkSize != nil { minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize) } - maxChunkSize := 2 * fs.Mebi - if maxChunkSize < 2*minChunkSize { - maxChunkSize = 2 * minChunkSize - } + maxChunkSize := max(2*fs.Mebi, 2*minChunkSize) if opt.ChunkedUpload.MaxChunkSize > 0 && maxChunkSize > opt.ChunkedUpload.MaxChunkSize { maxChunkSize = opt.ChunkedUpload.MaxChunkSize } @@ -2495,10 +2485,7 @@ func Run(t *testing.T, opt *Opt) { t.Skipf("%T does not implement SetCopyCutoff", f) } - minChunkSize := opt.ChunkedUpload.MinChunkSize - if minChunkSize < 100 { - minChunkSize = 100 - } + minChunkSize := max(opt.ChunkedUpload.MinChunkSize, 100) if opt.ChunkedUpload.CeilChunkSize != nil { minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize) }
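Note: the clamp rewrites above use the built-in min and max added in Go 1.21. They are generic over ordered types, so named integer types such as fs.SizeSuffix work without conversion, and max(x, lo)/min(x, hi) read as "clamp from below/above". A sketch under that assumption, with SizeSuffix as a hypothetical stand-in:

    package main

    import "fmt"

    type SizeSuffix int64 // hypothetical stand-in for fs.SizeSuffix

    func main() {
        var minChunkSize SizeSuffix = 64
        minChunkSize = max(minChunkSize, 100) // was: if minChunkSize < 100 { minChunkSize = 100 }
        maxChunkSize := max(2*SizeSuffix(1<<20), 2*minChunkSize) // 1<<20 standing in for fs.Mebi
        fmt.Println(minChunkSize, maxChunkSize)
    }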
diff --git a/fstest/run.go b/fstest/run.go index df95395c6..4ffb33692 100644 --- a/fstest/run.go +++ b/fstest/run.go @@ -57,7 +57,7 @@ type Run struct { Precision time.Duration cleanRemote func() mkdir map[string]bool // whether the remote has been made yet for the fs name - Logf, Fatalf func(text string, args ...interface{}) + Logf, Fatalf func(text string, args ...any) } // TestMain drives the tests diff --git a/fstest/testserver/testserver.go b/fstest/testserver/testserver.go index 9cc1354d2..64a83ed49 100644 --- a/fstest/testserver/testserver.go +++ b/fstest/testserver/testserver.go @@ -30,7 +30,7 @@ var ( // Assume we are run somewhere within the rclone root func findConfig() (string, error) { dir := filepath.Join("fstest", "testserver", "init.d") - for i := 0; i < 5; i++ { + for range 5 { fi, err := os.Stat(dir) if err == nil && fi.IsDir() { return filepath.Abs(dir) diff --git a/lib/batcher/batcher_test.go b/lib/batcher/batcher_test.go index e1f5d5551..3391bd81b 100644 --- a/lib/batcher/batcher_test.go +++ b/lib/batcher/batcher_test.go @@ -118,7 +118,7 @@ func TestBatcherCommit(t *testing.T) { require.NoError(t, err) defer b.Shutdown() - for i := 0; i < 10; i++ { + for i := range 10 { wg.Add(1) s := fmt.Sprintf("%d", i) go func() { @@ -162,7 +162,7 @@ func TestBatcherCommitFail(t *testing.T) { require.NoError(t, err) defer b.Shutdown() - for i := 0; i < 10; i++ { + for i := range 10 { wg.Add(1) s := fmt.Sprintf("%d", i) go func() { @@ -202,7 +202,7 @@ func TestBatcherCommitShutdown(t *testing.T) { b, err := New[Item, Result](ctx, nil, commitBatch, opt) require.NoError(t, err) - for i := 0; i < 10; i++ { + for i := range 10 { wg.Add(1) s := fmt.Sprintf("%d", i) go func() { @@ -256,7 +256,7 @@ func TestBatcherCommitAsync(t *testing.T) { require.NoError(t, err) defer b.Shutdown() - for i := 0; i < 10; i++ { + for i := range 10 { wg.Add(1) s := fmt.Sprintf("%d", i) go func() { diff --git a/lib/cache/cache.go b/lib/cache/cache.go index 6f841d182..5ef82a046 100644 --- a/lib/cache/cache.go +++ b/lib/cache/cache.go @@ -16,7 +16,7 @@ type Cache struct { expireRunning bool expireDuration time.Duration // expire the cache entry when it is older than this expireInterval time.Duration // interval to run the cache expire - finalize func(value interface{}) + finalize func(value any) } // New creates a new cache with the default expire duration and interval @@ -26,7 +26,7 @@ func New() *Cache { expireRunning: false, expireDuration: 300 * time.Second, expireInterval: 60 * time.Second, - finalize: func(_ interface{}) {}, + finalize: func(_ any) {}, } } @@ -56,17 +56,17 @@ func (c *Cache) SetExpireInterval(d time.Duration) *Cache { // cacheEntry is stored in the cache type cacheEntry struct { - value interface{} // cached item - err error // creation error - key string // key - lastUsed time.Time // time used for expiry - pinCount int // non zero if the entry should not be removed + value any // cached item + err error // creation error + key string // key + lastUsed time.Time // time used for expiry + pinCount int // non zero if the entry should not be removed } // CreateFunc is called to create new values. If the create function // returns an error it will be cached if ok is true, otherwise the // error will just be returned, allowing negative caching if required. -type CreateFunc func(key string) (value interface{}, ok bool, error error) +type CreateFunc func(key string) (value any, ok bool, error error) // used marks an entry as accessed now and kicks the expire timer off // should be called with the lock held @@ -80,7 +80,7 @@ func (c *Cache) used(entry *cacheEntry) { // Get gets a value named key either from the cache or creates it // afresh with the create function. -func (c *Cache) Get(key string, create CreateFunc) (value interface{}, err error) { +func (c *Cache) Get(key string, create CreateFunc) (value any, err error) { c.mu.Lock() entry, ok := c.cache[key] if !ok { @@ -125,7 +125,7 @@ func (c *Cache) Unpin(key string) { } // PutErr puts a value named key with err into the cache -func (c *Cache) PutErr(key string, value interface{}, err error) { +func (c *Cache) PutErr(key string, value any, err error) { c.mu.Lock() defer c.mu.Unlock() if c.noCache() { @@ -141,12 +141,12 @@ func (c *Cache) PutErr(key string, value interface{}, err error) { } // Put puts a value named key into the cache -func (c *Cache) Put(key string, value interface{}) { +func (c *Cache) Put(key string, value any) { c.PutErr(key, value, nil) } // GetMaybe returns the key and true if found, nil and false if not -func (c *Cache) GetMaybe(key string) (value interface{}, found bool) { +func (c *Cache) GetMaybe(key string) (value any, found bool) { c.mu.Lock() defer c.mu.Unlock() entry, found := c.cache[key] @@ -192,7 +192,7 @@ func (c *Cache) DeletePrefix(prefix string) (deleted int) { // // If there was an existing item at newKey then it takes precedence // and is returned otherwise the item (if any) at oldKey is returned.
-func (c *Cache) Rename(oldKey, newKey string) (value interface{}, found bool) { +func (c *Cache) Rename(oldKey, newKey string) (value any, found bool) { c.mu.Lock() if newEntry, newFound := c.cache[newKey]; newFound { // If new entry is found use that @@ -255,7 +255,7 @@ func (c *Cache) Entries() int { } // SetFinalizer sets a function to be called when a value drops out of the cache -func (c *Cache) SetFinalizer(finalize func(interface{})) { +func (c *Cache) SetFinalizer(finalize func(any)) { c.mu.Lock() c.finalize = finalize c.mu.Unlock() diff --git a/lib/cache/cache_test.go b/lib/cache/cache_test.go index b98dabb54..7bf9322e0 100644 --- a/lib/cache/cache_test.go +++ b/lib/cache/cache_test.go @@ -18,7 +18,7 @@ var ( func setup(t *testing.T) (*Cache, CreateFunc) { called = 0 - create := func(path string) (interface{}, bool, error) { + create := func(path string) (any, bool, error) { assert.Equal(t, 0, called) called++ switch path { @@ -289,7 +289,7 @@ func TestDelete(t *testing.T) { } func TestDeletePrefix(t *testing.T) { - create := func(path string) (interface{}, bool, error) { + create := func(path string) (any, bool, error) { return path, true, nil } c := New() @@ -317,7 +317,7 @@ func TestDeletePrefix(t *testing.T) { func TestCacheRename(t *testing.T) { c := New() - create := func(path string) (interface{}, bool, error) { + create := func(path string) (any, bool, error) { return path, true, nil } @@ -353,10 +353,10 @@ func TestCacheRename(t *testing.T) { func TestCacheFinalize(t *testing.T) { c := New() numCalled := 0 - c.SetFinalizer(func(v interface{}) { + c.SetFinalizer(func(v any) { numCalled++ }) - create := func(path string) (interface{}, bool, error) { + create := func(path string) (any, bool, error) { return path, true, nil } _, _ = c.Get("ok", create) diff --git a/lib/encoder/internal/gen/main.go b/lib/encoder/internal/gen/main.go index dfe556760..1812091e9 100644 --- a/lib/encoder/internal/gen/main.go +++ b/lib/encoder/internal/gen/main.go @@ -6,6 +6,7 @@ import ( "fmt" "math/rand" "os" + "slices" "strconv" "strings" @@ -289,9 +290,9 @@ var testCasesSingleEdge = []testCase{ if j < i { continue } - rIn := append([]rune{}, rIn...) - rOut := append([]rune{}, rOut...) - quoteOut := append([]bool{}, quoteOut...) + rIn := slices.Clone(rIn) + rOut := slices.Clone(rOut) + quoteOut := slices.Clone(quoteOut) for _, in := range []rune{orig, replace} { expect, quote := in, false @@ -398,9 +399,9 @@ var testCasesDoubleEdge = []testCase{ testL := len(rIn) for _, i := range []int{0, testL - 1} { for _, secondOrig := range e2.orig { - rIn := append([]rune{}, rIn...) - rOut := append([]rune{}, rOut...) - quoteOut := append([]bool{}, quoteOut...) 
+ rIn := slices.Clone(rIn) + rOut := slices.Clone(rOut) + quoteOut := slices.Clone(quoteOut) rIn[1], rOut[1], quoteOut[1] = secondOrig, secondOrig, false rIn[testL-2], rOut[testL-2], quoteOut[testL-2] = secondOrig, secondOrig, false @@ -432,18 +433,18 @@ var testCasesDoubleEdge = []testCase{ fatalW(fmt.Fprint(fd, "\n}\n"))("Error writing test case:") } -func fatal(err error, s ...interface{}) { +func fatal(err error, s ...any) { if err != nil { fs.Fatal(nil, fmt.Sprint(append(s, err))) } } -func fatalW(_ int, err error) func(...interface{}) { +func fatalW(_ int, err error) func(...any) { if err != nil { - return func(s ...interface{}) { + return func(s ...any) { fs.Fatal(nil, fmt.Sprint(append(s, err))) } } - return func(s ...interface{}) {} + return func(s ...any) {} } func invalidMask(mask encoder.MultiEncoder) bool { @@ -497,10 +498,7 @@ func buildTestString(mappings, testMappings []mapping, fill ...[]rune) (string, rOut = append(rOut, m.dst...) } inL := len(rIn) - testL := inL * 3 - if testL < 30 { - testL = 30 - } + testL := max(inL*3, 30) rIn = append(rIn, make([]rune, testL-inL)...) rOut = append(rOut, make([]rune, testL-inL)...) quoteOut := make([]bool, testL) @@ -563,7 +561,7 @@ func buildEdgeTestString(edges []edge, testMappings []mapping, fill [][]rune, // populate test strings with values from the `fill` set outer: - for pos := 0; pos < testL; pos++ { + for pos := range testL { m := pos % len(fill) i := rng.Intn(len(fill[m])) r := fill[m][i] diff --git a/lib/http/auth.go b/lib/http/auth.go index 2e2e9f6dd..0088f601e 100644 --- a/lib/http/auth.go +++ b/lib/http/auth.go @@ -66,7 +66,7 @@ Use ` + "`--{{ .Prefix }}salt`" + ` to change the password hashing salt from the // is returned then the user is not authenticated. // // If a non nil value is returned then it is added to the context under the key -type CustomAuthFn func(user, pass string) (value interface{}, err error) +type CustomAuthFn func(user, pass string) (value any, err error) // AuthConfigInfo descripts the Options in use var AuthConfigInfo = fs.Options{{ diff --git a/lib/http/context.go b/lib/http/context.go index cbb551568..e2f56132f 100644 --- a/lib/http/context.go +++ b/lib/http/context.go @@ -43,7 +43,7 @@ func PublicURL(r *http.Request) string { } // CtxGetAuth is a wrapper over the private Auth context key -func CtxGetAuth(ctx context.Context) interface{} { +func CtxGetAuth(ctx context.Context) any { return ctx.Value(ctxKeyAuth) } diff --git a/lib/http/middleware_test.go b/lib/http/middleware_test.go index c0e3911c2..d1f79b725 100644 --- a/lib/http/middleware_test.go +++ b/lib/http/middleware_test.go @@ -78,7 +78,7 @@ func TestMiddlewareAuth(t *testing.T) { }, auth: AuthConfig{ Realm: "test", - CustomAuthFn: func(user, pass string) (value interface{}, err error) { + CustomAuthFn: func(user, pass string) (value any, err error) { if user == "custom" && pass == "custom" { return true, nil } @@ -294,7 +294,7 @@ func TestMiddlewareAuthCertificateUser(t *testing.T) { }, auth: AuthConfig{ Realm: "test", - CustomAuthFn: func(user, pass string) (value interface{}, err error) { + CustomAuthFn: func(user, pass string) (value any, err error) { if user == "custom" && pass == "custom" { return true, nil } @@ -316,7 +316,7 @@ func TestMiddlewareAuthCertificateUser(t *testing.T) { }, auth: AuthConfig{ Realm: "test", - CustomAuthFn: func(user, pass string) (value interface{}, err error) { + CustomAuthFn: func(user, pass string) (value any, err error) { fmt.Println("CUSTOMAUTH", user, pass) if user == "rclone-dev-client" && pass 
== "" { return true, nil diff --git a/lib/http/serve/dir.go b/lib/http/serve/dir.go index 5a92ddeb8..ae1ead6a0 100644 --- a/lib/http/serve/dir.go +++ b/lib/http/serve/dir.go @@ -125,7 +125,7 @@ func (d *Directory) AddEntry(remote string, isDir bool) { } // Error logs the error and if a ResponseWriter is given it writes an http.StatusInternalServerError -func Error(ctx context.Context, what interface{}, w http.ResponseWriter, text string, err error) { +func Error(ctx context.Context, what any, w http.ResponseWriter, text string, err error) { err = fs.CountError(ctx, err) fs.Errorf(what, "%s: %v", text, err) if w != nil { diff --git a/lib/jwtutil/jwtutil.go b/lib/jwtutil/jwtutil.go index 43ae3f4bb..3bf7e6081 100644 --- a/lib/jwtutil/jwtutil.go +++ b/lib/jwtutil/jwtutil.go @@ -19,6 +19,8 @@ import ( "github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/lib/oauthutil" + "maps" + "golang.org/x/oauth2" ) @@ -32,11 +34,9 @@ func RandomHex(n int) (string, error) { } // Config configures rclone using JWT -func Config(id, name, url string, claims jwt.Claims, headerParams map[string]interface{}, queryParams map[string]string, privateKey *rsa.PrivateKey, m configmap.Mapper, client *http.Client) (err error) { +func Config(id, name, url string, claims jwt.Claims, headerParams map[string]any, queryParams map[string]string, privateKey *rsa.PrivateKey, m configmap.Mapper, client *http.Client) (err error) { jwtToken := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) - for key, value := range headerParams { - jwtToken.Header[key] = value - } + maps.Copy(jwtToken.Header, headerParams) payload, err := jwtToken.SignedString(privateKey) if err != nil { return fmt.Errorf("jwtutil: failed to encode payload: %w", err) diff --git a/lib/kv/internal_test.go b/lib/kv/internal_test.go index 5ff47b1a6..f101f720e 100644 --- a/lib/kv/internal_test.go +++ b/lib/kv/internal_test.go @@ -20,7 +20,7 @@ func TestKvConcurrency(t *testing.T) { ctx := context.Background() results := make([]*DB, threadNum) wg.Add(threadNum) - for i := 0; i < threadNum; i++ { + for i := range threadNum { go func(i int) { db, err := Start(ctx, "test", nil) require.NoError(t, err) @@ -35,11 +35,11 @@ func TestKvConcurrency(t *testing.T) { db := results[0] assert.Equal(t, 1, len(dbMap)) assert.Equal(t, threadNum, db.refs) - for i := 0; i < threadNum; i++ { + for i := range threadNum { assert.Equal(t, db, results[i]) } - for i := 0; i < threadNum; i++ { + for i := range threadNum { assert.Equal(t, 1, len(dbMap)) err := db.Stop(false) assert.NoError(t, err, "unexpected error %v at retry %d", err, i) @@ -54,7 +54,7 @@ func TestKvExit(t *testing.T) { require.Equal(t, 0, len(dbMap), "no databases can be started initially") const dbNum = 5 ctx := context.Background() - for i := 0; i < dbNum; i++ { + for i := range dbNum { facility := fmt.Sprintf("test-%d", i) for j := 0; j <= i; j++ { db, err := Start(ctx, facility, nil) diff --git a/lib/mmap/mmap_test.go b/lib/mmap/mmap_test.go index c36d4aadf..0fb0b797f 100644 --- a/lib/mmap/mmap_test.go +++ b/lib/mmap/mmap_test.go @@ -47,7 +47,7 @@ func BenchmarkAllocFree(b *testing.B) { func BenchmarkAllocFreeWithLotsOfAllocations(b *testing.B) { const size = 4096 alloc := func(n int) (allocs [][]byte) { - for i := 0; i < n; i++ { + for range n { mem := MustAlloc(size) mem[0] ^= 0xFF allocs = append(allocs, mem) @@ -76,7 +76,7 @@ func BenchmarkAllocFreeWithLotsOfAllocations(b *testing.B) { func BenchmarkAllocFreeForLotsOfAllocations(b *testing.B) { const size = 4096 alloc := func(n int) (allocs 
diff --git a/lib/kv/internal_test.go b/lib/kv/internal_test.go index 5ff47b1a6..f101f720e 100644 --- a/lib/kv/internal_test.go +++ b/lib/kv/internal_test.go @@ -20,7 +20,7 @@ func TestKvConcurrency(t *testing.T) { ctx := context.Background() results := make([]*DB, threadNum) wg.Add(threadNum) - for i := 0; i < threadNum; i++ { + for i := range threadNum { go func(i int) { db, err := Start(ctx, "test", nil) require.NoError(t, err) @@ -35,11 +35,11 @@ func TestKvConcurrency(t *testing.T) { db := results[0] assert.Equal(t, 1, len(dbMap)) assert.Equal(t, threadNum, db.refs) - for i := 0; i < threadNum; i++ { + for i := range threadNum { assert.Equal(t, db, results[i]) } - for i := 0; i < threadNum; i++ { + for i := range threadNum { assert.Equal(t, 1, len(dbMap)) err := db.Stop(false) assert.NoError(t, err, "unexpected error %v at retry %d", err, i) @@ -54,7 +54,7 @@ func TestKvExit(t *testing.T) { require.Equal(t, 0, len(dbMap), "no databases can be started initially") const dbNum = 5 ctx := context.Background() - for i := 0; i < dbNum; i++ { + for i := range dbNum { facility := fmt.Sprintf("test-%d", i) for j := 0; j <= i; j++ { db, err := Start(ctx, facility, nil) diff --git a/lib/mmap/mmap_test.go b/lib/mmap/mmap_test.go index c36d4aadf..0fb0b797f 100644 --- a/lib/mmap/mmap_test.go +++ b/lib/mmap/mmap_test.go @@ -47,7 +47,7 @@ func BenchmarkAllocFree(b *testing.B) { func BenchmarkAllocFreeWithLotsOfAllocations(b *testing.B) { const size = 4096 alloc := func(n int) (allocs [][]byte) { - for i := 0; i < n; i++ { + for range n { mem := MustAlloc(size) mem[0] ^= 0xFF allocs = append(allocs, mem) @@ -76,7 +76,7 @@ func BenchmarkAllocFreeWithLotsOfAllocations(b *testing.B) { func BenchmarkAllocFreeForLotsOfAllocations(b *testing.B) { const size = 4096 alloc := func(n int) (allocs [][]byte) { - for i := 0; i < n; i++ { + for range n { mem := MustAlloc(size) mem[0] ^= 0xFF allocs = append(allocs, mem) diff --git a/lib/multipart/multipart.go b/lib/multipart/multipart.go index 1545f4ced..3e5cb4de5 100644 --- a/lib/multipart/multipart.go +++ b/lib/multipart/multipart.go @@ -62,10 +62,7 @@ func UploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, opt U } // make concurrency machinery - concurrency := info.Concurrency - if concurrency < 1 { - concurrency = 1 - } + concurrency := max(info.Concurrency, 1) tokens := pacer.NewTokenDispenser(concurrency) uploadCtx, cancel := context.WithCancel(ctx) diff --git a/lib/pacer/pacer.go b/lib/pacer/pacer.go index 6cfcb84d6..67f3ffece 100644 --- a/lib/pacer/pacer.go +++ b/lib/pacer/pacer.go @@ -112,7 +112,7 @@ func (p *Pacer) SetMaxConnections(n int) { p.connTokens = nil } else { p.connTokens = make(chan struct{}, n) - for i := 0; i < n; i++ { + for range n { p.connTokens <- struct{}{} } } diff --git a/lib/pacer/pacer_test.go b/lib/pacer/pacer_test.go index aac3cfb89..144298123 100644 --- a/lib/pacer/pacer_test.go +++ b/lib/pacer/pacer_test.go @@ -199,7 +199,7 @@ func TestGoogleDrivePacer(t *testing.T) { const n = 1000 var sum time.Duration // measure average time over n cycles - for i := 0; i < n; i++ { + for range n { c := NewGoogleDrive(MinSleep(1 * time.Millisecond)) sum += c.Calculate(test.state) } @@ -220,7 +220,7 @@ func TestGoogleDrivePacer(t *testing.T) { } { c := NewGoogleDrive(MinSleep(minSleep), Burst(10)) count := 0 - for i := 0; i < test.calls; i++ { + for range test.calls { sleep := c.Calculate(State{}) if sleep != 0 { count++ diff --git a/lib/pacer/pacers.go b/lib/pacer/pacers.go index 960d0469f..45a3f6095 100644 --- a/lib/pacer/pacers.go +++ b/lib/pacer/pacers.go @@ -97,10 +97,7 @@ func (c *Default) Calculate(state State) time.Duration { } return sleepTime } - sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant - if sleepTime < c.minSleep { - sleepTime = c.minSleep - } + sleepTime := max((state.SleepTime<<c.decayConstant-state.SleepTime)>>c.decayConstant, c.minSleep) return sleepTime } @@ -286,10 +283,7 @@ func (c *S3) Calculate(state State) time.Duration { if state.SleepTime == 0 { return c.minSleep } - sleepTime := (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1) - if sleepTime > c.maxSleep { - sleepTime = c.maxSleep - } + sleepTime := min((state.SleepTime<<c.attackConstant)/((1<<c.attackConstant)-1), c.maxSleep) return sleepTime } sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
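Note: the shift arithmetic in the two pacers.go hunks above is easier to check with concrete numbers: decay scales the sleep time by (2^k-1)/2^k and attack by 2^k/(2^k-1), with max/min clamping to the configured bounds. A sketch with assumed values for k and the sleep times:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const k = 2 // stand-in for decayConstant / attackConstant
        sleep := 400 * time.Millisecond
        minSleep := 10 * time.Millisecond
        maxSleep := time.Second

        decayed := max((sleep<<k-sleep)>>k, minSleep)    // 400ms * 3/4 = 300ms
        attacked := min((sleep<<k)/((1<<k)-1), maxSleep) // 400ms * 4/3 = 533.3ms
        fmt.Println(decayed, attacked)
    }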
diff --git a/lib/pacer/tokens.go b/lib/pacer/tokens.go index d1e1e7cea..b32e65648 100644 --- a/lib/pacer/tokens.go +++ b/lib/pacer/tokens.go @@ -13,7 +13,7 @@ func NewTokenDispenser(n int) *TokenDispenser { tokens: make(chan struct{}, n), } // Fill up the upload tokens - for i := 0; i < n; i++ { + for range n { td.tokens <- struct{}{} } return td diff --git a/lib/pool/pool.go b/lib/pool/pool.go index 4cb4d3a71..a6484b7cc 100644 --- a/lib/pool/pool.go +++ b/lib/pool/pool.go @@ -82,7 +82,7 @@ func (bp *Pool) put(buf []byte) { // flush n entries from the entire buffer pool // Call with mu held func (bp *Pool) flush(n int) { - for i := 0; i < n; i++ { + for range n { bp.freeBuffer(bp.get()) } bp.minFill = len(bp.cache) diff --git a/lib/pool/pool_test.go b/lib/pool/pool_test.go index 90b0e03fa..72ac42a8b 100644 --- a/lib/pool/pool_test.go +++ b/lib/pool/pool_test.go @@ -118,7 +118,7 @@ func testFlusher(t *testing.T, useMmap bool, unreliable bool) { checkFlushHasHappened := func(desired int) { var n int - for i := 0; i < 10; i++ { + for range 10 { time.Sleep(100 * time.Millisecond) n = bp.InPool() if n <= desired { diff --git a/lib/pool/reader_writer_test.go b/lib/pool/reader_writer_test.go index d51c70b58..bd6a91631 100644 --- a/lib/pool/reader_writer_test.go +++ b/lib/pool/reader_writer_test.go @@ -355,10 +355,7 @@ func (r *testReader) Read(p []byte) (n int, err error) { if len(r.data) == 0 { return 0, io.EOF } - chunkSize := r.chunkSize - if chunkSize > len(r.data) { - chunkSize = len(r.data) - } + chunkSize := min(r.chunkSize, len(r.data)) n = copy(p, r.data[:chunkSize]) r.data = r.data[n:] return n, nil @@ -405,10 +402,7 @@ func TestRWBoundaryConditions(t *testing.T) { write := func(rw *RW, data []byte, chunkSize int) { writeData := data for len(writeData) > 0 { - i := chunkSize - if i > len(writeData) { - i = len(writeData) - } + i := min(chunkSize, len(writeData)) nn, err := rw.Write(writeData[:i]) assert.NoError(t, err) assert.Equal(t, len(writeData[:i]), nn) diff --git a/lib/random/random_test.go b/lib/random/random_test.go index 1be57dda6..78944f74b 100644 --- a/lib/random/random_test.go +++ b/lib/random/random_test.go @@ -8,7 +8,7 @@ import ( ) func TestStringLength(t *testing.T) { - for i := 0; i < 100; i++ { + for i := range 100 { s := String(i) assert.Equal(t, i, len(s)) } @@ -16,7 +16,7 @@ func TestStringDuplicates(t *testing.T) { seen := map[string]bool{} - for i := 0; i < 100; i++ { + for range 100 { s := String(8) assert.False(t, seen[s]) assert.Equal(t, 8, len(s)) @@ -41,7 +41,7 @@ func TestPasswordLength(t *testing.T) { func TestPasswordDuplicates(t *testing.T) { seen := map[string]bool{} - for i := 0; i < 100; i++ { + for range 100 { s, err := Password(64) require.NoError(t, err) assert.False(t, seen[s]) diff --git a/lib/ranges/ranges_test.go b/lib/ranges/ranges_test.go index f71322ac3..c172add29 100644 --- a/lib/ranges/ranges_test.go +++ b/lib/ranges/ranges_test.go @@ -3,6 +3,7 @@ package ranges import ( "fmt" "math/rand" + "slices" "testing" "github.com/stretchr/testify/assert" @@ -139,7 +140,7 @@ func checkRanges(t *testing.T, rs Ranges, what string) bool { return true } ok := true - for i := 0; i < len(rs)-1; i++ { + for i := range len(rs) - 1 { a := rs[i] b := rs[i+1] if a.Pos >= b.Pos { @@ -207,7 +208,7 @@ func TestRangeCoalesce(t *testing.T) { i: 1, }, } { - got := append(Ranges{}, test.rs...) + got := slices.Clone(test.rs) got.coalesce(test.i) what := fmt.Sprintf("test rs=%v, i=%d", test.rs, test.i) assert.Equal(t, test.want, got, what) @@ -224,7 +225,7 @@ func TestRangeInsert(t *testing.T) { { new: Range{Pos: 1, Size: 0}, rs: Ranges{}, - want: Ranges(nil), + want: Ranges{}, }, { new: Range{Pos: 1, Size: 1}, // .N....... @@ -269,7 +270,7 @@ func TestRangeInsert(t *testing.T) { want: Ranges{{38, 8}, {51, 12}}, }, } { - got := append(Ranges(nil), test.rs...)
+ got := slices.Clone(test.rs) got.Insert(test.new) what := fmt.Sprintf("test new=%v, rs=%v", test.new, test.rs) assert.Equal(t, test.want, got, what) @@ -279,9 +280,9 @@ } func TestRangeInsertRandom(t *testing.T) { - for i := 0; i < 100; i++ { + for range 100 { var rs Ranges - for j := 0; j < 100; j++ { + for range 100 { var r = Range{ Pos: rand.Int63n(100), Size: rand.Int63n(10) + 1,
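Note: slices.Clone makes a shallow copy, like the append idioms it replaces, with one nil/empty wrinkle that the want: Ranges(nil) -> Ranges{} change above accounts for: appending nothing to a nil base yields nil, whereas Clone preserves the (non-)nilness of its argument. A sketch of just that distinction:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        empty := []int{}
        fmt.Println(append([]int(nil), empty...) == nil) // true: old idiom yields nil
        fmt.Println(slices.Clone(empty) == nil)          // false: Clone stays non-nil
        fmt.Println(slices.Clone([]int(nil)) == nil)     // true: Clone preserves nil itself
    }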
diff --git a/lib/rest/rest.go b/lib/rest/rest.go index a6a94070d..2557b68c3 100644 --- a/lib/rest/rest.go +++ b/lib/rest/rest.go @@ -11,6 +11,7 @@ import ( "errors" "fmt" "io" + "maps" "mime/multipart" "net/http" "net/url" @@ -186,14 +187,14 @@ func checkDrainAndClose(r io.ReadCloser, err *error) { } // DecodeJSON decodes resp.Body into result -func DecodeJSON(resp *http.Response, result interface{}) (err error) { +func DecodeJSON(resp *http.Response, result any) (err error) { defer checkDrainAndClose(resp.Body, &err) decoder := json.NewDecoder(resp.Body) return decoder.Decode(result) } // DecodeXML decodes resp.Body into result -func DecodeXML(resp *http.Response, result interface{}) (err error) { +func DecodeXML(resp *http.Response, result any) (err error) { defer checkDrainAndClose(resp.Body, &err) decoder := xml.NewDecoder(resp.Body) // MEGAcmd has included escaped HTML entities in its XML output, so we have to be able to @@ -286,9 +287,7 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e } headers := make(map[string]string) // Set default headers - for k, v := range api.headers { - headers[k] = v - } + maps.Copy(headers, api.headers) if opts.ContentType != "" { headers["Content-Type"] = opts.ContentType } @@ -311,9 +310,7 @@ func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, e req.Close = true } // Set any extra headers - for k, v := range opts.ExtraHeaders { - headers[k] = v - } + maps.Copy(headers, opts.ExtraHeaders) // add any options to the headers fs.OpenOptionAddHeaders(opts.Options, headers) // Now set the headers @@ -492,7 +489,7 @@ func MultipartUpload(ctx context.Context, in io.Reader, params url.Values, conte // parameter name MultipartMetadataName. // // It will return resp if at all possible, even if err is set -func (api *Client) CallJSON(ctx context.Context, opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) { +func (api *Client) CallJSON(ctx context.Context, opts *Opts, request any, response any) (resp *http.Response, err error) { return api.callCodec(ctx, opts, request, response, json.Marshal, DecodeJSON, "application/json") } @@ -509,14 +506,14 @@ func (api *Client) CallJSON(ctx context.Context, opts *Opts, request interface{} // See CallJSON for a description of MultipartParams and related opts. // // It will return resp if at all possible, even if err is set -func (api *Client) CallXML(ctx context.Context, opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) { +func (api *Client) CallXML(ctx context.Context, opts *Opts, request any, response any) (resp *http.Response, err error) { return api.callCodec(ctx, opts, request, response, xml.Marshal, DecodeXML, "application/xml") } -type marshalFn func(v interface{}) ([]byte, error) -type decodeFn func(resp *http.Response, result interface{}) (err error) +type marshalFn func(v any) ([]byte, error) +type decodeFn func(resp *http.Response, result any) (err error) -func (api *Client) callCodec(ctx context.Context, opts *Opts, request interface{}, response interface{}, marshal marshalFn, decode decodeFn, contentType string) (resp *http.Response, err error) { +func (api *Client) callCodec(ctx context.Context, opts *Opts, request any, response any, marshal marshalFn, decode decodeFn, contentType string) (resp *http.Response, err error) { var requestBody []byte // Marshal the request if given if request != nil { diff --git a/lib/structs/structs.go b/lib/structs/structs.go index 4bbc1a738..f4ad54441 100644 --- a/lib/structs/structs.go +++ b/lib/structs/structs.go @@ -17,12 +17,12 @@ import ( // This is useful for copying between almost identical structures that // are frequently present in auto-generated code for cloud storage // interfaces. -func SetFrom(a, b interface{}) { +func SetFrom(a, b any) { ta := reflect.TypeOf(a).Elem() tb := reflect.TypeOf(b).Elem() va := reflect.ValueOf(a).Elem() vb := reflect.ValueOf(b).Elem() - for i := 0; i < tb.NumField(); i++ { + for i := range tb.NumField() { bField := vb.Field(i) tbField := tb.Field(i) name := tbField.Name @@ -41,12 +41,12 @@ func SetFrom(a, b interface{}) { // This copies the public members only from b to a. This is useful if // you can't just use a struct copy because it contains a private // mutex, e.g. as http.Transport.
-func SetDefaults(a, b interface{}) { +func SetDefaults(a, b any) { pt := reflect.TypeOf(a) t := pt.Elem() va := reflect.ValueOf(a).Elem() vb := reflect.ValueOf(b).Elem() - for i := 0; i < t.NumField(); i++ { + for i := range t.NumField() { aField := va.Field(i) // Set a from b if it is public if aField.CanSet() { diff --git a/lib/structs/structs_test.go b/lib/structs/structs_test.go index 7f742c5c9..fe13b19ed 100644 --- a/lib/structs/structs_test.go +++ b/lib/structs/structs_test.go @@ -9,7 +9,7 @@ import ( ) // returns the "%p" representation of the thing passed in -func ptr(p interface{}) string { +func ptr(p any) string { return fmt.Sprintf("%p", p) } diff --git a/vfs/dir.go b/vfs/dir.go index a4a31c4b6..d54e01e92 100644 --- a/vfs/dir.go +++ b/vfs/dir.go @@ -181,12 +181,12 @@ func (d *Dir) Path() (name string) { } // Sys returns underlying data source (can be nil) - satisfies Node interface -func (d *Dir) Sys() interface{} { +func (d *Dir) Sys() any { return d.sys.Load() } // SetSys sets the underlying data source (can be nil) - satisfies Node interface -func (d *Dir) SetSys(x interface{}) { +func (d *Dir) SetSys(x any) { d.sys.Store(x) } diff --git a/vfs/dir_test.go b/vfs/dir_test.go index 5a4b63e04..7bcbba0f6 100644 --- a/vfs/dir_test.go +++ b/vfs/dir_test.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "runtime" + "slices" "sort" "testing" "time" @@ -317,7 +318,7 @@ func TestDirReadDirAll(t *testing.T) { features := r.Fremote.Features() if features.CanHaveEmptyDirectories { // snip out virtualDir2 which will only be present if can't have empty dirs - want = append(want[:2], want[3:]...) + want = slices.Delete(want, 2, 3) } checkListing(t, dir, want)
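Note: slices.Delete(s, i, j) removes s[i:j] and shifts the tail down, like the append(s[:i], s[i+1:]...) splice it replaces, but since Go 1.22 it also zeroes the vacated slots. For pointer slices such as the want listing above or f.writers below, that lets dropped elements be garbage collected instead of lingering in the backing array. A sketch, assuming Go 1.22+:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        writers := []*int{new(int), new(int), new(int)}
        backing := writers[:cap(writers)] // keep a view of the old tail
        writers = slices.Delete(writers, 1, 2)
        fmt.Println(len(writers), cap(writers)) // 2 3
        fmt.Println(backing[2] == nil)          // true: the vacated slot was zeroed
    }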
diff --git a/vfs/file.go b/vfs/file.go index b94e3dbb4..76aa03b7d 100644 --- a/vfs/file.go +++ b/vfs/file.go @@ -12,6 +12,8 @@ import ( "sync/atomic" "time" + "slices" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/log" "github.com/rclone/rclone/fs/operations" @@ -181,12 +183,12 @@ func (f *File) CachePath() string { } // Sys returns underlying data source (can be nil) - satisfies Node interface -func (f *File) Sys() interface{} { +func (f *File) Sys() any { return f.sys.Load() } // SetSys sets the underlying data source (can be nil) - satisfies Node interface -func (f *File) SetSys(x interface{}) { +func (f *File) SetSys(x any) { f.sys.Store(x) } @@ -343,7 +345,7 @@ func (f *File) delWriter(h Handle) { } } if found >= 0 { - f.writers = append(f.writers[:found], f.writers[found+1:]...) + f.writers = slices.Delete(f.writers, found, found+1) f.nwriters.Add(-1) } else { fs.Debugf(f._path(), "File.delWriter couldn't find handle") } @@ -579,7 +581,7 @@ func (f *File) exists() bool { // // Call without the mutex held func (f *File) waitForValidObject() (o fs.Object, err error) { - for i := 0; i < 50; i++ { + for range 50 { f.mu.RLock() o = f.o nwriters := len(f.writers) @@ -752,7 +754,7 @@ const MaxSymlinkIterations = 32 func (f *File) resolveNode() (target Node, err error) { defer log.Trace(f.Path(), "")("target=%v, err=%v", &target, &err) seen := make(map[string]struct{}) - for tries := 0; tries < MaxSymlinkIterations; tries++ { + for range MaxSymlinkIterations { // If f isn't a symlink, we've arrived at the target if !f.IsSymlink() { return f, nil diff --git a/vfs/rc.go b/vfs/rc.go index c2ad3caca..d780048bd 100644 --- a/vfs/rc.go +++ b/vfs/rc.go @@ -228,7 +228,7 @@ func rcForget(ctx context.Context, in rc.Params) (out rc.Params, err error) { return out, nil } -func getDuration(k string, v interface{}) (time.Duration, error) { +func getDuration(k string, v any) (time.Duration, error) { s, ok := v.(string) if !ok { return 0, fmt.Errorf("value must be string %q=%v", k, v) } @@ -278,7 +278,7 @@ func getStatus(vfs *VFS, in rc.Params) (out rc.Params, err error) { return rc.Params{ "enabled": vfs.Opt.PollInterval != 0, "supported": vfs.pollChan != nil, - "interval": map[string]interface{}{ + "interval": map[string]any{ "raw": vfs.Opt.PollInterval, "seconds": time.Duration(vfs.Opt.PollInterval) / time.Second, "string": vfs.Opt.PollInterval.String(), diff --git a/vfs/read.go b/vfs/read.go index aa2b6555a..777946f26 100644 --- a/vfs/read.go +++ b/vfs/read.go @@ -265,10 +265,7 @@ func (fh *ReadFileHandle) readAt(p []byte, off int64) (n int, err error) { fs.Errorf(fh.remote, "ReadFileHandle.Read error: %v", EBADF) return 0, ECLOSED } - maxBuf := 1024 * 1024 - if len(p) < maxBuf { - maxBuf = len(p) - } + maxBuf := min(len(p), 1024*1024) if gap := off - fh.offset; gap > 0 && gap < int64(8*maxBuf) { waitSequential("read", fh.remote, &fh.cond, time.Duration(fh.file.VFS().Opt.ReadWait), &fh.offset, off) } diff --git a/vfs/read_write_test.go b/vfs/read_write_test.go index e89e9a5e2..5b30ed675 100644 --- a/vfs/read_write_test.go +++ b/vfs/read_write_test.go @@ -732,7 +732,7 @@ func TestRWCacheUpdate(t *testing.T) { const filename = "TestRWCacheUpdate" modTime := time.Now().Add(-time.Hour) - for i := 0; i < 10; i++ { + for i := range 10 { modTime = modTime.Add(time.Minute) // Refresh test file contents := fmt.Sprintf("TestRWCacheUpdate%03d", i) diff --git a/vfs/test_vfs/test_vfs.go b/vfs/test_vfs/test_vfs.go index 19d57a670..a38f72e67 100644 --- a/vfs/test_vfs/test_vfs.go +++ b/vfs/test_vfs/test_vfs.go @@ -91,14 +91,14 @@ func (t *Test) randomTest() { } // logf logs things - not shown unless -v -func (t *Test) logf(format string, a ...interface{}) { +func (t *Test) logf(format string, a ...any) { if *verbose { fs.Logf(nil, t.prefix+format, a...) } } // errorf logs errors -func (t *Test) errorf(format string, a ...interface{}) { +func (t *Test) errorf(format string, a ...any) { fs.Logf(nil, t.prefix+"ERROR: "+format, a...) } @@ -267,7 +267,7 @@ func (t *Test) Tidy() { func (t *Test) RandomTests(iterations int, quit chan struct{}) { var finished = make(chan struct{}) go func() { - for i := 0; i < iterations; i++ { + for range iterations { t.randomTest() } close(finished) @@ -295,7 +295,7 @@ func main() { wg sync.WaitGroup quit = make(chan struct{}, *iterations) ) - for i := 0; i < *number; i++ { + for 
range *number { wg.Add(1) go func() { defer wg.Done() diff --git a/vfs/vfs.go b/vfs/vfs.go index 8907a6e50..01124ecc7 100644 --- a/vfs/vfs.go +++ b/vfs/vfs.go @@ -33,6 +33,8 @@ import ( "sync/atomic" "time" + "slices" + "github.com/go-git/go-billy/v5" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/cache" @@ -65,7 +67,7 @@ type Node interface { Open(flags int) (Handle, error) Truncate(size int64) error Path() string - SetSys(interface{}) + SetSys(any) } // Check interfaces @@ -358,7 +360,7 @@ func (vfs *VFS) Shutdown() { for i, activeVFS := range activeVFSes { if activeVFS == vfs { activeVFSes[i] = nil - active[configName] = append(activeVFSes[:i], activeVFSes[i+1:]...) + active[configName] = slices.Delete(activeVFSes, i, i+1) break } } diff --git a/vfs/vfscache/cache_test.go b/vfs/vfscache/cache_test.go index 327962cf2..8c5510f6d 100644 --- a/vfs/vfscache/cache_test.go +++ b/vfs/vfscache/cache_test.go @@ -640,7 +640,7 @@ func TestCacheCleaner(t *testing.T) { assert.Equal(t, fmt.Sprintf("%p", potato), fmt.Sprintf("%p", potato2)) assert.True(t, found) - for i := 0; i < 100; i++ { + for range 100 { time.Sleep(time.Duration(10 * opt.CachePollInterval)) potato2, found = c.get("potato") if !found { diff --git a/vfs/vfscache/item.go b/vfs/vfscache/item.go index b76688db1..66adc9b66 100644 --- a/vfs/vfscache/item.go +++ b/vfs/vfscache/item.go @@ -494,7 +494,7 @@ func (item *Item) _createFile(osPath string) (err error) { // Open the local file from the object passed in. Wraps open() // to provide recovery from out of space error. func (item *Item) Open(o fs.Object) (err error) { - for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ { + for range fs.GetConfig(context.TODO()).LowLevelRetries { item.preAccess() err = item.open(o) item.postAccess() @@ -1246,7 +1246,7 @@ func (item *Item) GetModTime() (modTime time.Time, err error) { func (item *Item) ReadAt(b []byte, off int64) (n int, err error) { n = 0 var expBackOff int - for retries := 0; retries < fs.GetConfig(context.TODO()).LowLevelRetries; retries++ { + for retries := range fs.GetConfig(context.TODO()).LowLevelRetries { item.preAccess() n, err = item.readAt(b, off) item.postAccess() diff --git a/vfs/vfscache/item_test.go b/vfs/vfscache/item_test.go index fa5840e01..048261b62 100644 --- a/vfs/vfscache/item_test.go +++ b/vfs/vfscache/item_test.go @@ -529,10 +529,7 @@ func TestItemReadWrite(t *testing.T) { assert.False(t, item.present()) for !item.present() { blockSize := rand.Intn(len(buf)) - offset := rand.Int63n(size+2*int64(blockSize)) - int64(blockSize) - if offset < 0 { - offset = 0 - } + offset := max(rand.Int63n(size+2*int64(blockSize))-int64(blockSize), 0) _, _ = readCheck(t, item, offset, blockSize) } require.NoError(t, item.Close(nil)) @@ -544,7 +541,7 @@ func TestItemReadWrite(t *testing.T) { require.NoError(t, item.Open(obj)) assert.False(t, item.present()) var wg sync.WaitGroup - for i := 0; i < 8; i++ { + for range 8 { wg.Add(1) go func() { defer wg.Done() @@ -553,10 +550,7 @@ func TestItemReadWrite(t *testing.T) { buf2 := make([]byte, 1024*1024) for !item.present() { blockSize := rand.Intn(len(buf)) - offset := rand.Int63n(size+2*int64(blockSize)) - int64(blockSize) - if offset < 0 { - offset = 0 - } + offset := max(rand.Int63n(size+2*int64(blockSize))-int64(blockSize), 0) _, _ = readCheckBuf(t, in, buf, buf2, item, offset, blockSize) } }()
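Note: slices.Contains requires a comparable element type and compares with ==, so on a slice of pointers (like wb.items in the writeback tests below) it checks pointer identity, which is exactly what the replaced checkOnHeap loop did. A sketch of that behaviour:

    package main

    import (
        "fmt"
        "slices"
    )

    type item struct{ name string }

    func main() {
        a, b := &item{"a"}, &item{"b"}
        items := []*item{a, b}
        fmt.Println(slices.Contains(items, a))          // true: same pointer
        fmt.Println(slices.Contains(items, &item{"a"})) // false: == on pointers is identity
    }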
diff --git a/vfs/vfscache/writeback/writeback.go b/vfs/vfscache/writeback/writeback.go index b3c022253..fe7b9ed62 100644 --- a/vfs/vfscache/writeback/writeback.go +++ b/vfs/vfscache/writeback/writeback.go @@ -97,14 +97,14 @@ func (ws writeBackItems) Swap(i, j int) { ws[j].index = j } -func (ws *writeBackItems) Push(x interface{}) { +func (ws *writeBackItems) Push(x any) { n := len(*ws) item := x.(*writeBackItem) item.index = n *ws = append(*ws, item) } -func (ws *writeBackItems) Pop() interface{} { +func (ws *writeBackItems) Pop() any { old := *ws n := len(old) item := old[n-1] @@ -227,10 +227,7 @@ func (wb *WriteBack) _resetTimer() { return } wb.expiry = wbItem.expiry - dt := time.Until(wbItem.expiry) - if dt < 0 { - dt = 0 - } + dt := max(time.Until(wbItem.expiry), 0) // fs.Debugf(nil, "resetTimer dt=%v", dt) if wb.timer != nil { wb.timer.Stop() } diff --git a/vfs/vfscache/writeback/writeback_test.go b/vfs/vfscache/writeback/writeback_test.go index 9bbf90f0d..87c067627 100644 --- a/vfs/vfscache/writeback/writeback_test.go +++ b/vfs/vfscache/writeback/writeback_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "slices" + "github.com/rclone/rclone/fs" "github.com/rclone/rclone/vfs/vfscommon" "github.com/stretchr/testify/assert" @@ -85,10 +87,8 @@ func checkOnHeap(t *testing.T, wb *WriteBack, wbItem *writeBackItem) { wb.mu.Lock() defer wb.mu.Unlock() assert.True(t, wbItem.onHeap) - for i := range wb.items { - if wb.items[i] == wbItem { - return - } + if slices.Contains(wb.items, wbItem) { + return } assert.Failf(t, "expecting %q on heap", wbItem.name) } @@ -275,7 +275,7 @@ func (pi *putItem) finish(err error) { } func waitUntilNoTransfers(t *testing.T, wb *WriteBack) { - for i := 0; i < 100; i++ { + for range 100 { wb.mu.Lock() uploads := wb.uploads wb.mu.Unlock() @@ -601,7 +601,7 @@ func TestWriteBackMaxQueue(t *testing.T) { // put toTransfer things in the queue pis := []*putItem{} - for i := 0; i < toTransfer; i++ { + for range toTransfer { pi := newPutItem(t) pis = append(pis, pi) wb.Add(0, fmt.Sprintf("number%d", 1), 10, true, pi.put) @@ -612,7 +612,7 @@ assert.Equal(t, 0, inProgress) // now start the first maxTransfers - this should stop the timer - for i := 0; i < maxTransfers; i++ { + for i := range maxTransfers { <-pis[i].started } @@ -624,7 assert.Equal(t, maxTransfers, inProgress) // now finish the first maxTransfers - for i := 0; i < maxTransfers; i++ { + for i := range maxTransfers { pis[i].finish(nil) } diff --git a/vfs/vfstest/fs.go b/vfs/vfstest/fs.go index 944ff4a35..c9db4fa19 100644 --- a/vfs/vfstest/fs.go +++ b/vfs/vfstest/fs.go @@ -364,7 +364,7 @@ func (r *Run) rm(t *testing.T, filepath string) { require.NoError(t, err) // Wait for file to disappear from listing - for i := 0; i < 100; i++ { + for range 100 { _, err := r.os.Stat(filepath) if os.IsNotExist(err) { return diff --git a/vfs/vfstest/read.go b/vfs/vfstest/read.go index 6b34a771a..518d5021f 100644 --- a/vfs/vfstest/read.go +++ b/vfs/vfstest/read.go @@ -15,10 +15,10 @@ func TestReadByByte(t *testing.T) { run.createFile(t, "testfile", string(data)) run.checkDir(t, "testfile 10") - for i := 0; i < len(data); i++ { + for i := range data { fd, err := run.os.Open(run.path("testfile")) assert.NoError(t, err) - for j := 0; j < i; j++ { + for j := range i { buf := make([]byte, 1) n, err := io.ReadFull(fd, buf) assert.NoError(t, err)