mirror of
https://github.com/rclone/rclone.git
synced 2025-06-25 14:31:39 +02:00
build: modernize Go usage
This commit modernizes Go usage. This was done with:

    go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...

Then files needed to be `go fmt`ed and a few comments needed to be restored.

The modernizations include replacing:

- if/else conditional assignment by a call to the built-in min or max functions added in go1.21
- sort.Slice(s, func(i, j int) bool { return s[i] < s[j] }) by a call to slices.Sort(s), added in go1.21
- interface{} by the 'any' type added in go1.18
- append([]T(nil), s...) by slices.Clone(s) or slices.Concat(s), added in go1.21
- a loop around an m[k]=v map update by a call to one of the Collect, Copy, Clone, or Insert functions from the maps package, added in go1.21
- []byte(fmt.Sprintf...) by fmt.Appendf(nil, ...), added in go1.19
- append(s[:i], s[i+1:]...) by slices.Delete(s, i, i+1), added in go1.21
- a 3-clause for i := 0; i < n; i++ {} loop by for i := range n {}, added in go1.22
This commit is contained in:
parent
431386085f
commit
401cf81034
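The sketch below illustrates the before/after shape of the most common rewrites in this commit — the go1.21 min/max builtins, go1.22 range-over-int, slices.Contains, and maps.Copy. It is a minimal standalone example with placeholder names, not code taken from rclone:

    package main

    import (
    	"fmt"
    	"maps"
    	"slices"
    )

    func main() {
    	s := []string{"b", "a", "c"}

    	// before: sort.Slice(s, func(i, j int) bool { return s[i] < s[j] })
    	slices.Sort(s)

    	// go1.22 range-over-int; before: for i := 0; i < 3; i++ { ... }
    	for i := range 3 {
    		fmt.Println("iteration", i)
    	}

    	// go1.21 builtins; before: v := len(s); if v < 10 { v = 10 }
    	v := max(len(s), 10)

    	// before: found := false; for _, e := range s { if e == "a" { found = true; break } }
    	found := slices.Contains(s, "a")

    	// before: for k, val := range src { dst[k] = val }
    	dst := map[string]int{"x": 0}
    	maps.Copy(dst, map[string]int{"x": 1, "y": 2})

    	fmt.Println(v, found, dst)
    }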
@@ -19,6 +19,7 @@ import (
 	"net/url"
 	"os"
 	"path"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -681,10 +682,8 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 			return true, err
 		}
 		statusCode := storageErr.StatusCode
-		for _, e := range retryErrorCodes {
-			if statusCode == e {
-				return true, err
-			}
+		if slices.Contains(retryErrorCodes, statusCode) {
+			return true, err
 		}
 	}
 	return fserrors.ShouldRetry(err), err
@@ -61,7 +61,7 @@ const chars = "abcdefghijklmnopqrstuvwzyxABCDEFGHIJKLMNOPQRSTUVWZYX"
 
 func randomString(charCount int) string {
 	strBldr := strings.Builder{}
-	for i := 0; i < charCount; i++ {
+	for range charCount {
 		randPos := rand.Int63n(52)
 		strBldr.WriteByte(chars[randPos])
 	}
@@ -130,10 +130,10 @@ type AuthorizeAccountResponse struct {
 	AbsoluteMinimumPartSize int    `json:"absoluteMinimumPartSize"` // The smallest possible size of a part of a large file.
 	AccountID               string `json:"accountId"`               // The identifier for the account.
 	Allowed                 struct { // An object (see below) containing the capabilities of this auth token, and any restrictions on using it.
 		BucketID     string   `json:"bucketId"`     // When present, access is restricted to one bucket.
 		BucketName   string   `json:"bucketName"`   // When present, name of bucket - may be empty
 		Capabilities []string `json:"capabilities"` // A list of strings, each one naming a capability the key has.
-		NamePrefix   interface{} `json:"namePrefix"` // When present, access is restricted to files whose names start with the prefix
+		NamePrefix   any      `json:"namePrefix"`   // When present, access is restricted to files whose names start with the prefix
 	} `json:"allowed"`
 	APIURL             string `json:"apiUrl"`             // The base URL to use for all API calls except for uploading and downloading files.
 	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
@@ -16,6 +16,7 @@ import (
 	"io"
 	"net/http"
 	"path"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -589,12 +590,7 @@ func (f *Fs) authorizeAccount(ctx context.Context) error {
 
 // hasPermission returns if the current AuthorizationToken has the selected permission
 func (f *Fs) hasPermission(permission string) bool {
-	for _, capability := range f.info.Allowed.Capabilities {
-		if capability == permission {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(f.info.Allowed.Capabilities, permission)
 }
 
 // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
@@ -1275,7 +1271,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden b
 	toBeDeleted := make(chan *api.File, f.ci.Transfers)
 	var wg sync.WaitGroup
 	wg.Add(f.ci.Transfers)
-	for i := 0; i < f.ci.Transfers; i++ {
+	for range f.ci.Transfers {
 		go func() {
 			defer wg.Done()
 			for object := range toBeDeleted {
@@ -1939,7 +1935,7 @@ func init() {
 // urlEncode encodes in with % encoding
 func urlEncode(in string) string {
 	var out bytes.Buffer
-	for i := 0; i < len(in); i++ {
+	for i := range len(in) {
 		c := in[i]
 		if noNeedToEncode[c] {
 			_ = out.WriteByte(c)
@@ -2260,7 +2256,7 @@ See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
 	},
 }
 
-func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	var newRule api.LifecycleRule
 	if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
 		days, err := strconv.Atoi(daysStr)
@@ -2349,7 +2345,7 @@ Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
 	},
 }
 
-func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	maxAge := defaultMaxAge
 	if opt["max-age"] != "" {
 		maxAge, err = fs.ParseDuration(opt["max-age"])
@@ -2372,7 +2368,7 @@ it would do.
 `,
 }
 
-func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	return nil, f.cleanUp(ctx, true, false, 0)
 }
 
@@ -2391,7 +2387,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "lifecycle":
 		return f.lifecycleCommand(ctx, name, arg, opt)
@@ -478,17 +478,14 @@ func (up *largeUpload) Copy(ctx context.Context) (err error) {
 		remaining = up.size
 	)
 	g.SetLimit(up.f.opt.UploadConcurrency)
-	for part := 0; part < up.parts; part++ {
+	for part := range up.parts {
 		// Fail fast, in case an errgroup managed function returns an error
 		// gCtx is cancelled. There is no point in copying all the other parts.
 		if gCtx.Err() != nil {
 			break
 		}
 
-		reqSize := remaining
-		if reqSize >= up.chunkSize {
-			reqSize = up.chunkSize
-		}
-
+		reqSize := min(remaining, up.chunkSize)
 		part := part // for the closure
 		g.Go(func() (err error) {
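The reqSize change above is one of many places in this commit where a compare-and-assign clamp collapses into the go1.21 min builtin. A tiny standalone illustration, with placeholder values rather than rclone's:

    package main

    import "fmt"

    func main() {
    	var remaining, chunkSize int64 = 1500, 1024
    	// before:
    	//   reqSize := remaining
    	//   if reqSize >= chunkSize {
    	//       reqSize = chunkSize
    	//   }
    	reqSize := min(remaining, chunkSize)
    	fmt.Println(reqSize) // 1024
    }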
@@ -237,8 +237,8 @@ func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *boxCustomC
 	return claims, nil
 }
 
-func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]interface{} {
-	signingHeaders := map[string]interface{}{
+func getSigningHeaders(boxConfig *api.ConfigJSON) map[string]any {
+	signingHeaders := map[string]any{
 		"kid": boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
 	}
 	return signingHeaders
@@ -1343,12 +1343,8 @@ func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.
 	nextStreamPosition = streamPosition
 
 	for {
-		limit := f.opt.ListChunk
-
 		// box only allows a max of 500 events
-		if limit > 500 {
-			limit = 500
-		}
+		limit := min(f.opt.ListChunk, 500)
 
 		opts := rest.Opts{
 			Method: "GET",
@@ -105,7 +105,7 @@ func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api
 	const defaultDelay = 10
 	var tries int
 outer:
-	for tries = 0; tries < maxTries; tries++ {
+	for tries = range maxTries {
 		err = o.fs.pacer.Call(func() (bool, error) {
 			resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
 			if err != nil {
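The `for tries = range maxTries` form above iterates tries = 0..maxTries-1 like the loop it replaces; one subtlety worth knowing when the variable is read after the loop is its final value. A standalone sketch (hypothetical values, not rclone code):

    package main

    import "fmt"

    func main() {
    	const maxTries = 3
    	var tries int
    	for tries = range maxTries {
    		fmt.Println("attempt", tries) // prints 0, 1, 2
    	}
    	// After a full run, tries == 2 here; the classic
    	// for tries = 0; tries < maxTries; tries++ loop
    	// would have left tries == maxTries (3).
    	fmt.Println("tries =", tries)
    }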
@@ -203,7 +203,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, direct
 	errs := make(chan error, 1)
 	var wg sync.WaitGroup
 outer:
-	for part := 0; part < session.TotalParts; part++ {
+	for part := range session.TotalParts {
 		// Check any errors
 		select {
 		case err = <-errs:
@@ -211,10 +211,7 @@ outer:
 		default:
 		}
 
-		reqSize := remaining
-		if reqSize >= chunkSize {
-			reqSize = chunkSize
-		}
+		reqSize := min(remaining, chunkSize)
 
 		// Make a block of memory
 		buf := make([]byte, reqSize)
backend/cache/cache.go (vendored)
@@ -1092,7 +1092,7 @@ func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) er
 		return err
 	}
 
-	for i := 0; i < len(entries); i++ {
+	for i := range entries {
 		innerDir, ok := entries[i].(fs.Directory)
 		if ok {
 			err := f.recurse(ctx, innerDir.Remote(), list)
@@ -1428,7 +1428,7 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
 	}()
 
 	// wait until both are done
-	for c := 0; c < 2; c++ {
+	for range 2 {
 		<-done
 	}
 }
@@ -1753,7 +1753,7 @@ func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 }
 
 // Stats returns stats about the cache storage
-func (f *Fs) Stats() (map[string]map[string]interface{}, error) {
+func (f *Fs) Stats() (map[string]map[string]any, error) {
 	return f.cache.Stats()
 }
 
@@ -1933,7 +1933,7 @@ var commandHelp = []fs.CommandHelp{
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
 	switch name {
 	case "stats":
 		return f.Stats()
backend/cache/cache_internal_test.go (vendored)
@@ -360,7 +360,7 @@ func TestInternalWrappedWrittenContentMatches(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, int64(len(checkSample)), o.Size())
 
-	for i := 0; i < len(checkSample); i++ {
+	for i := range checkSample {
 		require.Equal(t, testData[i], checkSample[i])
 	}
 }
@@ -387,7 +387,7 @@ func TestInternalLargeWrittenContentMatches(t *testing.T) {
 
 	readData, err := runInstance.readDataFromRemote(t, rootFs, "data.bin", 0, testSize, false)
 	require.NoError(t, err)
-	for i := 0; i < len(readData); i++ {
+	for i := range readData {
 		require.Equalf(t, testData[i], readData[i], "at byte %v", i)
 	}
 }
@@ -688,7 +688,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
 	co, ok := o.(*cache.Object)
 	require.True(t, ok)
 
-	for i := 0; i < 4; i++ { // read first 4
+	for i := range 4 { // read first 4
 		_ = runInstance.readDataFromObj(t, co, chunkSize*int64(i), chunkSize*int64(i+1), false)
 	}
 	cfs.CleanUpCache(true)
@@ -971,7 +971,7 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
 	f, err := os.CreateTemp("", "rclonecache-tempfile")
 	require.NoError(t, err)
 
-	for i := 0; i < int(cnt); i++ {
+	for range int(cnt) {
 		data := randStringBytes(int(chunk))
 		_, _ = f.Write(data)
 	}
@@ -1085,9 +1085,9 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
 	return err
 }
 
-func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error) {
+func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]any, error) {
 	var err error
-	var l []interface{}
+	var l []any
 	var list fs.DirEntries
 	list, err = f.List(context.Background(), remote)
 	for _, ll := range list {
@@ -1215,7 +1215,7 @@ func (r *run) listenForBackgroundUpload(t *testing.T, f fs.Fs, remote string) ch
 	var err error
 	var state cache.BackgroundUploadState
 
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		select {
 		case state = <-buCh:
 			// continue
@@ -1293,7 +1293,7 @@ func (r *run) completeAllBackgroundUploads(t *testing.T, f fs.Fs, lastRemote str
 
 func (r *run) retryBlock(block func() error, maxRetries int, rate time.Duration) error {
 	var err error
-	for i := 0; i < maxRetries; i++ {
+	for range maxRetries {
 		err = block()
 		if err == nil {
 			return nil
backend/cache/cache_upload_test.go (vendored)
@@ -162,7 +162,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
 	randInstance := rand.New(rand.NewSource(time.Now().Unix()))
 
 	lastFile := ""
-	for i := 0; i < totalFiles; i++ {
+	for i := range totalFiles {
 		size := int64(randInstance.Intn(maxSize-minSize) + minSize)
 		testReader := runInstance.randomReader(t, size)
 		remote := "test/" + strconv.Itoa(i) + ".bin"
backend/cache/handle.go (vendored)
@@ -182,7 +182,7 @@ func (r *Handle) queueOffset(offset int64) {
 		}
 	}
 
-	for i := 0; i < r.workers; i++ {
+	for i := range r.workers {
 		o := r.preloadOffset + int64(r.cacheFs().opt.ChunkSize)*int64(i)
 		if o < 0 || o >= r.cachedObject.Size() {
 			continue
@@ -222,7 +222,7 @@ func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
 	if !found {
 		// we're gonna give the workers a chance to pickup the chunk
 		// and retry a couple of times
-		for i := 0; i < r.cacheFs().opt.ReadRetries*8; i++ {
+		for i := range r.cacheFs().opt.ReadRetries * 8 {
 			data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
 			if err == nil {
 				found = true
backend/cache/plex.go (vendored)
@@ -209,7 +209,7 @@ func (p *plexConnector) authenticate() error {
 	if err != nil {
 		return err
 	}
-	var data map[string]interface{}
+	var data map[string]any
 	err = json.NewDecoder(resp.Body).Decode(&data)
 	if err != nil {
 		return fmt.Errorf("failed to obtain token: %w", err)
@@ -273,11 +273,11 @@ func (p *plexConnector) isPlaying(co *Object) bool {
 }
 
 // adapted from: https://stackoverflow.com/a/28878037 (credit)
-func get(m interface{}, path ...interface{}) (interface{}, bool) {
+func get(m any, path ...any) (any, bool) {
 	for _, p := range path {
 		switch idx := p.(type) {
 		case string:
-			if mm, ok := m.(map[string]interface{}); ok {
+			if mm, ok := m.(map[string]any); ok {
 				if val, found := mm[idx]; found {
 					m = val
 					continue
@@ -285,7 +285,7 @@ func get(m interface{}, path ...interface{}) (interface{}, bool) {
 			}
 			return nil, false
 		case int:
-			if mm, ok := m.([]interface{}); ok {
+			if mm, ok := m.([]any); ok {
 				if len(mm) > idx {
 					m = mm[idx]
 					continue
backend/cache/storage_persistent.go (vendored)
@@ -607,16 +607,16 @@ func (b *Persistent) CleanChunksBySize(maxSize int64) {
 }
 
 // Stats returns a go map with the stats key values
-func (b *Persistent) Stats() (map[string]map[string]interface{}, error) {
-	r := make(map[string]map[string]interface{})
-	r["data"] = make(map[string]interface{})
+func (b *Persistent) Stats() (map[string]map[string]any, error) {
+	r := make(map[string]map[string]any)
+	r["data"] = make(map[string]any)
 	r["data"]["oldest-ts"] = time.Now()
 	r["data"]["oldest-file"] = ""
 	r["data"]["newest-ts"] = time.Now()
 	r["data"]["newest-file"] = ""
 	r["data"]["total-chunks"] = 0
 	r["data"]["total-size"] = int64(0)
-	r["files"] = make(map[string]interface{})
+	r["files"] = make(map[string]any)
 	r["files"]["oldest-ts"] = time.Now()
 	r["files"]["oldest-name"] = ""
 	r["files"]["newest-ts"] = time.Now()
@@ -632,7 +632,7 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
 
 // forbidChunk prints error message or raises error if file is chunk.
 // First argument sets log prefix, use `false` to suppress message.
-func (f *Fs) forbidChunk(o interface{}, filePath string) error {
+func (f *Fs) forbidChunk(o any, filePath string) error {
 	if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
 		if f.opt.FailHard {
 			return fmt.Errorf("chunk overlap with %q", parentPath)
@@ -680,7 +680,7 @@ func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err
 	circleSec := unixSec % closestPrimeZzzzSeconds
 	first4chars := strconv.FormatInt(circleSec, 36)
 
-	for tries := 0; tries < maxTransactionProbes; tries++ {
+	for range maxTransactionProbes {
 		f.xactIDMutex.Lock()
 		randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
 		f.xactIDMutex.Unlock()
@@ -1189,10 +1189,7 @@ func (f *Fs) put(
 	}
 
 	tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
-	size := c.sizeLeft
-	if size > c.chunkSize {
-		size = c.chunkSize
-	}
+	size := min(c.sizeLeft, c.chunkSize)
 	savedReadCount := c.readCount
 
 	// If a single chunk is expected, avoid the extra rename operation
|
|||||||
const bufLen = 1048576 // 1 MiB
|
const bufLen = 1048576 // 1 MiB
|
||||||
buf := make([]byte, bufLen)
|
buf := make([]byte, bufLen)
|
||||||
for size > 0 {
|
for size > 0 {
|
||||||
n := size
|
n := min(size, bufLen)
|
||||||
if n > bufLen {
|
|
||||||
n = bufLen
|
|
||||||
}
|
|
||||||
if _, err := io.ReadFull(in, buf[0:n]); err != nil {
|
if _, err := io.ReadFull(in, buf[0:n]); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@@ -40,7 +40,7 @@ func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
 	})
 }
 
-type settings map[string]interface{}
+type settings map[string]any
 
 func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, path string, opts settings) fs.Fs {
 	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
@@ -192,7 +192,7 @@ func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bo
 		dirNameEncrypt:  dirNameEncrypt,
 		encryptedSuffix: ".bin",
 	}
-	c.buffers.New = func() interface{} {
+	c.buffers.New = func() any {
 		return new([blockSize]byte)
 	}
 	err := c.Key(password, salt)
@@ -336,7 +336,7 @@ func (c *Cipher) obfuscateSegment(plaintext string) string {
 	_, _ = result.WriteString(strconv.Itoa(dir) + ".")
 
 	// but we'll augment it with the nameKey for real calculation
-	for i := 0; i < len(c.nameKey); i++ {
+	for i := range len(c.nameKey) {
 		dir += int(c.nameKey[i])
 	}
 
@@ -418,7 +418,7 @@ func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) {
 	}
 
 	// add the nameKey to get the real rotate distance
-	for i := 0; i < len(c.nameKey); i++ {
+	for i := range len(c.nameKey) {
 		dir += int(c.nameKey[i])
 	}
 
@@ -664,7 +664,7 @@ func (n *nonce) increment() {
 // add a uint64 to the nonce
 func (n *nonce) add(x uint64) {
 	carry := uint16(0)
-	for i := 0; i < 8; i++ {
+	for i := range 8 {
 		digit := (*n)[i]
 		xDigit := byte(x)
 		x >>= 8
@@ -1307,10 +1307,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
 	open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 		end := len(ciphertext)
 		if underlyingLimit >= 0 {
-			end = int(underlyingOffset + underlyingLimit)
-			if end > len(ciphertext) {
-				end = len(ciphertext)
-			}
+			end = min(int(underlyingOffset+underlyingLimit), len(ciphertext))
 		}
 		reader = io.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
 		return reader, nil
@@ -1490,7 +1487,7 @@ func TestDecrypterRead(t *testing.T) {
 	assert.NoError(t, err)
 
 	// Test truncating the file at each possible point
-	for i := 0; i < len(file16)-1; i++ {
+	for i := range len(file16) - 1 {
 		what := fmt.Sprintf("truncating to %d/%d", i, len(file16))
 		cd := newCloseDetector(bytes.NewBuffer(file16[:i]))
 		fh, err := c.newDecrypter(cd)
@@ -924,7 +924,7 @@ Usage Example:
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "decode":
 		out := make([]string, 0, len(arg))
@@ -25,7 +25,7 @@ func Pad(n int, buf []byte) []byte {
 	}
 	length := len(buf)
 	padding := n - (length % n)
-	for i := 0; i < padding; i++ {
+	for range padding {
 		buf = append(buf, byte(padding))
 	}
 	if (len(buf) % n) != 0 {
@@ -54,7 +54,7 @@ func Unpad(n int, buf []byte) ([]byte, error) {
 	if padding == 0 {
 		return nil, ErrorPaddingTooShort
 	}
-	for i := 0; i < padding; i++ {
+	for i := range padding {
 		if buf[length-1-i] != byte(padding) {
 			return nil, ErrorPaddingNotAllTheSame
 		}
@@ -18,6 +18,7 @@ import (
 	"net/http"
 	"os"
 	"path"
+	"slices"
 	"sort"
 	"strconv"
 	"strings"
@@ -199,12 +200,7 @@ func driveScopes(scopesString string) (scopes []string) {
 
 // Returns true if one of the scopes was "drive.appfolder"
 func driveScopesContainsAppFolder(scopes []string) bool {
-	for _, scope := range scopes {
-		if scope == scopePrefix+"drive.appfolder" {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(scopes, scopePrefix+"drive.appfolder")
 }
 
 func driveOAuthOptions() []fs.Option {
@@ -958,12 +954,7 @@ func parseDrivePath(path string) (root string, err error) {
 type listFn func(*drive.File) bool
 
 func containsString(slice []string, s string) bool {
-	for _, e := range slice {
-		if e == s {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(slice, s)
 }
 
 // getFile returns drive.File for the ID passed and fields passed in
@@ -1152,13 +1143,7 @@ OUTER:
 			// Check the case of items is correct since
 			// the `=` operator is case insensitive.
 			if title != "" && title != item.Name {
-				found := false
-				for _, stem := range stems {
-					if stem == item.Name {
-						found = true
-						break
-					}
-				}
+				found := slices.Contains(stems, item.Name)
 				if !found {
 					continue
 				}
@@ -1561,13 +1546,10 @@ func (f *Fs) getFileFields(ctx context.Context) (fields googleapi.Field) {
 func (f *Fs) newRegularObject(ctx context.Context, remote string, info *drive.File) (obj fs.Object, err error) {
 	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
 	if f.opt.SkipChecksumGphotos {
-		for _, space := range info.Spaces {
-			if space == "photos" {
-				info.Md5Checksum = ""
-				info.Sha1Checksum = ""
-				info.Sha256Checksum = ""
-				break
-			}
-		}
+		if slices.Contains(info.Spaces, "photos") {
+			info.Md5Checksum = ""
+			info.Sha1Checksum = ""
+			info.Sha256Checksum = ""
+		}
 	}
 	o := &Object{
@@ -2245,7 +2227,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 	wg.Add(1)
 	in <- listREntry{directoryID, dir}
 
-	for i := 0; i < f.ci.Checkers; i++ {
+	for range f.ci.Checkers {
 		go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
 	}
 	go func() {
@@ -2254,11 +2236,8 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		// if the input channel overflowed add the collected entries to the channel now
 		for len(overflow) > 0 {
 			mu.Lock()
-			l := len(overflow)
 			// only fill half of the channel to prevent entries being put into overflow again
-			if l > listRInputBuffer/2 {
-				l = listRInputBuffer / 2
-			}
+			l := min(len(overflow), listRInputBuffer/2)
 			wg.Add(l)
 			for _, d := range overflow[:l] {
 				in <- d
@@ -2278,7 +2257,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
 		mu.Unlock()
 	}()
 	// wait until the all workers to finish
-	for i := 0; i < f.ci.Checkers; i++ {
+	for range f.ci.Checkers {
 		e := <-out
 		mu.Lock()
 		// if one worker returns an error early, close the input so all other workers exit
@@ -3914,7 +3893,7 @@ Third delete all orphaned files to the trash
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "get":
 		out := make(map[string]string)
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"maps"
 	"strconv"
 	"strings"
 	"sync"
@@ -324,9 +325,7 @@ func (o *baseObject) parseMetadata(ctx context.Context, info *drive.File) (err e
 	metadata := make(fs.Metadata, 16)
 
 	// Dump user metadata first as it overrides system metadata
-	for k, v := range info.Properties {
-		metadata[k] = v
-	}
+	maps.Copy(metadata, info.Properties)
 
 	// System metadata
 	metadata["copy-requires-writer-permission"] = fmt.Sprint(info.CopyRequiresWriterPermission)
@@ -177,10 +177,7 @@ func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
 		if start >= rx.ContentLength {
 			break
 		}
-		reqSize = rx.ContentLength - start
-		if reqSize >= int64(rx.f.opt.ChunkSize) {
-			reqSize = int64(rx.f.opt.ChunkSize)
-		}
+		reqSize = min(rx.ContentLength-start, int64(rx.f.opt.ChunkSize))
 		chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
 	} else {
 		// If size unknown read into buffer
@@ -55,10 +55,7 @@ func (d *digest) Write(p []byte) (n int, err error) {
 	n = len(p)
 	for len(p) > 0 {
 		d.writtenMore = true
-		toWrite := bytesPerBlock - d.n
-		if toWrite > len(p) {
-			toWrite = len(p)
-		}
+		toWrite := min(bytesPerBlock-d.n, len(p))
 		_, err = d.blockHash.Write(p[:toWrite])
 		if err != nil {
 			panic(hashReturnedError)
@@ -11,7 +11,7 @@ import (
 
 func testChunk(t *testing.T, chunk int) {
 	data := make([]byte, chunk)
-	for i := 0; i < chunk; i++ {
+	for i := range chunk {
 		data[i] = 'A'
 	}
 	for _, test := range []struct {
@@ -216,11 +216,11 @@ var ItemFields = mustFields(Item{})
 
 // fields returns the JSON fields in use by opt as a | separated
 // string.
-func fields(opt interface{}) (pipeTags string, err error) {
+func fields(opt any) (pipeTags string, err error) {
 	var tags []string
 	def := reflect.ValueOf(opt)
 	defType := def.Type()
-	for i := 0; i < def.NumField(); i++ {
+	for i := range def.NumField() {
 		field := defType.Field(i)
 		tag, ok := field.Tag.Lookup("json")
 		if !ok {
|
|||||||
|
|
||||||
// mustFields returns the JSON fields in use by opt as a | separated
|
// mustFields returns the JSON fields in use by opt as a | separated
|
||||||
// string. It panics on failure.
|
// string. It panics on failure.
|
||||||
func mustFields(opt interface{}) string {
|
func mustFields(opt any) string {
|
||||||
tags, err := fields(opt)
|
tags, err := fields(opt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
@@ -351,12 +351,12 @@ type SpaceInfo struct {
 // DeleteResponse is returned from doDeleteFile
 type DeleteResponse struct {
 	Status
 	Deleted        []string `json:"deleted"`
-	Errors         []interface{} `json:"errors"`
+	Errors         []any    `json:"errors"`
 	ID             string   `json:"fi_id"`
 	BackgroundTask int      `json:"backgroundtask"`
 	UsSize         string   `json:"us_size"`
 	PaSize         string   `json:"pa_size"`
 	//SpaceInfo SpaceInfo `json:"spaceinfo"`
 }
 
|
@ -371,7 +371,7 @@ func (f *Fs) getToken(ctx context.Context) (token string, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// params for rpc
|
// params for rpc
|
||||||
type params map[string]interface{}
|
type params map[string]any
|
||||||
|
|
||||||
// rpc calls the rpc.php method of the SME file fabric
|
// rpc calls the rpc.php method of the SME file fabric
|
||||||
//
|
//
|
||||||
|
@@ -10,6 +10,7 @@ import (
 	"net/http"
 	"net/url"
 	"path"
+	"slices"
 	"strings"
 	"time"
 
@@ -169,11 +170,9 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 	}
 
 	if apiErr, ok := err.(files_sdk.ResponseError); ok {
-		for _, e := range retryErrorCodes {
-			if apiErr.HttpCode == e {
-				fs.Debugf(nil, "Retrying API error %v", err)
-				return true, err
-			}
+		if slices.Contains(retryErrorCodes, apiErr.HttpCode) {
+			fs.Debugf(nil, "Retrying API error %v", err)
+			return true, err
 		}
 	}
 
@@ -17,7 +17,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-type settings map[string]interface{}
+type settings map[string]any
 
 func deriveFs(ctx context.Context, t *testing.T, f fs.Fs, opts settings) fs.Fs {
 	fsName := strings.Split(f.Name(), "{")[0] // strip off hash
@@ -4,6 +4,7 @@ package googlephotos
 
 import (
 	"path"
+	"slices"
 	"strings"
 	"sync"
 
@@ -119,7 +120,7 @@ func (as *albums) _del(album *api.Album) {
 	dirs := as.path[dir]
 	for i, dir := range dirs {
 		if dir == leaf {
-			dirs = append(dirs[:i], dirs[i+1:]...)
+			dirs = slices.Delete(dirs, i, i+1)
 			break
 		}
 	}
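`slices.Delete(dirs, i, i+1)` above is the go1.21 equivalent of the `append(dirs[:i], dirs[i+1:]...)` splice idiom. A self-contained sketch with made-up data:

    package main

    import (
    	"fmt"
    	"slices"
    )

    func main() {
    	dirs := []string{"a", "b", "c", "d"}
    	// Equivalent to: dirs = append(dirs[:2], dirs[3:]...)
    	// Delete shifts the tail left and returns the shortened slice.
    	dirs = slices.Delete(dirs, 2, 3)
    	fmt.Println(dirs) // [a b d]
    }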
@@ -388,7 +388,7 @@ func (f *Fs) fetchEndpoint(ctx context.Context, name string) (endpoint string, e
 		Method:  "GET",
 		RootURL: "https://accounts.google.com/.well-known/openid-configuration",
 	}
-	var openIDconfig map[string]interface{}
+	var openIDconfig map[string]any
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.unAuth.CallJSON(ctx, &opts, nil, &openIDconfig)
 		return shouldRetry(ctx, resp, err)
@@ -448,7 +448,7 @@ func (f *Fs) Disconnect(ctx context.Context) (err error) {
 			"token_type_hint": []string{"access_token"},
 		},
 	}
-	var res interface{}
+	var res any
 	err = f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, &opts, nil, &res)
 		return shouldRetry(ctx, resp, err)
@@ -24,7 +24,7 @@ import (
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "drop":
 		return nil, f.db.Stop(true)
|
@ -6,6 +6,7 @@ import (
|
|||||||
"encoding/gob"
|
"encoding/gob"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"maps"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -195,9 +196,7 @@ func (op *kvPut) Do(ctx context.Context, b kv.Bucket) (err error) {
 		r.Fp = op.fp
 	}
 
-	for hashType, hashVal := range op.hashes {
-		r.Hashes[hashType] = hashVal
-	}
+	maps.Copy(r.Hashes, op.hashes)
 	if data, err = r.encode(op.key); err != nil {
 		return fmt.Errorf("marshal failed: %w", err)
 	}
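`maps.Copy(dst, src)`, as used above, inserts every key/value pair of src into dst, overwriting keys that already exist — the same effect as the removed loop. A minimal sketch with invented hash names:

    package main

    import (
    	"fmt"
    	"maps"
    )

    func main() {
    	dst := map[string]string{"md5": "old", "sha1": "keep"}
    	src := map[string]string{"md5": "new", "crc32": "added"}
    	// Equivalent to: for k, v := range src { dst[k] = v }
    	maps.Copy(dst, src)
    	fmt.Println(dst) // map[crc32:added md5:new sha1:keep]
    }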
@@ -52,10 +52,7 @@ func writeByBlock(p []byte, writer io.Writer, blockSize uint32, bytesInBlock *ui
 	total := len(p)
 	nullBytes := make([]byte, blockSize)
 	for len(p) > 0 {
-		toWrite := int(blockSize - *bytesInBlock)
-		if toWrite > len(p) {
-			toWrite = len(p)
-		}
+		toWrite := min(int(blockSize-*bytesInBlock), len(p))
 		c, err := writer.Write(p[:toWrite])
 		*bytesInBlock += uint32(c)
 		*onlyNullBytesInBlock = *onlyNullBytesInBlock && bytes.Equal(nullBytes[:toWrite], p[:toWrite])
@@ -276,7 +273,7 @@ func (h *hidriveHash) Sum(b []byte) []byte {
 	}
 
 	checksum := zeroSum
-	for i := 0; i < len(h.levels); i++ {
+	for i := range h.levels {
 		level := h.levels[i]
 		if i < len(h.levels)-1 {
 			// Aggregate non-empty non-final levels.
@@ -216,7 +216,7 @@ func TestLevelWrite(t *testing.T) {
 func TestLevelIsFull(t *testing.T) {
 	content := [hidrivehash.Size]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
 	l := hidrivehash.NewLevel()
-	for i := 0; i < 256; i++ {
+	for range 256 {
 		assert.False(t, l.(internal.LevelHash).IsFull())
 		written, err := l.Write(content[:])
 		assert.Equal(t, len(content), written)
@@ -505,7 +505,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		entries = append(entries, entry)
 		entriesMu.Unlock()
 	}
-	for i := 0; i < checkers; i++ {
+	for range checkers {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
@@ -740,7 +740,7 @@ It doesn't return anything.
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "set":
 		newOpt := f.opt
|
@ -76,7 +76,7 @@ func (c *Client) DriveService() (*DriveService, error) {
|
|||||||
// This function is the main entry point for making requests to the iCloud
|
// This function is the main entry point for making requests to the iCloud
|
||||||
// API. If the initial request returns a 401 (Unauthorized), it will try to
|
// API. If the initial request returns a 401 (Unauthorized), it will try to
|
||||||
// reauthenticate and retry the request.
|
// reauthenticate and retry the request.
|
||||||
func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
|
func (c *Client) Request(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
|
||||||
resp, err = c.Session.Request(ctx, opts, request, response)
|
resp, err = c.Session.Request(ctx, opts, request, response)
|
||||||
if err != nil && resp != nil {
|
if err != nil && resp != nil {
|
||||||
// try to reauth
|
// try to reauth
|
||||||
@@ -100,7 +100,7 @@ func (c *Client) Request(ctx context.Context, opts rest.Opts, request interface{
 // This function is useful when you have a session that is already
 // authenticated, but you need to make a request without triggering
 // a re-authentication.
-func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
+func (c *Client) RequestNoReAuth(ctx context.Context, opts rest.Opts, request any, response any) (resp *http.Response, err error) {
 	// Make the request without re-authenticating
 	resp, err = c.Session.Request(ctx, opts, request, response)
 	return resp, err
@@ -161,6 +161,6 @@ func newRequestError(Status string, Text string) *RequestError {
 }
 
 // newErr orf makes a new error from sprintf parameters.
-func newRequestErrorf(Status string, Text string, Parameters ...interface{}) *RequestError {
+func newRequestErrorf(Status string, Text string, Parameters ...any) *RequestError {
 	return newRequestError(strings.ToLower(Status), fmt.Sprintf(Text, Parameters...))
 }
|
@ -733,8 +733,8 @@ type DocumentUpdateResponse struct {
|
|||||||
StatusCode int `json:"status_code"`
|
StatusCode int `json:"status_code"`
|
||||||
ErrorMessage string `json:"error_message"`
|
ErrorMessage string `json:"error_message"`
|
||||||
} `json:"status"`
|
} `json:"status"`
|
||||||
OperationID interface{} `json:"operation_id"`
|
OperationID any `json:"operation_id"`
|
||||||
Document *Document `json:"document"`
|
Document *Document `json:"document"`
|
||||||
} `json:"results"`
|
} `json:"results"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -765,9 +765,9 @@ type Document struct {
|
|||||||
IsWritable bool `json:"is_writable"`
|
IsWritable bool `json:"is_writable"`
|
||||||
IsHidden bool `json:"is_hidden"`
|
IsHidden bool `json:"is_hidden"`
|
||||||
} `json:"file_flags"`
|
} `json:"file_flags"`
|
||||||
LastOpenedTime int64 `json:"lastOpenedTime"`
|
LastOpenedTime int64 `json:"lastOpenedTime"`
|
||||||
RestorePath interface{} `json:"restorePath"`
|
RestorePath any `json:"restorePath"`
|
||||||
HasChainedParent bool `json:"hasChainedParent"`
|
HasChainedParent bool `json:"hasChainedParent"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// DriveID returns the drive ID of the Document.
|
// DriveID returns the drive ID of the Document.
|
||||||
|
@ -3,13 +3,13 @@ package api
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"maps"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"slices"
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/oracle/oci-go-sdk/v65/common"
|
"github.com/oracle/oci-go-sdk/v65/common"
|
||||||
|
|
||||||
"github.com/rclone/rclone/fs/fshttp"
|
"github.com/rclone/rclone/fs/fshttp"
|
||||||
"github.com/rclone/rclone/lib/rest"
|
"github.com/rclone/rclone/lib/rest"
|
||||||
)
|
)
|
||||||
@@ -35,7 +35,7 @@ type Session struct {
 // }
 
 // Request makes a request
-func (s *Session) Request(ctx context.Context, opts rest.Opts, request interface{}, response interface{}) (*http.Response, error) {
+func (s *Session) Request(ctx context.Context, opts rest.Opts, request any, response any) (*http.Response, error) {
 	resp, err := s.srv.CallJSON(ctx, &opts, &request, &response)
 
 	if err != nil {
@@ -129,7 +129,7 @@ func (s *Session) AuthWithToken(ctx context.Context) error {
 
 // Validate2FACode validates the 2FA code
 func (s *Session) Validate2FACode(ctx context.Context, code string) error {
-	values := map[string]interface{}{"securityCode": map[string]string{"code": code}}
+	values := map[string]any{"securityCode": map[string]string{"code": code}}
 	body, err := IntoReader(values)
 	if err != nil {
 		return err
@ -220,9 +220,7 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string
|
|||||||
"Referer": fmt.Sprintf("%s/", homeEndpoint),
|
"Referer": fmt.Sprintf("%s/", homeEndpoint),
|
||||||
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
|
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
|
||||||
}
|
}
|
||||||
for k, v := range overwrite {
|
maps.Copy(headers, overwrite)
|
||||||
headers[k] = v
|
|
||||||
}
|
|
||||||
return headers
|
return headers
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -230,9 +228,7 @@ func (s *Session) GetAuthHeaders(overwrite map[string]string) map[string]string
|
|||||||
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
|
func (s *Session) GetHeaders(overwrite map[string]string) map[string]string {
|
||||||
headers := GetCommonHeaders(map[string]string{})
|
headers := GetCommonHeaders(map[string]string{})
|
||||||
headers["Cookie"] = s.GetCookieString()
|
headers["Cookie"] = s.GetCookieString()
|
||||||
for k, v := range overwrite {
|
maps.Copy(headers, overwrite)
|
||||||
headers[k] = v
|
|
||||||
}
|
|
||||||
return headers
|
return headers
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -254,9 +250,7 @@ func GetCommonHeaders(overwrite map[string]string) map[string]string {
|
|||||||
"Referer": fmt.Sprintf("%s/", baseEndpoint),
|
"Referer": fmt.Sprintf("%s/", baseEndpoint),
|
||||||
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
|
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
|
||||||
}
|
}
|
||||||
for k, v := range overwrite {
|
maps.Copy(headers, overwrite)
|
||||||
headers[k] = v
|
|
||||||
}
|
|
||||||
return headers
|
return headers
|
||||||
}
|
}
|
||||||
|
|
||||||
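The three header-merging loops above all collapse to maps.Copy (Go 1.21), which inserts every key/value of the source map into the destination, overwriting existing keys. A self-contained sketch with made-up header values:

    package main

    import (
        "fmt"
        "maps"
    )

    func main() {
        headers := map[string]string{"Accept": "application/json", "User-Agent": "base"}
        overwrite := map[string]string{"User-Agent": "custom"}
        // Equivalent to: for k, v := range overwrite { headers[k] = v }
        maps.Copy(headers, overwrite)
        fmt.Println(headers["User-Agent"]) // custom
    }
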
@@ -338,33 +332,33 @@ type AccountInfo struct {
 
 // ValidateDataDsInfo represents an validation info
 type ValidateDataDsInfo struct {
 	HsaVersion int `json:"hsaVersion"`
 	LastName string `json:"lastName"`
 	ICDPEnabled bool `json:"iCDPEnabled"`
 	TantorMigrated bool `json:"tantorMigrated"`
 	Dsid string `json:"dsid"`
 	HsaEnabled bool `json:"hsaEnabled"`
 	IsHideMyEmailSubscriptionActive bool `json:"isHideMyEmailSubscriptionActive"`
 	IroncadeMigrated bool `json:"ironcadeMigrated"`
 	Locale string `json:"locale"`
 	BrZoneConsolidated bool `json:"brZoneConsolidated"`
 	ICDRSCapableDeviceList string `json:"ICDRSCapableDeviceList"`
 	IsManagedAppleID bool `json:"isManagedAppleID"`
 	IsCustomDomainsFeatureAvailable bool `json:"isCustomDomainsFeatureAvailable"`
 	IsHideMyEmailFeatureAvailable bool `json:"isHideMyEmailFeatureAvailable"`
 	ContinueOnDeviceEligibleDeviceInfo []string `json:"ContinueOnDeviceEligibleDeviceInfo"`
 	Gilligvited bool `json:"gilligvited"`
-	AppleIDAliases []interface{} `json:"appleIdAliases"`
+	AppleIDAliases []any `json:"appleIdAliases"`
 	UbiquityEOLEnabled bool `json:"ubiquityEOLEnabled"`
 	IsPaidDeveloper bool `json:"isPaidDeveloper"`
 	CountryCode string `json:"countryCode"`
 	NotificationID string `json:"notificationId"`
 	PrimaryEmailVerified bool `json:"primaryEmailVerified"`
 	ADsID string `json:"aDsID"`
 	Locked bool `json:"locked"`
 	ICDRSCapableDeviceCount int `json:"ICDRSCapableDeviceCount"`
 	HasICloudQualifyingDevice bool `json:"hasICloudQualifyingDevice"`
 	PrimaryEmail string `json:"primaryEmail"`
 	AppleIDEntries []struct {
 		IsPrimary bool `json:"isPrimary"`
 		Type string `json:"type"`
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"net/http"
+	"slices"
 	"strconv"
 	"time"
 
@@ -142,12 +143,7 @@ func shouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
 	if resp == nil {
 		return false
 	}
-	for _, e := range retryErrorCodes {
-		if resp.StatusCode == e {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(retryErrorCodes, resp.StatusCode)
 }
 
 func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
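slices.Contains (Go 1.21) reports whether a slice includes a value, replacing the hand-rolled scan-and-return loop above. A standalone sketch with hypothetical retry codes:

    package main

    import (
        "fmt"
        "slices"
    )

    // retryable mirrors the rewritten shouldRetryHTTP: one call instead of a loop.
    func retryable(status int, retryErrorCodes []int) bool {
        return slices.Contains(retryErrorCodes, status)
    }

    func main() {
        codes := []int{429, 500, 502, 503} // hypothetical retry codes
        fmt.Println(retryable(503, codes)) // true
        fmt.Println(retryable(404, codes)) // false
    }
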
@@ -13,6 +13,7 @@ import (
 	"net/url"
 	"path"
 	"regexp"
+	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -200,7 +201,7 @@ Only enable if you need to be guaranteed to be reflected after write operations.
 const iaItemMaxSize int64 = 1099511627776
 
 // metadata keys that are not writeable
-var roMetadataKey = map[string]interface{}{
+var roMetadataKey = map[string]any{
 	// do not add mtime here, it's a documented exception
 	"name": nil, "source": nil, "size": nil, "md5": nil,
 	"crc32": nil, "sha1": nil, "format": nil, "old_version": nil,
@@ -991,10 +992,8 @@ func (o *Object) Metadata(ctx context.Context) (m fs.Metadata, err error) {
 
 func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
 	if resp != nil {
-		for _, e := range retryErrorCodes {
-			if resp.StatusCode == e {
-				return true, err
-			}
+		if slices.Contains(retryErrorCodes, resp.StatusCode) {
+			return true, err
 		}
 	}
 	// Ok, not an awserr, check for generic failure conditions
@@ -1147,13 +1146,7 @@ func (f *Fs) waitFileUpload(ctx context.Context, reqPath, tracker string, newSiz
 		}
 
 		fileTrackers, _ := listOrString(iaFile.UpdateTrack)
-		trackerMatch := false
-		for _, v := range fileTrackers {
-			if v == tracker {
-				trackerMatch = true
-				break
-			}
-		}
+		trackerMatch := slices.Contains(fileTrackers, tracker)
 		if !trackerMatch {
 			continue
 		}
@@ -70,7 +70,7 @@ func (t *Rfc3339Time) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
 
 // MarshalJSON turns a Rfc3339Time into JSON
 func (t *Rfc3339Time) MarshalJSON() ([]byte, error) {
-	return []byte(fmt.Sprintf("\"%s\"", t.String())), nil
+	return fmt.Appendf(nil, "\"%s\"", t.String()), nil
 }
 
 // LoginToken is struct representing the login token generated in the WebUI
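fmt.Appendf (Go 1.19) formats directly into a byte slice, avoiding the intermediate string that []byte(fmt.Sprintf(...)) allocates. A minimal sketch with a made-up timestamp:

    package main

    import "fmt"

    func main() {
        // Appends the formatted output to the (nil) slice and returns it;
        // equivalent to []byte(fmt.Sprintf("\"%s\"", ts)) but without
        // building a throwaway string first.
        ts := "2024-01-01T00:00:00Z" // hypothetical value
        b := fmt.Appendf(nil, "\"%s\"", ts)
        fmt.Println(string(b)) // "2024-01-01T00:00:00Z"
    }
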
@@ -165,25 +165,25 @@ type DeviceRegistrationResponse struct {
 
 // CustomerInfo provides general information about the account. Required for finding the correct internal username.
 type CustomerInfo struct {
 	Username string `json:"username"`
 	Email string `json:"email"`
 	Name string `json:"name"`
 	CountryCode string `json:"country_code"`
 	LanguageCode string `json:"language_code"`
 	CustomerGroupCode string `json:"customer_group_code"`
 	BrandCode string `json:"brand_code"`
 	AccountType string `json:"account_type"`
 	SubscriptionType string `json:"subscription_type"`
 	Usage int64 `json:"usage"`
 	Quota int64 `json:"quota"`
 	BusinessUsage int64 `json:"business_usage"`
 	BusinessQuota int64 `json:"business_quota"`
 	WriteLocked bool `json:"write_locked"`
 	ReadLocked bool `json:"read_locked"`
-	LockedCause interface{} `json:"locked_cause"`
+	LockedCause any `json:"locked_cause"`
 	WebHash string `json:"web_hash"`
 	AndroidHash string `json:"android_hash"`
 	IOSHash string `json:"ios_hash"`
 }
 
 // TrashResponse is returned when emptying the Trash
@@ -193,7 +193,7 @@ func (o *Object) set(e *entity) {
 // Call linkbox with the query in opts and return result
 //
 // This will be checked for error and an error will be returned if Status != 1
-func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result interface{}) error {
+func getUnmarshaledResponse(ctx context.Context, f *Fs, opts *rest.Opts, result any) error {
 	err := f.pacer.Call(func() (bool, error) {
 		resp, err := f.srv.CallJSON(ctx, opts, nil, &result)
 		return f.shouldRetry(ctx, resp, err)
@@ -1046,7 +1046,7 @@ you can try to change the output.`,
 // The result should be capable of being JSON encoded
 // If it is a string or a []string it will be shown to the user
 // otherwise it will be JSON encoded and shown to the user like that
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (any, error) {
 	switch name {
 	case "noop":
 		if txt, ok := opt["error"]; ok {
@@ -1056,7 +1056,7 @@ func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[str
 			return nil, errors.New(txt)
 		}
 		if _, ok := opt["echo"]; ok {
-			out := map[string]interface{}{}
+			out := map[string]any{}
 			out["name"] = name
 			out["arg"] = arg
 			out["opt"] = opt
@@ -86,7 +86,7 @@ func TestVerifyCopy(t *testing.T) {
 	require.NoError(t, err)
 	src.(*Object).fs.opt.NoCheckUpdated = true
 
-	for i := 0; i < 100; i++ {
+	for i := range 100 {
 		go r.WriteFile(src.Remote(), fmt.Sprintf("some new content %d", i), src.ModTime(context.Background()))
 	}
 	_, err = operations.Copy(context.Background(), r.Fremote, nil, filePath+"2", src)
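Go 1.22 allows ranging over an integer, so the classic three-clause counter loop becomes for i := range n, or just for range n when the index is unused. A standalone sketch:

    package main

    import "fmt"

    func main() {
        // Equivalent to: for i := 0; i < 3; i++ { ... }
        for i := range 3 {
            fmt.Println("indexed iteration", i)
        }
        // When the index is not needed the variable can be dropped entirely.
        for range 2 {
            fmt.Println("tick")
        }
    }
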
@@ -63,8 +63,8 @@ type UserInfoResponse struct {
 		Prolong bool `json:"prolong"`
 		Promocodes struct {
 		} `json:"promocodes"`
-		Subscription []interface{} `json:"subscription"`
+		Subscription []any `json:"subscription"`
 		Version string `json:"version"`
 	} `json:"billing"`
 	Bonuses struct {
 		CameraUpload bool `json:"camera_upload"`
@@ -901,7 +901,7 @@ func (t *treeState) NextRecord() (fs.DirEntry, error) {
 		return nil, nil
 	case api.ListParseUnknown15:
 		skip := int(r.ReadPu32())
-		for i := 0; i < skip; i++ {
+		for range skip {
 			r.ReadPu32()
 			r.ReadPu32()
 		}
@@ -1768,7 +1768,7 @@ func (f *Fs) eligibleForSpeedup(remote string, size int64, options ...fs.OpenOpt
 func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
 	f.speedupGlobs = nil
 	f.speedupAny = false
-	uniqueValidPatterns := make(map[string]interface{})
+	uniqueValidPatterns := make(map[string]any)
 
 	for _, pattern := range strings.Split(patternString, ",") {
 		pattern = strings.ToLower(strings.TrimSpace(pattern))
@@ -2131,10 +2131,7 @@ func getTransferRange(size int64, options ...fs.OpenOption) (start int64, end in
 	if limit < 0 {
 		limit = size - offset
 	}
-	end = offset + limit
-	if end > size {
-		end = size
-	}
+	end = min(offset+limit, size)
 	partial = !(offset == 0 && end == size)
 	return offset, end, partial
 }
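The built-in min and max functions (Go 1.21) replace the conditional-assignment idiom: clamping to an upper bound becomes min(x, bound) and clamping to a lower bound becomes max(x, bound). A tiny sketch of the range clamp above, with hypothetical numbers:

    package main

    import "fmt"

    func main() {
        size, offset, limit := int64(100), int64(40), int64(80)
        // Old form:
        //   end := offset + limit
        //   if end > size { end = size }
        end := min(offset+limit, size)
        fmt.Println(end) // 100, clamped down from 120
    }
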
@@ -11,7 +11,7 @@ import (
 
 func testChunk(t *testing.T, chunk int) {
 	data := make([]byte, chunk)
-	for i := 0; i < chunk; i++ {
+	for i := range chunk {
 		data[i] = 'A'
 	}
 	for _, test := range []struct {
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"io"
 	"path"
+	"slices"
 	"strings"
 	"sync"
 	"time"
@@ -218,11 +219,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		srv = mega.New().SetClient(fshttp.NewClient(ctx))
 		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
 		srv.SetHTTPS(opt.UseHTTPS)
-		srv.SetLogger(func(format string, v ...interface{}) {
+		srv.SetLogger(func(format string, v ...any) {
 			fs.Infof("*go-mega*", format, v...)
 		})
 		if opt.Debug {
-			srv.SetDebugger(func(format string, v ...interface{}) {
+			srv.SetDebugger(func(format string, v ...any) {
 				fs.Debugf("*go-mega*", format, v...)
 			})
 		}
@@ -498,11 +499,8 @@ func (f *Fs) list(ctx context.Context, dir *mega.Node, fn listFn) (found bool, e
 	if err != nil {
 		return false, fmt.Errorf("list failed: %w", err)
 	}
-	for _, item := range nodes {
-		if fn(item) {
-			found = true
-			break
-		}
+	if slices.ContainsFunc(nodes, fn) {
+		found = true
 	}
 	return
 }
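slices.ContainsFunc generalizes Contains to a predicate: it reports whether any element satisfies the callback, which is exactly what the removed loop-with-break computed. A standalone sketch with a hypothetical node type:

    package main

    import (
        "fmt"
        "slices"
    )

    type node struct{ name string } // hypothetical stand-in for *mega.Node

    func main() {
        nodes := []node{{"a"}, {"b"}, {"c"}}
        isB := func(n node) bool { return n.name == "b" }
        // Returns true as soon as the predicate matches, stopping early
        // just like the original for/break loop.
        fmt.Println(slices.ContainsFunc(nodes, isB)) // true
    }
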
@@ -1156,7 +1154,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 	// Upload the chunks
 	// FIXME do this in parallel
-	for id := 0; id < u.Chunks(); id++ {
+	for id := range u.Chunks() {
 		_, chunkSize, err := u.ChunkLocation(id)
 		if err != nil {
 			return fmt.Errorf("upload failed to read chunk location: %w", err)
@@ -29,7 +29,7 @@ func testPurgeListDeadlock(t *testing.T) {
 	r.Fremote.Features().Disable("Purge") // force fallback-purge
 
 	// make a lot of files to prevent it from finishing too quickly
-	for i := 0; i < 100; i++ {
+	for i := range 100 {
 		dst := "file" + fmt.Sprint(i) + ".txt"
 		r.WriteObject(ctx, dst, "hello", t1)
 	}
@@ -274,7 +274,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 }
 
 // Command the backend to run a named commands: du and symlink
-func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
 	switch name {
 	case "du":
 		// No arg parsing needed, the path is passed in the fs
@@ -858,7 +858,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
 
 // callBackend calls NetStorage API using either rest.Call or rest.CallXML function,
 // depending on whether the response is required
-func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response interface{}, options []fs.OpenOption) (io.ReadCloser, error) {
+func (f *Fs) callBackend(ctx context.Context, URL, method, actionHeader string, noResponse bool, response any, options []fs.OpenOption) (io.ReadCloser, error) {
 	opts := rest.Opts{
 		Method:  method,
 		RootURL: URL,
@@ -1080,7 +1080,7 @@ func (o *Object) netStorageDownloadRequest(ctx context.Context, options []fs.Ope
 }
 
 // netStorageDuRequest performs a NetStorage du request
-func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) {
+func (f *Fs) netStorageDuRequest(ctx context.Context) (any, error) {
 	URL := f.url("")
 	const actionHeader = "version=1&action=du&format=xml&encoding=utf-8"
 	duResp := &Du{}
@@ -1100,7 +1100,7 @@ func (f *Fs) netStorageDuRequest(ctx context.Context) (interface{}, error) {
 }
 
 // netStorageDuRequest performs a NetStorage symlink request
-func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (interface{}, error) {
+func (f *Fs) netStorageSymlinkRequest(ctx context.Context, URL string, dst string, modTime *int64) (any, error) {
 	target := url.QueryEscape(strings.TrimSuffix(dst, "/"))
 	actionHeader := "version=1&action=symlink&target=" + target
 	if modTime != nil {
@@ -2532,10 +2532,7 @@ func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, src fs.Objec
 	remaining := size
 	position := int64(0)
 	for remaining > 0 {
-		n := int64(o.fs.opt.ChunkSize)
-		if remaining < n {
-			n = remaining
-		}
+		n := min(remaining, int64(o.fs.opt.ChunkSize))
 		seg := readers.NewRepeatableReader(io.LimitReader(in, n))
 		fs.Debugf(o, "Uploading segment %d/%d size %d", position, size, n)
 		info, err = o.uploadFragment(ctx, uploadURL, position, size, seg, n, options...)
@@ -86,7 +86,7 @@ func (q *quickXorHash) Write(p []byte) (n int, err error) {
 
 // Calculate the current checksum
 func (q *quickXorHash) checkSum() (h [Size + 1]byte) {
-	for i := 0; i < dataSize; i++ {
+	for i := range dataSize {
 		shift := (i * 11) % 160
 		shiftBytes := shift / 8
 		shiftBits := shift % 8
@@ -130,10 +130,7 @@ func TestQuickXorHashByBlock(t *testing.T) {
 		require.NoError(t, err, what)
 		h := New()
 		for i := 0; i < len(in); i += blockSize {
-			end := i + blockSize
-			if end > len(in) {
-				end = len(in)
-			}
+			end := min(i+blockSize, len(in))
 			n, err := h.Write(in[i:end])
 			require.Equal(t, end-i, n, what)
 			require.NoError(t, err, what)
@@ -491,7 +491,7 @@ func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object,
 		Method: "POST",
 		Path:   "/file/move_copy.json",
 	}
-	var request interface{} = moveCopyFileData
+	var request any = moveCopyFileData
 
 	// use /file/rename.json if moving within the same directory
 	_, srcDirID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
@@ -564,7 +564,7 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
 		Method: "POST",
 		Path:   "/folder/move_copy.json",
 	}
-	var request interface{} = moveFolderData
+	var request any = moveFolderData
 
 	// use /folder/rename.json if moving within the same parent directory
 	if srcDirectoryID == dstDirectoryID {
@@ -1042,10 +1042,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 	chunkCounter := 0
 
 	for remainingBytes > 0 {
-		currentChunkSize := int64(o.fs.opt.ChunkSize)
-		if currentChunkSize > remainingBytes {
-			currentChunkSize = remainingBytes
-		}
+		currentChunkSize := min(int64(o.fs.opt.ChunkSize), remainingBytes)
 		remainingBytes -= currentChunkSize
 		fs.Debugf(o, "Uploading chunk %d, size=%d, remain=%d", chunkCounter, currentChunkSize, remainingBytes)
 
@@ -131,7 +131,7 @@ If it is a string or a []string it will be shown to the user
 otherwise it will be JSON encoded and shown to the user like that
 */
 func (f *Fs) Command(ctx context.Context, commandName string, args []string,
-	opt map[string]string) (result interface{}, err error) {
+	opt map[string]string) (result any, err error) {
 	// fs.Debugf(f, "command %v, args: %v, opts:%v", commandName, args, opt)
 	switch commandName {
 	case operationRename:
@@ -159,7 +159,7 @@ func (f *Fs) Command(ctx context.Context, commandName string, args []string,
 	}
 }
 
-func (f *Fs) rename(ctx context.Context, remote, newName string) (interface{}, error) {
+func (f *Fs) rename(ctx context.Context, remote, newName string) (any, error) {
 	if remote == "" {
 		return nil, fmt.Errorf("path to object file cannot be empty")
 	}
@@ -332,7 +332,7 @@ func (f *Fs) listMultipartUploadParts(ctx context.Context, bucketName, bucketPat
 	return uploadedParts, nil
 }
 
-func (f *Fs) restore(ctx context.Context, opt map[string]string) (interface{}, error) {
+func (f *Fs) restore(ctx context.Context, opt map[string]string) (any, error) {
 	req := objectstorage.RestoreObjectsRequest{
 		NamespaceName:         common.String(f.opt.Namespace),
 		RestoreObjectsDetails: objectstorage.RestoreObjectsDetails{},
@@ -112,7 +112,7 @@ func copyObjectWaitForWorkRequest(ctx context.Context, wID *string, entityType s
 			string(objectstorage.WorkRequestSummaryStatusCanceled),
 			string(objectstorage.WorkRequestStatusFailed),
 		},
-		Refresh: func() (interface{}, string, error) {
+		Refresh: func() (any, string, error) {
 			getWorkRequestRequest := objectstorage.GetWorkRequestRequest{}
 			getWorkRequestRequest.WorkRequestId = wID
 			workRequestResponse, err := client.GetWorkRequest(context.Background(), getWorkRequestRequest)
@@ -131,7 +131,7 @@ func (o *Object) setMetaData(
 	contentMd5 *string,
 	contentType *string,
 	lastModified *common.SDKTime,
-	storageTier interface{},
+	storageTier any,
 	meta map[string]string) error {
 
 	if contentLength != nil {
@@ -5,6 +5,7 @@ package oracleobjectstorage
 import (
 	"context"
 	"fmt"
+	"slices"
 	"strings"
 	"time"
 
@@ -23,7 +24,7 @@ var refreshGracePeriod = 30 * time.Second
 //
 // `state` is the latest state of that object. And `err` is any error that
 // may have happened while refreshing the state.
-type StateRefreshFunc func() (result interface{}, state string, err error)
+type StateRefreshFunc func() (result any, state string, err error)
 
 // StateChangeConf is the configuration struct used for `WaitForState`.
 type StateChangeConf struct {
@@ -56,7 +57,7 @@ type StateChangeConf struct {
 // reach the target state.
 //
 // Cancellation from the passed in context will cancel the refresh loop
-func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (interface{}, error) {
+func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType string) (any, error) {
 	// fs.Debugf(entityType, "Waiting for state to become: %s", conf.Target)
 
 	notfoundTick := 0
@@ -72,7 +73,7 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
 }
 
 type Result struct {
	Result any
 	State  string
 	Error  error
 	Done   bool
@@ -165,12 +166,9 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
 				}
 			}
 
-			for _, allowed := range conf.Pending {
-				if currentState == allowed {
-					found = true
-					targetOccurrence = 0
-					break
-				}
+			if slices.Contains(conf.Pending, currentState) {
+				found = true
+				targetOccurrence = 0
 			}
 
 			if !found && len(conf.Pending) > 0 {
@@ -278,8 +276,8 @@ func (conf *StateChangeConf) WaitForStateContext(ctx context.Context, entityType
 // NotFoundError resource not found error
 type NotFoundError struct {
 	LastError    error
-	LastRequest  interface{}
-	LastResponse interface{}
+	LastRequest  any
+	LastResponse any
 	Message      string
 	Retries      int
 }
@@ -990,10 +990,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 	if err != nil {
 		return nil, err
 	}
-	free := q.Quota - q.UsedQuota
-	if free < 0 {
-		free = 0
-	}
+	free := max(q.Quota-q.UsedQuota, 0)
 	usage = &fs.Usage{
 		Total: fs.NewUsageValue(q.Quota),     // quota of bytes that can be used
 		Used:  fs.NewUsageValue(q.UsedQuota), // bytes in use
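max is the mirror image of min: clamping a difference at zero, as in the free-space calculation above, becomes max(x, 0). A quick sketch with made-up quota numbers:

    package main

    import "fmt"

    func main() {
        quota, used := int64(100), int64(130) // hypothetical: account over quota
        // Old form: free := quota - used; if free < 0 { free = 0 }
        free := max(quota-used, 0)
        fmt.Println(free) // 0 rather than -30
    }
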
@ -1324,7 +1321,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
// sometimes pcloud leaves a half complete file on
|
// sometimes pcloud leaves a half complete file on
|
||||||
// error, so delete it if it exists, trying a few times
|
// error, so delete it if it exists, trying a few times
|
||||||
for i := 0; i < 5; i++ {
|
for range 5 {
|
||||||
delObj, delErr := o.fs.NewObject(ctx, o.remote)
|
delObj, delErr := o.fs.NewObject(ctx, o.remote)
|
||||||
if delErr == nil && delObj != nil {
|
if delErr == nil && delObj != nil {
|
||||||
_ = delObj.Remove(ctx)
|
_ = delObj.Remove(ctx)
|
||||||
|
@ -37,7 +37,7 @@ func (c *writerAt) Close() error {
|
|||||||
}
|
}
|
||||||
sizeOk := false
|
sizeOk := false
|
||||||
sizeLastSeen := int64(0)
|
sizeLastSeen := int64(0)
|
||||||
for retry := 0; retry < 5; retry++ {
|
for retry := range 5 {
|
||||||
fs.Debugf(c.remote, "checking file size: try %d/5", retry)
|
fs.Debugf(c.remote, "checking file size: try %d/5", retry)
|
||||||
obj, err := c.fs.NewObject(c.ctx, c.remote)
|
obj, err := c.fs.NewObject(c.ctx, c.remote)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -71,14 +71,14 @@ type Error struct {
|
|||||||
|
|
||||||
// ErrorDetails contains further details of api error
|
// ErrorDetails contains further details of api error
|
||||||
type ErrorDetails struct {
|
type ErrorDetails struct {
|
||||||
Type string `json:"@type,omitempty"`
|
Type string `json:"@type,omitempty"`
|
||||||
Reason string `json:"reason,omitempty"`
|
Reason string `json:"reason,omitempty"`
|
||||||
Domain string `json:"domain,omitempty"`
|
Domain string `json:"domain,omitempty"`
|
||||||
Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
|
Metadata struct{} `json:"metadata,omitempty"` // TODO: undiscovered yet
|
||||||
Locale string `json:"locale,omitempty"` // e.g. "en"
|
Locale string `json:"locale,omitempty"` // e.g. "en"
|
||||||
Message string `json:"message,omitempty"`
|
Message string `json:"message,omitempty"`
|
||||||
StackEntries []interface{} `json:"stack_entries,omitempty"` // TODO: undiscovered yet
|
StackEntries []any `json:"stack_entries,omitempty"` // TODO: undiscovered yet
|
||||||
Detail string `json:"detail,omitempty"`
|
Detail string `json:"detail,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error returns a string for the error and satisfies the error interface
|
// Error returns a string for the error and satisfies the error interface
|
||||||
@ -168,44 +168,44 @@ type FileList struct {
|
|||||||
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
|
// for a single file, i.e. supports for higher `--multi-thread-streams=N`.
|
||||||
// However, it is not generally applicable as it is only for media.
|
// However, it is not generally applicable as it is only for media.
|
||||||
type File struct {
|
type File struct {
|
||||||
Apps []*FileApp `json:"apps,omitempty"`
|
Apps []*FileApp `json:"apps,omitempty"`
|
||||||
Audit *FileAudit `json:"audit,omitempty"`
|
Audit *FileAudit `json:"audit,omitempty"`
|
||||||
Collection string `json:"collection,omitempty"` // TODO
|
Collection string `json:"collection,omitempty"` // TODO
|
||||||
CreatedTime Time `json:"created_time,omitempty"`
|
CreatedTime Time `json:"created_time,omitempty"`
|
||||||
DeleteTime Time `json:"delete_time,omitempty"`
|
DeleteTime Time `json:"delete_time,omitempty"`
|
||||||
FileCategory string `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
|
FileCategory string `json:"file_category,omitempty"` // "AUDIO", "VIDEO"
|
||||||
FileExtension string `json:"file_extension,omitempty"`
|
FileExtension string `json:"file_extension,omitempty"`
|
||||||
FolderType string `json:"folder_type,omitempty"`
|
FolderType string `json:"folder_type,omitempty"`
|
||||||
Hash string `json:"hash,omitempty"` // custom hash with a form of sha1sum
|
Hash string `json:"hash,omitempty"` // custom hash with a form of sha1sum
|
||||||
IconLink string `json:"icon_link,omitempty"`
|
IconLink string `json:"icon_link,omitempty"`
|
||||||
ID string `json:"id,omitempty"`
|
ID string `json:"id,omitempty"`
|
||||||
Kind string `json:"kind,omitempty"` // "drive#file"
|
Kind string `json:"kind,omitempty"` // "drive#file"
|
||||||
Links *FileLinks `json:"links,omitempty"`
|
Links *FileLinks `json:"links,omitempty"`
|
||||||
Md5Checksum string `json:"md5_checksum,omitempty"`
|
Md5Checksum string `json:"md5_checksum,omitempty"`
|
||||||
Medias []*Media `json:"medias,omitempty"`
|
Medias []*Media `json:"medias,omitempty"`
|
||||||
MimeType string `json:"mime_type,omitempty"`
|
MimeType string `json:"mime_type,omitempty"`
|
||||||
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
|
ModifiedTime Time `json:"modified_time,omitempty"` // updated when renamed or moved
|
||||||
Name string `json:"name,omitempty"`
|
Name string `json:"name,omitempty"`
|
||||||
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
|
OriginalFileIndex int `json:"original_file_index,omitempty"` // TODO
|
||||||
OriginalURL string `json:"original_url,omitempty"`
|
OriginalURL string `json:"original_url,omitempty"`
|
||||||
Params *FileParams `json:"params,omitempty"`
|
Params *FileParams `json:"params,omitempty"`
|
||||||
ParentID string `json:"parent_id,omitempty"`
|
ParentID string `json:"parent_id,omitempty"`
|
||||||
Phase string `json:"phase,omitempty"`
|
Phase string `json:"phase,omitempty"`
|
||||||
Revision int `json:"revision,omitempty,string"`
|
Revision int `json:"revision,omitempty,string"`
|
||||||
ReferenceEvents []interface{} `json:"reference_events"`
|
ReferenceEvents []any `json:"reference_events"`
|
||||||
ReferenceResource interface{} `json:"reference_resource"`
|
ReferenceResource any `json:"reference_resource"`
|
||||||
Size int64 `json:"size,omitempty,string"`
|
Size int64 `json:"size,omitempty,string"`
|
||||||
SortName string `json:"sort_name,omitempty"`
|
SortName string `json:"sort_name,omitempty"`
|
||||||
Space string `json:"space,omitempty"`
|
Space string `json:"space,omitempty"`
|
||||||
SpellName []interface{} `json:"spell_name,omitempty"` // TODO maybe list of something?
|
SpellName []any `json:"spell_name,omitempty"` // TODO maybe list of something?
|
||||||
Starred bool `json:"starred,omitempty"`
|
Starred bool `json:"starred,omitempty"`
|
||||||
Tags []interface{} `json:"tags"`
|
Tags []any `json:"tags"`
|
||||||
ThumbnailLink string `json:"thumbnail_link,omitempty"`
|
ThumbnailLink string `json:"thumbnail_link,omitempty"`
|
||||||
Trashed bool `json:"trashed,omitempty"`
|
Trashed bool `json:"trashed,omitempty"`
|
||||||
UserID string `json:"user_id,omitempty"`
|
UserID string `json:"user_id,omitempty"`
|
||||||
UserModifiedTime Time `json:"user_modified_time,omitempty"`
|
UserModifiedTime Time `json:"user_modified_time,omitempty"`
|
||||||
WebContentLink string `json:"web_content_link,omitempty"`
|
WebContentLink string `json:"web_content_link,omitempty"`
|
||||||
Writable bool `json:"writable,omitempty"`
|
Writable bool `json:"writable,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// FileLinks includes links to file at backend
|
// FileLinks includes links to file at backend
|
||||||
@ -235,18 +235,18 @@ type Media struct {
|
|||||||
VideoType string `json:"video_type,omitempty"` // "mpegts"
|
VideoType string `json:"video_type,omitempty"` // "mpegts"
|
||||||
HdrType string `json:"hdr_type,omitempty"`
|
HdrType string `json:"hdr_type,omitempty"`
|
||||||
} `json:"video,omitempty"`
|
} `json:"video,omitempty"`
|
||||||
Link *Link `json:"link,omitempty"`
|
Link *Link `json:"link,omitempty"`
|
||||||
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
||||||
VipTypes []interface{} `json:"vip_types,omitempty"` // TODO maybe list of something?
|
VipTypes []any `json:"vip_types,omitempty"` // TODO maybe list of something?
|
||||||
RedirectLink string `json:"redirect_link,omitempty"`
|
RedirectLink string `json:"redirect_link,omitempty"`
|
||||||
IconLink string `json:"icon_link,omitempty"`
|
IconLink string `json:"icon_link,omitempty"`
|
||||||
IsDefault bool `json:"is_default,omitempty"`
|
IsDefault bool `json:"is_default,omitempty"`
|
||||||
Priority int `json:"priority,omitempty"`
|
Priority int `json:"priority,omitempty"`
|
||||||
IsOrigin bool `json:"is_origin,omitempty"`
|
IsOrigin bool `json:"is_origin,omitempty"`
|
||||||
ResolutionName string `json:"resolution_name,omitempty"`
|
ResolutionName string `json:"resolution_name,omitempty"`
|
||||||
IsVisible bool `json:"is_visible,omitempty"`
|
IsVisible bool `json:"is_visible,omitempty"`
|
||||||
Category string `json:"category,omitempty"` // "category_origin"
|
Category string `json:"category,omitempty"` // "category_origin"
|
||||||
Audio interface{} `json:"audio"` // TODO: undiscovered yet
|
Audio any `json:"audio"` // TODO: undiscovered yet
|
||||||
}
|
}
|
||||||
|
|
||||||
// FileParams includes parameters for instant open
|
// FileParams includes parameters for instant open
|
||||||
@ -263,20 +263,20 @@ type FileParams struct {
|
|||||||
|
|
||||||
// FileApp includes parameters for instant open
|
// FileApp includes parameters for instant open
|
||||||
type FileApp struct {
|
type FileApp struct {
|
||||||
ID string `json:"id,omitempty"` // "decompress" for rar files
|
ID string `json:"id,omitempty"` // "decompress" for rar files
|
||||||
Name string `json:"name,omitempty"` // decompress" for rar files
|
Name string `json:"name,omitempty"` // decompress" for rar files
|
||||||
Access []interface{} `json:"access,omitempty"`
|
Access []any `json:"access,omitempty"`
|
||||||
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
|
Link string `json:"link,omitempty"` // "https://mypikpak.com/drive/decompression/{File.Id}?gcid={File.Hash}\u0026wv-style=topbar%3Ahide"
|
||||||
RedirectLink string `json:"redirect_link,omitempty"`
|
RedirectLink string `json:"redirect_link,omitempty"`
|
||||||
VipTypes []interface{} `json:"vip_types,omitempty"`
|
VipTypes []any `json:"vip_types,omitempty"`
|
||||||
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
NeedMoreQuota bool `json:"need_more_quota,omitempty"`
|
||||||
IconLink string `json:"icon_link,omitempty"`
|
IconLink string `json:"icon_link,omitempty"`
|
||||||
IsDefault bool `json:"is_default,omitempty"`
|
IsDefault bool `json:"is_default,omitempty"`
|
||||||
Params struct{} `json:"params,omitempty"` // TODO
|
Params struct{} `json:"params,omitempty"` // TODO
|
||||||
CategoryIDs []interface{} `json:"category_ids,omitempty"`
|
CategoryIDs []any `json:"category_ids,omitempty"`
|
||||||
AdSceneType int `json:"ad_scene_type,omitempty"`
|
AdSceneType int `json:"ad_scene_type,omitempty"`
|
||||||
Space string `json:"space,omitempty"`
|
Space string `json:"space,omitempty"`
|
||||||
Links struct{} `json:"links,omitempty"` // TODO
|
Links struct{} `json:"links,omitempty"` // TODO
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------
|
// ------------------------------------------------------------
|
||||||
@ -290,27 +290,27 @@ type TaskList struct {
|
|||||||
|
|
||||||
// Task is a basic element representing a single task such as offline download and upload
|
// Task is a basic element representing a single task such as offline download and upload
|
||||||
type Task struct {
|
type Task struct {
|
||||||
Kind string `json:"kind,omitempty"` // "drive#task"
|
Kind string `json:"kind,omitempty"` // "drive#task"
|
||||||
ID string `json:"id,omitempty"` // task id?
|
ID string `json:"id,omitempty"` // task id?
|
||||||
Name string `json:"name,omitempty"` // torrent name?
|
Name string `json:"name,omitempty"` // torrent name?
|
||||||
Type string `json:"type,omitempty"` // "offline"
|
Type string `json:"type,omitempty"` // "offline"
|
||||||
UserID string `json:"user_id,omitempty"`
|
UserID string `json:"user_id,omitempty"`
|
||||||
Statuses []interface{} `json:"statuses,omitempty"` // TODO
|
Statuses []any `json:"statuses,omitempty"` // TODO
|
||||||
StatusSize int `json:"status_size,omitempty"` // TODO
|
StatusSize int `json:"status_size,omitempty"` // TODO
|
||||||
Params *TaskParams `json:"params,omitempty"` // TODO
|
Params *TaskParams `json:"params,omitempty"` // TODO
|
||||||
FileID string `json:"file_id,omitempty"`
|
FileID string `json:"file_id,omitempty"`
|
||||||
FileName string `json:"file_name,omitempty"`
|
FileName string `json:"file_name,omitempty"`
|
||||||
FileSize string `json:"file_size,omitempty"`
|
FileSize string `json:"file_size,omitempty"`
|
||||||
Message string `json:"message,omitempty"` // e.g. "Saving"
|
Message string `json:"message,omitempty"` // e.g. "Saving"
|
||||||
CreatedTime Time `json:"created_time,omitempty"`
|
CreatedTime Time `json:"created_time,omitempty"`
|
||||||
UpdatedTime Time `json:"updated_time,omitempty"`
|
UpdatedTime Time `json:"updated_time,omitempty"`
|
||||||
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
|
ThirdTaskID string `json:"third_task_id,omitempty"` // TODO
|
||||||
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
|
Phase string `json:"phase,omitempty"` // e.g. "PHASE_TYPE_RUNNING"
|
||||||
Progress int `json:"progress,omitempty"`
|
Progress int `json:"progress,omitempty"`
|
||||||
IconLink string `json:"icon_link,omitempty"`
|
IconLink string `json:"icon_link,omitempty"`
|
||||||
Callback string `json:"callback,omitempty"`
|
Callback string `json:"callback,omitempty"`
|
||||||
ReferenceResource interface{} `json:"reference_resource,omitempty"` // TODO
|
ReferenceResource any `json:"reference_resource,omitempty"` // TODO
|
||||||
Space string `json:"space,omitempty"`
|
Space string `json:"space,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// TaskParams includes parameters informing status of Task
|
// TaskParams includes parameters informing status of Task
|
||||||
|
@ -638,7 +638,7 @@ func (c *pikpakClient) SetCaptchaTokener(ctx context.Context, m configmap.Mapper
|
|||||||
return c
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request interface{}, response interface{}) (resp *http.Response, err error) {
|
func (c *pikpakClient) CallJSON(ctx context.Context, opts *rest.Opts, request any, response any) (resp *http.Response, err error) {
|
||||||
if c.captcha != nil {
|
if c.captcha != nil {
|
||||||
token, err := c.captcha.Token(opts)
|
token, err := c.captcha.Token(opts)
|
||||||
if err != nil || token == "" {
|
if err != nil || token == "" {
|
||||||
|
@ -1232,7 +1232,7 @@ func (f *Fs) uploadByForm(ctx context.Context, in io.Reader, name string, size i
|
|||||||
params := url.Values{}
|
params := url.Values{}
|
||||||
iVal := reflect.ValueOf(&form.MultiParts).Elem()
|
iVal := reflect.ValueOf(&form.MultiParts).Elem()
|
||||||
iTyp := iVal.Type()
|
iTyp := iVal.Type()
|
||||||
for i := 0; i < iVal.NumField(); i++ {
|
for i := range iVal.NumField() {
|
||||||
params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
|
params.Set(iTyp.Field(i).Tag.Get("json"), iVal.Field(i).String())
|
||||||
}
|
}
|
||||||
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
|
formReader, contentType, overhead, err := rest.MultipartUpload(ctx, in, params, "file", name)
|
||||||
@ -1520,7 +1520,7 @@ Result:
|
|||||||
// The result should be capable of being JSON encoded
|
// The result should be capable of being JSON encoded
|
||||||
// If it is a string or a []string it will be shown to the user
|
// If it is a string or a []string it will be shown to the user
|
||||||
// otherwise it will be JSON encoded and shown to the user like that
|
// otherwise it will be JSON encoded and shown to the user like that
|
||||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
||||||
switch name {
|
switch name {
|
||||||
case "addurl":
|
case "addurl":
|
||||||
if len(arg) != 1 {
|
if len(arg) != 1 {
|
||||||
|
@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@ -13,10 +14,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func checkStatusCode(resp *http.Response, expected ...int) error {
|
func checkStatusCode(resp *http.Response, expected ...int) error {
|
||||||
for _, code := range expected {
|
if slices.Contains(expected, resp.StatusCode) {
|
||||||
if resp.StatusCode == code {
|
return nil
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return &statusCodeError{response: resp}
|
return &statusCodeError{response: resp}
|
||||||
}
|
}
|
||||||
|
@ -332,10 +332,7 @@ func (f *Fs) sendUpload(ctx context.Context, location string, size int64, in io.
|
|||||||
var offsetMismatch bool
|
var offsetMismatch bool
|
||||||
buf := make([]byte, defaultChunkSize)
|
buf := make([]byte, defaultChunkSize)
|
||||||
for clientOffset < size {
|
for clientOffset < size {
|
||||||
chunkSize := size - clientOffset
|
chunkSize := min(size-clientOffset, int64(defaultChunkSize))
|
||||||
if chunkSize >= int64(defaultChunkSize) {
|
|
||||||
chunkSize = int64(defaultChunkSize)
|
|
||||||
}
|
|
||||||
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
|
chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
|
||||||
chunkStart := clientOffset
|
chunkStart := clientOffset
|
||||||
reqSize := chunkSize
|
reqSize := chunkSize
|
||||||
|
@ -358,7 +358,7 @@ func (mu *multiUploader) multiPartUpload(firstBuf io.ReadSeeker) (err error) {
|
|||||||
})()
|
})()
|
||||||
|
|
||||||
ch := make(chan chunk, mu.cfg.concurrency)
|
ch := make(chan chunk, mu.cfg.concurrency)
|
||||||
for i := 0; i < mu.cfg.concurrency; i++ {
|
for range mu.cfg.concurrency {
|
||||||
mu.wg.Add(1)
|
mu.wg.Add(1)
|
||||||
go mu.readChunk(ch)
|
go mu.readChunk(ch)
|
||||||
}
|
}
|
||||||
|
@ -15,6 +15,7 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
|
"slices"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@ -643,10 +644,8 @@ func (f *Fs) deleteObject(ctx context.Context, id string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, removedID := range result.IDs {
|
if slices.Contains(result.IDs, id) {
|
||||||
if removedID == id {
|
return nil
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("file %s was not deleted successfully", id)
|
return fmt.Errorf("file %s was not deleted successfully", id)
|
||||||
|
@ -59,11 +59,7 @@ func (u *UploadMemoryManager) Consume(fileID string, neededMemory int64, speed f
|
|||||||
|
|
||||||
defer func() { u.fileUsage[fileID] = borrowed }()
|
defer func() { u.fileUsage[fileID] = borrowed }()
|
||||||
|
|
||||||
effectiveChunkSize := int64(speed * u.effectiveTime.Seconds())
|
effectiveChunkSize := max(int64(speed*u.effectiveTime.Seconds()), u.reserved)
|
||||||
|
|
||||||
if effectiveChunkSize < u.reserved {
|
|
||||||
effectiveChunkSize = u.reserved
|
|
||||||
}
|
|
||||||
|
|
||||||
if neededMemory < effectiveChunkSize {
|
if neededMemory < effectiveChunkSize {
|
||||||
effectiveChunkSize = neededMemory
|
effectiveChunkSize = neededMemory
|
||||||
|
@ -19,6 +19,7 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"slices"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -3097,10 +3098,8 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
|
|||||||
return true, err
|
return true, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, e := range retryErrorCodes {
|
if slices.Contains(retryErrorCodes, httpStatusCode) {
|
||||||
if httpStatusCode == e {
|
return true, err
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Ok, not an awserr, check for generic failure conditions
|
// Ok, not an awserr, check for generic failure conditions
|
||||||
@ -3230,7 +3229,7 @@ func fixupRequest(o *s3.Options, opt *Options) {
|
|||||||
type s3logger struct{}
|
type s3logger struct{}
|
||||||
|
|
||||||
// Logf is expected to support the standard fmt package "verbs".
|
// Logf is expected to support the standard fmt package "verbs".
|
||||||
func (s3logger) Logf(classification logging.Classification, format string, v ...interface{}) {
|
func (s3logger) Logf(classification logging.Classification, format string, v ...any) {
|
||||||
switch classification {
|
switch classification {
|
||||||
default:
|
default:
|
||||||
case logging.Debug:
|
case logging.Debug:
|
||||||
@ -5253,7 +5252,7 @@ It doesn't return anything.
|
|||||||
// The result should be capable of being JSON encoded
|
// The result should be capable of being JSON encoded
|
||||||
// If it is a string or a []string it will be shown to the user
|
// If it is a string or a []string it will be shown to the user
|
||||||
// otherwise it will be JSON encoded and shown to the user like that
|
// otherwise it will be JSON encoded and shown to the user like that
|
||||||
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
|
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out any, err error) {
|
||||||
switch name {
|
switch name {
|
||||||
case "restore":
|
case "restore":
|
||||||
req := s3.RestoreObjectInput{
|
req := s3.RestoreObjectInput{
|
||||||
|
@@ -9,9 +9,9 @@ import (
 
 // Renew allows tokens to be renewed on expiry.
 type Renew struct {
 	ts *time.Ticker // timer indicating when it's time to renew the token
 	run func() error // the callback to do the renewal
-	done chan interface{} // channel to end the go routine
+	done chan any // channel to end the go routine
 	shutdown *sync.Once
 }
 
@@ -22,7 +22,7 @@ func NewRenew(every time.Duration, run func() error) *Renew {
 	r := &Renew{
 		ts: time.NewTicker(every),
 		run: run,
-		done: make(chan interface{}),
+		done: make(chan any),
 		shutdown: &sync.Once{},
 	}
 	go r.renewOnExpiry()
@@ -1313,7 +1313,7 @@ func (f *Fs) getCachedLibraries(ctx context.Context) ([]api.Library, error) {
 	f.librariesMutex.Lock()
 	defer f.librariesMutex.Unlock()
 
-	libraries, err := f.libraries.Get(librariesCacheKey, func(key string) (value interface{}, ok bool, error error) {
+	libraries, err := f.libraries.Get(librariesCacheKey, func(key string) (value any, ok bool, error error) {
 		// Load the libraries if not present in the cache
 		libraries, err := f.getLibraries(ctx)
 		if err != nil {
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"io"
 	"os/exec"
+	"slices"
 	"strings"
 	"time"
 
@@ -89,7 +90,7 @@ func (f *Fs) newSSHSessionExternal() *sshSessionExternal {
 	// Connect to a remote host and request the sftp subsystem via
 	// the 'ssh' command. This assumes that passwordless login is
 	// correctly configured.
-	ssh := append([]string(nil), s.f.opt.SSH...)
+	ssh := slices.Clone(s.f.opt.SSH)
 	s.cmd = exec.CommandContext(ctx, ssh[0], ssh[1:]...)
 
 	// Allow the command a short time only to shut down
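slices.Clone (go1.21) above replaces the append-to-nil copying idiom. A sketch with an illustrative argv:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        ssh := []string{"ssh", "-o", "BatchMode=yes", "example.com"} // illustrative command line

        argv := slices.Clone(ssh) // same result as append([]string(nil), ssh...)
        argv[0] = "autossh"       // mutating the clone leaves the original intact

        fmt.Println(ssh[0], argv[0]) // ssh autossh
    }

The clone presumably exists so that later use of the slice cannot alter the configured opt.SSH value.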
@@ -20,13 +20,13 @@ func TestStringLock(t *testing.T) {
 		inner = 100
 		total = outer * inner
 	)
-	for k := 0; k < outer; k++ {
+	for range outer {
 		for j := range counter {
 			wg.Add(1)
 			go func(j int) {
 				defer wg.Done()
 				ID := fmt.Sprintf("%d", j)
-				for i := 0; i < inner; i++ {
+				for range inner {
 					lock.Lock(ID)
 					n := counter[j]
 					time.Sleep(1 * time.Millisecond)
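This test shows both shapes of the go1.22 loop rewrite: a bare for range outer when the index is unused, while loops that still need the index (like for j := range counter) keep it. A sketch of range-over-int:

    package main

    import "fmt"

    func main() {
        const outer, inner = 3, 2

        count := 0
        for range outer { // go1.22: range over an int, index not needed
            for range inner {
                count++
            }
        }
        fmt.Println(count) // 6
    }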
@@ -537,7 +537,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 // Fill up (or reset) the buffer tokens
 func (f *Fs) fillBufferTokens() {
 	f.bufferTokens = make(chan []byte, f.ci.Transfers)
-	for i := 0; i < f.ci.Transfers; i++ {
+	for range f.ci.Transfers {
 		f.bufferTokens <- nil
 	}
 }
@@ -57,10 +57,7 @@ func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs
 		return nil, fmt.Errorf("can't use method %q with newLargeUpload", info.Method)
 	}
 
-	threads := f.ci.Transfers
-	if threads > info.MaxNumberOfThreads {
-		threads = info.MaxNumberOfThreads
-	}
+	threads := min(f.ci.Transfers, info.MaxNumberOfThreads)
 
 	// unwrap the accounting from the input, we use wrap to put it
 	// back on after the buffering
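The threads hunk is the mirror image of the earlier max rewrite: capping a value collapses to the built-in min (go1.21). A sketch with illustrative values:

    package main

    import "fmt"

    func main() {
        transfers, maxThreads := 8, 4

        // Before: start from transfers, then cap with an if statement.
        threads := transfers
        if threads > maxThreads {
            threads = maxThreads
        }

        // After: one expression.
        fmt.Println(threads, min(transfers, maxThreads)) // 4 4
    }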
@@ -337,7 +337,7 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
 	}
 
 	// Cleanup stray files left after failed upload
-	for i := 0; i < 5; i++ {
+	for range 5 {
 		cleanObj, cleanErr := f.NewObject(ctx, src.Remote())
 		if cleanErr == nil {
 			cleanErr = cleanObj.Remove(ctx)
@@ -574,7 +574,7 @@ func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string,
 		RootURL: pathID,
 		NoResponse: true,
 	}
-	var mkdir interface{}
+	var mkdir any
 	if pathID == f.opt.RootID {
 		// folders at the root are syncFolders
 		mkdir = &api.CreateSyncFolder{
@@ -8,8 +8,10 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"path"
 	"regexp"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -417,10 +419,8 @@ func shouldRetry(ctx context.Context, err error) (bool, error) {
 	}
 	// If this is a swift.Error object extract the HTTP error code
 	if swiftError, ok := err.(*swift.Error); ok {
-		for _, e := range retryErrorCodes {
-			if swiftError.StatusCode == e {
-				return true, err
-			}
+		if slices.Contains(retryErrorCodes, swiftError.StatusCode) {
+			return true, err
 		}
 	}
 	// Check for generic failure conditions
@@ -701,7 +701,7 @@ func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix
 	if !recurse {
 		opts.Delimiter = '/'
 	}
-	return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
+	return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (any, error) {
 		var objects []swift.Object
 		var err error
 		err = f.pacer.Call(func() (bool, error) {
@@ -1378,9 +1378,7 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	meta := o.headers.ObjectMetadata()
 	meta.SetModTime(modTime)
 	newHeaders := meta.ObjectHeaders()
-	for k, v := range newHeaders {
-		o.headers[k] = v
-	}
+	maps.Copy(o.headers, newHeaders)
 	// Include any other metadata from request
 	for k, v := range o.headers {
 		if strings.HasPrefix(k, "X-Object-") {
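maps.Copy (go1.21) above replaces the key/value copy loop. Like the loop it replaces, it overwrites destination keys that also exist in the source. A sketch with illustrative headers:

    package main

    import (
        "fmt"
        "maps"
    )

    func main() {
        headers := map[string]string{"X-Object-Meta-A": "old", "Content-Type": "text/plain"}
        newHeaders := map[string]string{"X-Object-Meta-A": "new", "X-Object-Meta-Mtime": "12345"}

        // Equivalent to: for k, v := range newHeaders { headers[k] = v }
        maps.Copy(headers, newHeaders)

        fmt.Println(headers["X-Object-Meta-A"], len(headers)) // new 3
    }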
@@ -1450,7 +1448,7 @@ func (o *Object) removeSegmentsLargeObject(ctx context.Context, container string
 // encoded but we need '&' encoded.
 func urlEncode(str string) string {
 	var buf bytes.Buffer
-	for i := 0; i < len(str); i++ {
+	for i := range len(str) {
 		c := str[i]
 		if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' || c == '_' || c == '-' {
 			_ = buf.WriteByte(c)
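Note that urlEncode keeps its index and ranges over len(str), not over str itself: ranging over the string would yield rune start positions and skip bytes inside multi-byte runes, while this loop must visit every byte. A sketch of the difference:

    package main

    import "fmt"

    func main() {
        s := "aé" // 'é' is two bytes in UTF-8

        byteVisits, runeVisits := 0, 0
        for range len(s) { // go1.22 form of: for i := 0; i < len(s); i++
            byteVisits++
        }
        for range s { // ranges over runes, not bytes
            runeVisits++
        }
        fmt.Println(byteVisits, runeVisits) // 3 2
    }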
@@ -82,8 +82,8 @@ type File struct {
 	ContentType string `json:"content_type"`
 	Format struct {
 	} `json:"format"`
-	DownloadTypes []interface{} `json:"download_types"`
-	ThumbnailInfo []interface{} `json:"thumbnail_info"`
+	DownloadTypes []any `json:"download_types"`
+	ThumbnailInfo []any `json:"thumbnail_info"`
 	PreviewInfo struct {
 	} `json:"preview_info"`
 	Privacy string `json:"privacy"`
@@ -729,7 +729,7 @@ func (o *Object) Storable() bool {
 	return true
 }
 
-func (o *Object) updateFileProperties(ctx context.Context, req interface{}) (err error) {
+func (o *Object) updateFileProperties(ctx context.Context, req any) (err error) {
 	var resp *api.File
 
 	opts := rest.Opts{
@@ -887,7 +887,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 
 // Remove implements the mandatory method fs.Object.Remove
 func (o *Object) Remove(ctx context.Context) error {
-	for i := 0; i < 2; i++ {
+	for range 2 {
 		// First call moves the item to recycle bin, second deletes it for good
 		var err error
 		opts := rest.Opts{
@@ -902,7 +902,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}
 	// Backward compatible to old config
 	if len(opt.Upstreams) == 0 && len(opt.Remotes) > 0 {
-		for i := 0; i < len(opt.Remotes)-1; i++ {
+		for i := range len(opt.Remotes) - 1 {
 			opt.Remotes[i] += ":ro"
 		}
 		opt.Upstreams = opt.Remotes
@@ -1045,7 +1045,7 @@ func parentDir(absPath string) string {
 
 func multithread(num int, fn func(int)) {
 	var wg sync.WaitGroup
-	for i := 0; i < num; i++ {
+	for i := range num {
 		wg.Add(1)
 		i := i
 		go func() {
@@ -246,7 +246,7 @@ func NewFs(ctx context.Context, name string, root string, config configmap.Mappe
 	return f, nil
 }
 
-func (f *Fs) decodeError(resp *http.Response, response interface{}) (err error) {
+func (f *Fs) decodeError(resp *http.Response, response any) (err error) {
 	defer fs.CheckClose(resp.Body, &err)
 
 	body, err := io.ReadAll(resp.Body)
@@ -112,12 +112,8 @@ func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, pa
 			return err
 		}
 
-		contentLength := chunkSize
-
 		// Last chunk may be smaller
-		if size-offset < contentLength {
-			contentLength = size - offset
-		}
+		contentLength := min(size-offset, chunkSize)
 
 		endOffset := offset + contentLength - 1
 
@@ -185,7 +185,7 @@ func (ca *CookieAuth) getSPToken(ctx context.Context) (conf *SharepointSuccessRe
 	if err != nil {
 		return nil, err
 	}
-	reqData := map[string]interface{}{
+	reqData := map[string]any{
 		"Username": ca.user,
 		"Password": ca.pass,
 		"Address": ca.endpoint,
@@ -23,20 +23,20 @@ type ResourceInfoRequestOptions struct {
 
 // ResourceInfoResponse struct is returned by the API for metadata requests.
 type ResourceInfoResponse struct {
 	PublicKey string `json:"public_key"`
 	Name string `json:"name"`
 	Created string `json:"created"`
-	CustomProperties map[string]interface{} `json:"custom_properties"`
+	CustomProperties map[string]any `json:"custom_properties"`
 	Preview string `json:"preview"`
 	PublicURL string `json:"public_url"`
 	OriginPath string `json:"origin_path"`
 	Modified string `json:"modified"`
 	Path string `json:"path"`
 	Md5 string `json:"md5"`
 	ResourceType string `json:"type"`
 	MimeType string `json:"mime_type"`
 	Size int64 `json:"size"`
 	Embedded *ResourceListResponse `json:"_embedded"`
 }
 
 // ResourceListResponse struct
@@ -64,7 +64,7 @@ type AsyncStatus struct {
 
 // CustomPropertyResponse struct we send and is returned by the API for CustomProperty request.
 type CustomPropertyResponse struct {
-	CustomProperties map[string]interface{} `json:"custom_properties"`
+	CustomProperties map[string]any `json:"custom_properties"`
 }
 
 // SortMode struct - sort mode
@@ -1024,7 +1024,7 @@ func (o *Object) setCustomProperty(ctx context.Context, property string, value s
 	}
 
 	opts.Parameters.Set("path", o.fs.opt.Enc.FromStandardPath(o.filePath()))
-	rcm := map[string]interface{}{
+	rcm := map[string]any{
 		property: value,
 	}
 	cpr := api.CustomPropertyResponse{CustomProperties: rcm}
@@ -82,7 +82,7 @@ Note to run these commands on a running backend then see
 		return err
 	}
 	// Run the command
-	var out interface{}
+	var out any
 	switch name {
 	case "help":
 		return showHelp(fsInfo)
@@ -10,7 +10,7 @@ import (
 )
 
 // Names comprises a set of file names
-type Names map[string]interface{}
+type Names map[string]any
 
 // ToNames converts string slice to a set of names
 func ToNames(list []string) Names {
@@ -627,7 +627,7 @@ func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
 	testFunc := func() {
 		src := filepath.Join(b.dataDir, "file7.txt")
 
-		for i := 0; i < 50; i++ {
+		for i := range 50 {
 			dst := "file" + fmt.Sprint(i) + ".txt"
 			err := b.copyFile(ctx, src, b.replaceHex(b.path2), dst)
 			if err != nil {
@@ -1606,7 +1606,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
 		s = pathReplacer.Replace(strings.TrimSpace(s))
 
 		// Apply regular expression replacements
-		for i := 0; i < len(repFrom); i++ {
+		for i := range repFrom {
 			s = repFrom[i].ReplaceAllString(s, repTo[i])
 		}
 		s = strings.TrimSpace(s)
@@ -1621,7 +1621,7 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
 		// Sort consecutive groups of naturally unordered lines.
 		// Any such group must end before the log ends or it might be lost.
 		absorbed := false
-		for i := 0; i < len(dampers); i++ {
+		for i := range dampers {
 			match := false
 			if s != "" && !absorbed {
 				match = hoppers[i].MatchString(s)
@@ -1869,7 +1869,7 @@ func fileType(fileName string) string {
 }
 
 // logPrintf prints a message to stdout and to the test log
-func (b *bisyncTest) logPrintf(text string, args ...interface{}) {
+func (b *bisyncTest) logPrintf(text string, args ...any) {
 	line := fmt.Sprintf(text, args...)
 	fs.Log(nil, line)
 	if b.logFile != nil {
@@ -1936,7 +1936,7 @@ func ctxNoDsStore(ctx context.Context, t *testing.T) (context.Context, *filter.F
 	return ctxNoDsStore, fi
 }
 
-func checkError(t *testing.T, err error, msgAndArgs ...interface{}) {
+func checkError(t *testing.T, err error, msgAndArgs ...any) {
 	if errors.Is(err, fs.ErrorCantUploadEmptyFiles) {
 		t.Skipf("Skip test because remote cannot upload empty files")
 	}
@@ -12,7 +12,7 @@ import (
 	"github.com/rclone/rclone/lib/terminal"
 )
 
-func (b *bisyncRun) indentf(tag, file, format string, args ...interface{}) {
+func (b *bisyncRun) indentf(tag, file, format string, args ...any) {
 	b.indent(tag, file, fmt.Sprintf(format, args...))
 }
 
@@ -524,7 +524,7 @@ func (b *bisyncRun) testFn() {
 	}
 }
 
-func (b *bisyncRun) handleErr(o interface{}, msg string, err error, critical, retryable bool) {
+func (b *bisyncRun) handleErr(o any, msg string, err error, critical, retryable bool) {
 	if err != nil {
 		if retryable {
 			b.retryable = true
@@ -624,7 +624,7 @@ func (b *bisyncRun) debugFn(nametocheck string, fn func()) {
 // waitFor runs fn() until it returns true or the timeout expires
 func waitFor(msg string, totalWait time.Duration, fn func() bool) (ok bool) {
 	const individualWait = 1 * time.Second
-	for i := 0; i < int(totalWait/individualWait); i++ {
+	for i := range int(totalWait / individualWait) {
 		ok = fn()
 		if ok {
 			return ok
@@ -28,6 +28,7 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"slices"
 	"strings"
 
 	"github.com/rclone/rclone/cmd"
@@ -282,11 +283,8 @@ func (s *server) handleInitRemote() error {
 
 	if s.configRcloneRemoteName != ":local" {
 		var remoteExists bool
-		for _, remoteName := range config.FileSections() {
-			if remoteName == trimmedName {
-				remoteExists = true
-				break
-			}
+		if slices.Contains(config.FileSections(), trimmedName) {
+			remoteExists = true
 		}
 		if !remoteExists {
 			s.sendMsg("INITREMOTE-FAILURE remote does not exist: " + s.configRcloneRemoteName)
@@ -273,7 +273,7 @@ func showBackends() {
 	fmt.Printf(" rclone help backend <name>\n")
 }
 
-func quoteString(v interface{}) string {
+func quoteString(v any) string {
 	switch v.(type) {
 	case string:
 		return fmt.Sprintf("%q", v)
@@ -78,7 +78,7 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
 	fs.Debugf(f, "Mounting on %q", mountpoint)
 
 	if opt.DebugFUSE {
-		fuse.Debug = func(msg interface{}) {
+		fuse.Debug = func(msg any) {
 			fs.Debugf("fuse", "%v", msg)
 		}
 	}
@@ -185,7 +185,7 @@ func (u *UI) Print(x, y int, style tcell.Style, msg string) {
 }
 
 // Printf a string
-func (u *UI) Printf(x, y int, style tcell.Style, format string, args ...interface{}) {
+func (u *UI) Printf(x, y int, style tcell.Style, format string, args ...any) {
 	s := fmt.Sprintf(format, args...)
 	u.Print(x, y, style, s)
 }
@@ -207,7 +207,7 @@ func (u *UI) Line(x, y, xmax int, style tcell.Style, spacer rune, msg string) {
 }
 
 // Linef a string
-func (u *UI) Linef(x, y, xmax int, style tcell.Style, spacer rune, format string, args ...interface{}) {
+func (u *UI) Linef(x, y, xmax int, style tcell.Style, spacer rune, format string, args ...any) {
 	s := fmt.Sprintf(format, args...)
 	u.Line(x, y, xmax, style, spacer, s)
 }
@@ -273,11 +273,7 @@ func (u *UI) Box() {
 	xmax := x + boxWidth
 	if len(u.boxMenu) != 0 {
 		count := lineOptionLength(u.boxMenu)
-		if x+boxWidth > x+count {
-			xmax = x + boxWidth
-		} else {
-			xmax = x + count
-		}
+		xmax = max(x+boxWidth, x+count)
 	}
 	ymax := y + len(u.boxText)
 
@@ -5,6 +5,7 @@ import (
 	"context"
 	"fmt"
 	"path"
+	"slices"
 	"sync"
 	"time"
 
@@ -111,7 +112,7 @@ func newDir(parent *Dir, dirPath string, entries fs.DirEntries, err error) *Dir
 
 // Entries returns a copy of the entries in the directory
 func (d *Dir) Entries() fs.DirEntries {
-	return append(fs.DirEntries(nil), d.entries...)
+	return slices.Clone(d.entries)
 }
 
 // Remove removes the i-th entry from the
@@ -146,7 +147,7 @@ func (d *Dir) remove(i int) {
 	d.size -= size
 	d.count -= count
 	d.countUnknownSize -= countUnknownSize
-	d.entries = append(d.entries[:i], d.entries[i+1:]...)
+	d.entries = slices.Delete(d.entries, i, i+1)
 
 	dir := d
 	// populate changed size and count to parent(s)
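slices.Delete (go1.21) above replaces the append(s[:i], s[i+1:]...) deletion idiom; since go1.22 it also zeroes the vacated tail elements, which lets pointer-holding slices such as fs.DirEntries release references for garbage collection. A sketch:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        entries := []string{"a", "b", "c", "d"}
        i := 1

        // Equivalent to: entries = append(entries[:i], entries[i+1:]...)
        entries = slices.Delete(entries, i, i+1)

        fmt.Println(entries, len(entries), cap(entries)) // [a c d] 3 4
    }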
@@ -40,7 +40,7 @@ func startProgress() func() {
 	}
 
 	// Intercept output from functions such as HashLister to stdout
-	operations.SyncPrintf = func(format string, a ...interface{}) {
+	operations.SyncPrintf = func(format string, a ...any) {
 		printProgress(fmt.Sprintf(format, a...))
 	}
 
@@ -97,7 +97,7 @@ func printProgress(logMessage string) {
 		out(terminal.MoveUp)
 	}
 	// Move to the start of the block we wrote erasing all the previous lines
-	for i := 0; i < nlines-1; i++ {
+	for range nlines - 1 {
 		out(terminal.EraseLine)
 		out(terminal.MoveUp)
 	}
@@ -312,12 +312,12 @@ func list(ctx context.Context) error {
 	if err != nil {
 		return fmt.Errorf("failed to list: %w", err)
 	}
-	commands, ok := list["commands"].([]interface{})
+	commands, ok := list["commands"].([]any)
 	if !ok {
 		return errors.New("bad JSON")
 	}
 	for _, command := range commands {
-		info, ok := command.(map[string]interface{})
+		info, ok := command.(map[string]any)
 		if !ok {
 			return errors.New("bad JSON")
 		}
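Because any and interface{} are the same type, type assertions rewritten this way keep matching the values produced by encoding/json, which decodes arbitrary JSON into map[string]any, []any, string, float64, bool, and nil. A sketch of the pattern above:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        var list map[string]any
        if err := json.Unmarshal([]byte(`{"commands":[{"Name":"ls"}]}`), &list); err != nil {
            panic(err)
        }

        commands, ok := list["commands"].([]any) // was .([]interface{})
        if !ok {
            fmt.Println("bad JSON")
            return
        }
        info := commands[0].(map[string]any) // was .(map[string]interface{})
        fmt.Println(info["Name"])            // ls
    }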
@@ -327,7 +327,7 @@ func makeRandomExeName(baseName, extension string) (string, error) {
 		extension += ".exe"
 	}
 
-	for attempt := 0; attempt < maxAttempts; attempt++ {
+	for range maxAttempts {
 		filename := fmt.Sprintf("%s.%s.%s", baseName, random.String(4), extension)
 		if _, err := os.Stat(filename); os.IsNotExist(err) {
 			return filename, nil
@@ -34,7 +34,7 @@ var mediaMimeTypeRegexp = regexp.MustCompile("^(video|audio|image)/")
 
 // Turns the given entry and DMS host into a UPnP object. A nil object is
 // returned if the entry is not of interest.
-func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, resources vfs.Nodes, host string) (ret interface{}, err error) {
+func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fileInfo vfs.Node, resources vfs.Nodes, host string) (ret any, err error) {
 	obj := upnpav.Object{
 		ID: cdsObject.ID(),
 		Restricted: 1,
@@ -127,7 +127,7 @@ func (cds *contentDirectoryService) cdsObjectToUpnpavObject(cdsObject object, fi
 }
 
 // Returns all the upnpav objects in a directory.
-func (cds *contentDirectoryService) readContainer(o object, host string) (ret []interface{}, err error) {
+func (cds *contentDirectoryService) readContainer(o object, host string) (ret []any, err error) {
 	node, err := cds.vfs.Stat(o.Path)
 	if err != nil {
 		return
@@ -295,10 +295,7 @@ func (cds *contentDirectoryService) Handle(action string, argsXML []byte, r *htt
 	}
 	totalMatches := len(objs)
 	objs = objs[func() (low int) {
-		low = browse.StartingIndex
-		if low > len(objs) {
-			low = len(objs)
-		}
+		low = min(browse.StartingIndex, len(objs))
 		return
 	}():]
 	if browse.RequestedCount != 0 && browse.RequestedCount < len(objs) {
Some files were not shown because too many files have changed in this diff