From 69897b97fb982f6c23ff6c8633744ae342dac806 Mon Sep 17 00:00:00 2001
From: Simon Bos
Date: Sun, 6 Oct 2024 14:11:11 +0200
Subject: [PATCH 01/15] dlna: fix loggingResponseWriter disregarding log level

---
 cmd/serve/dlna/dlna_util.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmd/serve/dlna/dlna_util.go b/cmd/serve/dlna/dlna_util.go
index ca61488af..c8d931009 100644
--- a/cmd/serve/dlna/dlna_util.go
+++ b/cmd/serve/dlna/dlna_util.go
@@ -108,7 +108,7 @@ func (lrw *loggingResponseWriter) logRequest(code int, err interface{}) {
         err = ""
     }
 
-    fs.LogPrintf(level, lrw.request.URL, "%s %s %d %s %s",
+    fs.LogLevelPrintf(level, lrw.request.URL, "%s %s %d %s %s",
         lrw.request.RemoteAddr, lrw.request.Method, code,
         lrw.request.Header.Get("SOAPACTION"), err)
 }

From 589458d1feaf8634ac74afdde459b25dbecd6821 Mon Sep 17 00:00:00 2001
From: Alexandre Hamez <199517+ahamez@users.noreply.github.com>
Date: Thu, 10 Oct 2024 09:57:43 +0200
Subject: [PATCH 02/15] docs: fix Scaleway Glacier website URL

---
 docs/content/s3.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/content/s3.md b/docs/content/s3.md
index d5192542f..0a0f553f1 100644
--- a/docs/content/s3.md
+++ b/docs/content/s3.md
@@ -3654,8 +3654,8 @@
 chunk_size = 5M
 copy_cutoff = 5M
 ```
 
-[C14 Cold Storage](https://www.online.net/en/storage/c14-cold-storage) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
-So you can configure your remote with the `storage_class = GLACIER` option to upload directly to C14. Don't forget that in this state you can't read files back after, you will need to restore them to "STANDARD" storage_class first before being able to read them (see "restore" section above)
+[Scaleway Glacier](https://www.scaleway.com/en/glacier-cold-storage/) is the low-cost S3 Glacier alternative from Scaleway and it works the same way as on S3 by accepting the "GLACIER" `storage_class`.
+So you can configure your remote with the `storage_class = GLACIER` option to upload directly to Scaleway Glacier. Don't forget that in this state you can't read files back after, you will need to restore them to "STANDARD" storage_class first before being able to read them (see "restore" section above)
 
 ### Seagate Lyve Cloud {#lyve}

From 3e2c0f8c045a5cea804a0267d6fab09f9ec32ae4 Mon Sep 17 00:00:00 2001
From: Randy Bush
Date: Mon, 14 Oct 2024 04:25:25 -0700
Subject: [PATCH 03/15] docs: fix forward refs in step 9 of using your own client id

---
 docs/content/drive.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/content/drive.md b/docs/content/drive.md
index 444886677..aa8734f82 100644
--- a/docs/content/drive.md
+++ b/docs/content/drive.md
@@ -1810,9 +1810,9 @@ then select "OAuth client ID".
 
 9. It will show you a client ID and client secret. Make a note of these.
 
-   (If you selected "External" at Step 5 continue to Step 9.
+   (If you selected "External" at Step 5 continue to Step 10.
    If you chose "Internal" you don't need to publish and can skip straight to
-   Step 10 but your destination drive must be part of the same Google Workspace.)
+   Step 11 but your destination drive must be part of the same Google Workspace.)
 
 10. Go to "Oauth consent screen" and then click "PUBLISH APP" button and confirm.
     You will also want to add yourself as a test user.
From a19ddffe92b905ec535758f5f9fe6cd6451f806b Mon Sep 17 00:00:00 2001
From: Diego Monti
Date: Sun, 13 Oct 2024 18:35:40 +0200
Subject: [PATCH 04/15] s3: add Wasabi eu-south-1 region

Ref. https://docs.wasabi.com/docs/what-are-the-service-urls-for-wasabi-s-different-storage-regions
---
 backend/s3/s3.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index 38c62c811..ccec63e89 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -1423,6 +1423,10 @@ func init() {
         Value: "s3.eu-west-2.wasabisys.com",
         Help: "Wasabi EU West 2 (Paris)",
         Provider: "Wasabi",
+    }, {
+        Value: "s3.eu-south-1.wasabisys.com",
+        Help: "Wasabi EU South 1 (Milan)",
+        Provider: "Wasabi",
     }, {
         Value: "s3.ap-northeast-1.wasabisys.com",
         Help: "Wasabi AP Northeast 1 (Tokyo) endpoint",

From 8ca7b2af072e5b8dbafbf28ad00685225d6c9f5d Mon Sep 17 00:00:00 2001
From: tgfisher
Date: Mon, 21 Oct 2024 00:10:09 -0700
Subject: [PATCH 05/15] docs: mention that inline comments are not supported in a filter-file

---
 docs/content/filtering.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs/content/filtering.md b/docs/content/filtering.md
index ffc898c25..c73e41f7b 100644
--- a/docs/content/filtering.md
+++ b/docs/content/filtering.md
@@ -505,6 +505,8 @@ processed in.
 Arrange the order of filter rules with the most restrictive first
 and work down.
 
+Lines starting with # or ; are ignored, and can be used to write comments. Inline comments are not supported. _Use `-vv --dump filters` to see how they appear in the final regexp._
+
 E.g. for `filter-file.txt`:
 
     # a sample filter rule file
     - secret*.jpg
     + *.jpg
     + *.png
     + file2.avi
+    - /dir/tmp/** # WARNING! This text will be treated as part of the path.
     - /dir/Trash/**
     + /dir/**
     # exclude everything else

From 11a90917ec013ddd0bf4fc35d57ad2ecfe0ce100 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Mon, 21 Oct 2024 10:14:41 +0100
Subject: [PATCH 06/15] Add Simon Bos to contributors

---
 docs/content/authors.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/content/authors.md b/docs/content/authors.md
index beac7491e..876a7f0d9 100644
--- a/docs/content/authors.md
+++ b/docs/content/authors.md
@@ -900,3 +900,4 @@ put them back in again.` >}}
   * lostb1t
   * Matthias Gatto
   * André Tran
+  * Simon Bos

From 9f2c590e13e974e7a04d34483fccd8d855ac66ef Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Mon, 21 Oct 2024 10:14:41 +0100
Subject: [PATCH 07/15] Add Alexandre Hamez to contributors

---
 docs/content/authors.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/content/authors.md b/docs/content/authors.md
index 876a7f0d9..89a752cbe 100644
--- a/docs/content/authors.md
+++ b/docs/content/authors.md
@@ -901,3 +901,4 @@ put them back in again.` >}}
   * Matthias Gatto
   * André Tran
   * Simon Bos
+  * Alexandre Hamez <199517+ahamez@users.noreply.github.com>

From 82a510e79346c69f6b2b9a9f5604afd4343ad848 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Mon, 21 Oct 2024 10:14:41 +0100
Subject: [PATCH 08/15] Add Randy Bush to contributors

---
 docs/content/authors.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/content/authors.md b/docs/content/authors.md
index 89a752cbe..07bbc47ae 100644
--- a/docs/content/authors.md
+++ b/docs/content/authors.md
@@ -902,3 +902,4 @@ put them back in again.` >}}
   * André Tran
   * Simon Bos
   * Alexandre Hamez <199517+ahamez@users.noreply.github.com>
+  * Randy Bush

From d97492cbc329c773047feb5dc991fef33f752c51 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Mon, 21 Oct 2024 10:14:41 +0100
Subject: [PATCH 09/15] Add Diego Monti to contributors

---
 docs/content/authors.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/content/authors.md b/docs/content/authors.md
index 07bbc47ae..a7f7a6d85 100644
--- a/docs/content/authors.md
+++ b/docs/content/authors.md
@@ -903,3 +903,4 @@ put them back in again.` >}}
   * Simon Bos
   * Alexandre Hamez <199517+ahamez@users.noreply.github.com>
   * Randy Bush
+  * Diego Monti

From 1b10cd3732e790d4d1820db1faa854a74613f17c Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Mon, 21 Oct 2024 10:14:41 +0100
Subject: [PATCH 10/15] Add tgfisher to contributors

---
 docs/content/authors.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/content/authors.md b/docs/content/authors.md
index a7f7a6d85..e2a6f847c 100644
--- a/docs/content/authors.md
+++ b/docs/content/authors.md
@@ -904,3 +904,4 @@ put them back in again.` >}}
   * Alexandre Hamez <199517+ahamez@users.noreply.github.com>
   * Randy Bush
   * Diego Monti
+  * tgfisher

From 264c9fb2c00d85cc9cf294797d72e4f2af5c931d Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Thu, 18 Jul 2024 16:40:48 +0100
Subject: [PATCH 11/15] drive: implement rclone backend rescue to rescue orphaned files

Fixes #4166
---
 backend/drive/drive.go | 103 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 97 insertions(+), 6 deletions(-)

diff --git a/backend/drive/drive.go b/backend/drive/drive.go
index f39800af0..f49603437 100644
--- a/backend/drive/drive.go
+++ b/backend/drive/drive.go
@@ -3559,7 +3559,8 @@ func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
     return nil
 }
-func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
+// Run the drive query calling fn on each entry found
+func (f *Fs) queryFn(ctx context.Context, query string, fn func(*drive.File)) (err error) {
     list := f.svc.Files.List()
     if query != "" {
         list.Q(query)
     }
@@ -3578,10 +3579,7 @@ func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, er
     if f.rootFolderID == "appDataFolder" {
         list.Spaces("appDataFolder")
     }
-
     fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.getFileFields(ctx))
-
-    var results []*drive.File
     for {
         var files *drive.FileList
         err = f.pacer.Call(func() (bool, error) {
             return f.shouldRetry(ctx, err)
         })
         if err != nil {
-            return nil, fmt.Errorf("failed to execute query: %w", err)
+            return fmt.Errorf("failed to execute query: %w", err)
         }
         if files.IncompleteSearch {
             fs.Errorf(f, "search result INCOMPLETE")
         }
-        results = append(results, files.Files...)
+        for _, item := range files.Files {
+            fn(item)
         }
         if files.NextPageToken == "" {
             break
         }
         list.PageToken(files.NextPageToken)
     }
     return nil
 }
+
+// Run the drive query returning the entries found
+func (f *Fs) query(ctx context.Context, query string) (entries []*drive.File, err error) {
+    var results []*drive.File
+    err = f.queryFn(ctx, query, func(item *drive.File) {
+        results = append(results, item)
+    })
+    if err != nil {
+        return nil, err
+    }
     return results, nil
 }

+// Rescue, list or delete orphaned files
+func (f *Fs) rescue(ctx context.Context, dirID string, delete bool) (err error) {
+    return f.queryFn(ctx, "'me' in owners and trashed=false", func(item *drive.File) {
+        if len(item.Parents) != 0 {
+            return
+        }
+        // Have found an orphaned entry
+        if delete {
+            fs.Infof(item.Name, "Deleting orphan %q into trash", item.Id)
+            err = f.delete(ctx, item.Id, true)
+            if err != nil {
+                fs.Errorf(item.Name, "Failed to delete orphan %q: %v", item.Id, err)
+            }
+        } else if dirID == "" {
+            operations.SyncPrintf("%q, %q\n", item.Name, item.Id)
+        } else {
+            fs.Infof(item.Name, "Rescuing orphan %q", item.Id)
+            err = f.pacer.Call(func() (bool, error) {
+                _, err = f.svc.Files.Update(item.Id, nil).
+                    AddParents(dirID).
+                    Fields(f.getFileFields(ctx)).
+                    SupportsAllDrives(true).
+                    Context(ctx).Do()
+                return f.shouldRetry(ctx, err)
+            })
+            if err != nil {
+                fs.Errorf(item.Name, "Failed to rescue orphan %q: %v", item.Id, err)
+            }
+        }
+    })
+}
+
 var commandHelp = []fs.CommandHelp{{
     Name: "get",
     Short: "Get command for fetching the drive config parameters",
@@ -3794,6 +3838,37 @@ The result is a JSON array of matches, for example:
         "webViewLink": "https://drive.google.com/file/d/0AxBe_CDEF4zkGHI4d0FjYko2QkD/view?usp=drivesdk\u0026resourcekey=0-ABCDEFGHIXJQpIGqBJq3MC"
     }
 ]`,
+}, {
+    Name: "rescue",
+    Short: "Rescue or delete any orphaned files",
+    Long: `This command rescues or deletes any orphaned files or directories.
+
+Sometimes files can get orphaned in Google Drive. This means that they
+are no longer in any folder in Google Drive.
+
+This command finds those files and either rescues them to a directory
+you specify or deletes them.
+
+Usage:
+
+This can be used in 3 ways.
+
+First, list all orphaned files
+
+    rclone backend rescue drive:
+
+Second, rescue all orphaned files to the directory indicated
+
+    rclone backend rescue drive: "relative/path/to/rescue/directory"
+
+e.g. To rescue all orphans to a directory called "Orphans" in the top level
+
+    rclone backend rescue drive: Orphans
+
+Third, delete all orphaned files, moving them to the trash
+
+    rclone backend rescue drive: -o delete
+`,
 }}
 
 // Command the backend to run a named command
@@ -3922,6 +3997,22 @@
     } else {
         return nil, errors.New("need a query argument")
     }
+case "rescue":
+    dirID := ""
+    _, delete := opt["delete"]
+    if len(arg) == 0 {
+        // no arguments - list only
+    } else if !delete && len(arg) == 1 {
+        dir := arg[0]
+        dirID, err = f.dirCache.FindDir(ctx, dir, true)
+        if err != nil {
+            return nil, fmt.Errorf("failed to find or create rescue directory %q: %w", dir, err)
+        }
+        fs.Infof(f, "Rescuing orphans into %q", dir)
+    } else {
+        return nil, errors.New("syntax error: need 0 or 1 args or -o delete")
+    }
+    return nil, f.rescue(ctx, dirID, delete)
 default:
     return nil, fs.ErrorCommandNotFound
 }

From 8b4b59412df4c7052c4402284daf9d998d636b4e Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Tue, 8 Oct 2024 10:35:29 +0100
Subject: [PATCH 12/15] fs: fix Don't know how to set key "chunkSize" on upload errors in tests

Before this, testing any backend which implemented the OpenChunkWriter gave
this error:

    ERROR : writer-at-subdir/writer-at-file: Don't know how to set key "chunkSize" on upload

This was due to the ChunkOption incorrectly rendering into HTTP headers
which weren't understood by the backend.
---
 fs/open_options.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/open_options.go b/fs/open_options.go
index cbcd98cb7..cb48a930e 100644
--- a/fs/open_options.go
+++ b/fs/open_options.go
@@ -293,7 +293,7 @@ type ChunkOption struct {
 
 // Header formats the option as an http header
 func (o *ChunkOption) Header() (key string, value string) {
-    return "chunkSize", fmt.Sprintf("%v", o.ChunkSize)
+    return "", ""
 }
 
 // Mandatory returns whether the option must be parsed or can be ignored
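The fix above works because code that turns open options into HTTP headers skips any option whose `Header()` method returns an empty key, so the chunk size stays an internal hint instead of becoming a bogus `chunkSize` header. A minimal, self-contained sketch of that pattern - illustrative types and names only, not rclone's actual implementation:

```go
package main

import (
	"fmt"
	"net/http"
)

// OpenOption is a simplified stand-in for an option that may or may not
// correspond to an HTTP header.
type OpenOption interface {
	Header() (key string, value string)
}

// chunkOption mirrors the fixed behaviour: it carries a chunk size for the
// chunk writer but deliberately returns an empty header key.
type chunkOption struct{ chunkSize int64 }

func (o chunkOption) Header() (string, string) { return "", "" }

// rangeOption is an example of an option that does map to a real header.
type rangeOption struct{ start, end int64 }

func (o rangeOption) Header() (string, string) {
	return "Range", fmt.Sprintf("bytes=%d-%d", o.start, o.end)
}

// addHeaders copies options into HTTP headers, skipping any option whose
// key is empty - this is what keeps the chunk size out of the request.
func addHeaders(h http.Header, opts []OpenOption) {
	for _, opt := range opts {
		key, value := opt.Header()
		if key == "" {
			continue // internal option only, not an HTTP header
		}
		h.Set(key, value)
	}
}

func main() {
	h := make(http.Header)
	addHeaders(h, []OpenOption{chunkOption{5 << 20}, rangeOption{0, 1023}})
	fmt.Println(h) // map[Range:[bytes=0-1023]] - no chunkSize key
}
```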
From 53ff3b3b32cb904e3a45226921c8bbdfcec2e014 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Wed, 9 Oct 2024 10:08:08 +0100
Subject: [PATCH 13/15] s3: add Selectel as a provider

---
 README.md              |   1 +
 backend/s3/s3.go       |  31 +++++++++--
 docs/content/_index.md |   1 +
 docs/content/s3.md     | 120 +++++++++++++++++++++++++++++++++++++++++
 4 files changed, 149 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index d1e859244..1f47ee914 100644
--- a/README.md
+++ b/README.md
@@ -111,6 +111,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
   * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
   * Seafile [:page_facing_up:](https://rclone.org/seafile/)
   * SeaweedFS [:page_facing_up:](https://rclone.org/s3/#seaweedfs)
+  * Selectel Object Storage [:page_facing_up:](https://rclone.org/s3/#selectel)
   * SFTP [:page_facing_up:](https://rclone.org/sftp/)
   * SMB / CIFS [:page_facing_up:](https://rclone.org/smb/)
   * StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index ccec63e89..eda36a259 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -154,6 +154,9 @@ var providerOption = fs.Option{
     }, {
         Value: "SeaweedFS",
         Help: "SeaweedFS S3",
+    }, {
+        Value: "Selectel",
+        Help: "Selectel Object Storage",
     }, {
         Value: "StackPath",
         Help: "StackPath Object Storage",
@@ -551,10 +554,19 @@ func init() {
         Value: "tw-001",
         Help: "Asia (Taiwan)",
     }},
+}, {
+    // See endpoints for object storage regions: https://docs.selectel.ru/en/cloud/object-storage/manage/domains/#s3-api-domains
+    Name: "region",
+    Help: "Region where your data is stored.\n",
+    Provider: "Selectel",
+    Examples: []fs.OptionExample{{
+        Value: "ru-1",
+        Help: "St. Petersburg",
+    }},
 }, {
     Name: "region",
     Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
-    Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
+    Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Magalu,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
     Examples: []fs.OptionExample{{
         Value: "",
         Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -1319,10 +1331,19 @@ func init() {
         Value: "s3-ap-northeast-1.qiniucs.com",
         Help: "Northeast Asia Endpoint 1",
     }},
+}, {
+    // Selectel endpoints: https://docs.selectel.ru/en/cloud/object-storage/manage/domains/#s3-api-domains
+    Name: "endpoint",
+    Help: "Endpoint for Selectel Object Storage.",
+    Provider: "Selectel",
+    Examples: []fs.OptionExample{{
+        Value: "s3.ru-1.storage.selcloud.ru",
+        Help: "Saint Petersburg",
+    }},
 }, {
     Name: "endpoint",
     Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
-    Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
+    Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,MagaluCloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
     Examples: []fs.OptionExample{{
         Value: "objects-us-east-1.dream.io",
         Help: "Dream Objects endpoint",
@@ -1845,7 +1866,7 @@ func init() {
 }, {
     Name: "location_constraint",
     Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
-    Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
+    Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox",
 }, {
     Name: "acl",
     Help: `Canned ACL used when creating buckets and storing or copying objects.
 
 This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
 
 For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
 
 Note that this ACL is applied when server-side copying objects as S3
 doesn't copy the ACL from the source but rather writes a fresh one.
 
 If the acl is an empty string then no X-Amz-Acl: header is added and
 the default (private) will be used.
 `,
-    Provider: "!Storj,Synology,Cloudflare",
+    Provider: "!Storj,Selectel,Synology,Cloudflare",
     Examples: []fs.OptionExample{{
         Value: "default",
         Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -3430,6 +3451,8 @@ func setQuirks(opt *Options) {
         }
         urlEncodeListings = true
         useAlreadyExists = true
+    case "Selectel":
+        urlEncodeListings = false
     case "SeaweedFS":
         listObjectsV2 = false // untested
         virtualHostStyle = false

diff --git a/docs/content/_index.md b/docs/content/_index.md
index 45abcfef0..711b55041 100644
--- a/docs/content/_index.md
+++ b/docs/content/_index.md
@@ -178,6 +178,7 @@ WebDAV or S3, that work out of the box.)
 {{< provider name="Seafile" home="https://www.seafile.com/" config="/seafile/" >}}
 {{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
 {{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
+{{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
 {{< provider name="SFTP" home="https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol" config="/sftp/" >}}
 {{< provider name="Sia" home="https://sia.tech/" config="/sia/" >}}
 {{< provider name="SMB / CIFS" home="https://en.wikipedia.org/wiki/Server_Message_Block" config="/smb/" >}}

diff --git a/docs/content/s3.md b/docs/content/s3.md
index 0a0f553f1..58b16b79a 100644
--- a/docs/content/s3.md
+++ b/docs/content/s3.md
@@ -35,6 +35,7 @@ The S3 backend can be used with a number of different providers:
 {{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
 {{< provider name="Seagate Lyve Cloud" home="https://www.seagate.com/gb/en/services/cloud/storage/" config="/s3/#lyve" >}}
 {{< provider name="SeaweedFS" home="https://github.com/chrislusf/seaweedfs/" config="/s3/#seaweedfs" >}}
+{{< provider name="Selectel" home="https://selectel.ru/services/cloud/storage/" config="/s3/#selectel" >}}
 {{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
 {{< provider name="Storj" home="https://storj.io/" config="/s3/#storj" >}}
 {{< provider name="Synology C2 Object Storage" home="https://c2.synology.com/en-global/object-storage/overview" config="/s3/#synology-c2" >}}
@@ -3850,6 +3851,125 @@ So once set up, for example to copy files into a bucket
 
     rclone copy /path/to/files seaweedfs_s3:foo
 ```
 
+### Selectel
+
+[Selectel Cloud Storage](https://selectel.ru/services/cloud/storage/)
+is an S3 compatible storage system which features triple redundancy
+storage, automatic scaling, high availability and a comprehensive IAM
+system.
+
+Selectel have a section on their website for [configuring
+rclone](https://docs.selectel.ru/en/cloud/object-storage/tools/rclone/)
+which shows how to make the right API keys.
+
+From rclone v1.69 Selectel is a supported operator - please choose the
+`Selectel` provider type.
+
+Note that you should use "vHosted" access for the buckets (which is
+the recommended default), not "path style".
+
+You can use `rclone config` to make a new provider like this
+
+```
+No remotes found, make a new one?
+n) New remote
+s) Set configuration password
+q) Quit config
+n/s/q> n
+
+Enter name for new remote.
+name> selectel
+
+Option Storage.
+Type of storage to configure.
+Choose a number from below, or type in your own value.
+[snip]
+XX / Amazon S3 Compliant Storage Providers including ..., Selectel, ...
+   \ (s3)
+[snip]
+Storage> s3
+
+Option provider.
+Choose your S3 provider.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+[snip]
+XX / Selectel Object Storage
+   \ (Selectel)
+[snip]
+provider> Selectel
+
+Option env_auth.
+Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
+Only applies if access_key_id and secret_access_key is blank.
+Choose a number from below, or type in your own boolean value (true or false).
+Press Enter for the default (false).
+ 1 / Enter AWS credentials in the next step.
+   \ (false)
+ 2 / Get AWS credentials from the environment (env vars or IAM).
+   \ (true)
+env_auth> 1
+
+Option access_key_id.
+AWS Access Key ID.
+Leave blank for anonymous access or runtime credentials.
+Enter a value. Press Enter to leave empty.
+access_key_id> ACCESS_KEY
+
+Option secret_access_key.
+AWS Secret Access Key (password).
+Leave blank for anonymous access or runtime credentials.
+Enter a value. Press Enter to leave empty.
+secret_access_key> SECRET_ACCESS_KEY
+
+Option region.
+Region where your data is stored.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+ 1 / St. Petersburg
+   \ (ru-1)
+region> 1
+
+Option endpoint.
+Endpoint for Selectel Object Storage.
+Choose a number from below, or type in your own value.
+Press Enter to leave empty.
+ 1 / Saint Petersburg
+   \ (s3.ru-1.storage.selcloud.ru)
+endpoint> 1
+
+Edit advanced config?
+y) Yes
+n) No (default)
+y/n> n
+
+Configuration complete.
+Options:
+- type: s3
+- provider: Selectel
+- access_key_id: ACCESS_KEY
+- secret_access_key: SECRET_ACCESS_KEY
+- region: ru-1
+- endpoint: s3.ru-1.storage.selcloud.ru
+Keep this "selectel" remote?
+y) Yes this is OK (default)
+e) Edit this remote
+d) Delete this remote
+y/e/d> y
+```
+
+And your config should end up looking like this:
+
+```
+[selectel]
+type = s3
+provider = Selectel
+access_key_id = ACCESS_KEY
+secret_access_key = SECRET_ACCESS_KEY
+region = ru-1
+endpoint = s3.ru-1.storage.selcloud.ru
+```
+
 ### Wasabi
 
 [Wasabi](https://wasabi.com) is a cloud-based object storage service for a

From 75257fc9cdcaba71aba29e733c75c253fcf1540d Mon Sep 17 00:00:00 2001
From: Kaloyan Raev
Date: Wed, 16 Oct 2024 17:33:01 +0300
Subject: [PATCH 14/15] s3: Storj provider: fix server-side copy of files bigger than 5GB

Like some other S3-compatible providers, Storj does not currently
implement UploadPartCopy and returns NotImplemented errors for
multi-part server-side copies.

This patch works around the problem by raising --s3-copy-cutoff for
Storj to the maximum. This means that rclone will never use multi-part
copies for files in Storj. This includes files larger than 5GB which
(according to AWS documentation) must be copied with multi-part copy.
This works fine for Storj.

See https://github.com/storj/roadmap/issues/40
---
 backend/s3/s3.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index eda36a259..3c62dd3c5 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -3470,6 +3470,10 @@ func setQuirks(opt *Options) {
             opt.ChunkSize = 64 * fs.Mebi
         }
         useAlreadyExists = false // returns BucketAlreadyExists
+        // Storj doesn't support multi-part server side copy:
+        // https://github.com/storj/roadmap/issues/40
+        // So make cutoff very large which it does support
+        opt.CopyCutoff = math.MaxInt64
     case "Synology":
         useMultipartEtag = false
         useAlreadyExists = false // untested
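The reasoning behind the quirk above is that the copy cutoff is the size threshold at which rclone switches from a single-request server-side copy to a multipart copy, so pushing it to the maximum means the multipart path is never chosen. A rough sketch of that decision using assumed, simplified names (not the backend's real functions):

```go
package main

import (
	"fmt"
	"math"
)

// copyCutoff mirrors the quirk applied for Storj: with the cutoff at
// MaxInt64 the multipart branch below is effectively never taken.
const copyCutoff = math.MaxInt64

// serverSideCopy picks a copy strategy from the object size; the names
// here are illustrative only.
func serverSideCopy(size int64) string {
	if size >= copyCutoff {
		return "multipart copy (UploadPartCopy)" // not implemented by Storj
	}
	return "single-request copy (CopyObject)"
}

func main() {
	for _, size := range []int64{10 << 20, 6 << 30} { // 10 MiB and 6 GiB
		fmt.Printf("%d bytes -> %s\n", size, serverSideCopy(size))
	}
}
```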
From 175aa07cddff5352857f54ea0fea92dc940afde7 Mon Sep 17 00:00:00 2001
From: Nick Craig-Wood
Date: Thu, 3 Oct 2024 10:29:07 +0100
Subject: [PATCH 15/15] onedrive: fix Retry-After handling to look at 503 errors also

According to the Microsoft docs a Retry-After header can be returned on
429 errors and 503 errors, but before this change we were only checking
for it on 429 errors.

See: https://forum.rclone.org/t/onedrive-503-response-retry-after-not-used/48045
---
 backend/onedrive/onedrive.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backend/onedrive/onedrive.go b/backend/onedrive/onedrive.go
index 3ceac3f55..f7555017e 100644
--- a/backend/onedrive/onedrive.go
+++ b/backend/onedrive/onedrive.go
@@ -827,7 +827,7 @@ func shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, err
             retry = true
             fs.Debugf(nil, "HTTP 401: Unable to initialize RPS. Trying again.")
         }
-    case 429: // Too Many Requests.
+    case 429, 503: // Too Many Requests, Server Too Busy
         // see https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online
         if values := resp.Header["Retry-After"]; len(values) == 1 && values[0] != "" {
             retryAfter, parseErr := strconv.Atoi(values[0])