s3: add in config for all the supported S3 providers #2140

These are AWS, Ceph, Dreamhost, IBM COS S3, Minio, Wasabi and Other.

This configures endpoints where known and, where possible, ensures config
options don't appear for providers where they aren't valid.
Nick Craig-Wood 2018-04-13 16:08:00 +01:00
parent 8c3740c2c5
commit dc247d21ff
2 changed files with 542 additions and 516 deletions
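
The key mechanism in this change is the `Provider` field that now appears on options and on individual examples: a comma-separated list names the providers an entry applies to, and a leading `!` inverts the list so the entry is hidden for those providers instead. The sketch below illustrates those assumed matching semantics; `providerMatches` is a hypothetical helper written for this page, not a function from the commit.

```go
package main

import (
	"fmt"
	"strings"
)

// providerMatches illustrates the assumed semantics of the Provider field:
// "" matches every provider, "AWS,Ceph" matches only the listed providers,
// and "!AWS,IBMCOS" matches everything except the listed providers.
func providerMatches(filter, provider string) bool {
	if filter == "" {
		return true
	}
	negate := false
	if strings.HasPrefix(filter, "!") {
		negate = true
		filter = filter[1:]
	}
	for _, p := range strings.Split(filter, ",") {
		if p == provider {
			return !negate
		}
	}
	return negate
}

func main() {
	// The "region" option tagged Provider: "!AWS" should be offered for Ceph
	// but hidden for AWS; the AWS-only variant behaves the other way round.
	fmt.Println(providerMatches("!AWS", "Ceph")) // true
	fmt.Println(providerMatches("!AWS", "AWS"))  // false
	fmt.Println(providerMatches("AWS", "AWS"))   // true
}
```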

@ -54,60 +54,53 @@ func init() {
Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
NewFs: NewFs,
Options: []fs.Option{{
Name: "Provider",
Help: "Choose the S3 provider.",
Examples: []fs.OptionExample{
{
Name: fs.ConfigProvider,
Help: "Choose your S3 provider.",
Examples: []fs.OptionExample{{
Value: "AWS",
Help: "Choose this option to configure Storage to AWS S3",
},
{
Help: "Amazon Web Services (AWS) S3",
}, {
Value: "Ceph",
Help: "Choose this option to configure Storage to Ceph Systems",
},
{
Help: "Ceph Object Storage",
}, {
Value: "DigitalOcean",
Help: "Digital Ocean Spaces",
}, {
Value: "Dreamhost",
Help: " Choose this option to configure Storage to Dreamhost",
},
{
Help: "Dreamhost DreamObjects",
}, {
Value: "IBMCOS",
Help: "Choose this option to the configure Storage to IBM COS S3",
},
{
Help: "IBM COS S3",
}, {
Value: "Minio",
Help: "Choose this option to the configure Storage to Minio",
},
{
Help: "Minio Object Storage",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
}, {
Value: "Other",
Help: "Choose this option to the configure any other S3 Storage",
},
},
},
{
Help: "Any other S3 compatible provider",
}},
}, {
Name: "env_auth",
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Examples: []fs.OptionExample{
{
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter AWS credentials in the next step",
}, {
Value: "true",
Help: "Get AWS credentials from the environment (env vars or IAM)",
},
},
}},
}, {
Name: "access_key_id",
Help: "AWS Access Key ID - leave blank for anonymous access or runtime credentials.",
Provider: "AWS,Ceph,Dreamhost,IBMCOS,Minio,Other",
}, {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
Provider: "AWS,Ceph,Dreamhost,IBMCOS,Minio,Other",
}, {
Name: "region",
Help: "Region to connect to. Leave blank if you are using an S3 clone and you don't have a region.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Help: "Region to connect to.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "us-east-1",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
@ -150,14 +143,22 @@ func init() {
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}},
}, {
Name: "region",
Help: "Region to connect to. Leave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
}, {
Value: "other-v2-signature",
Help: "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.\nSet this and make sure you set the endpoint.",
Help: "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.\nSpecify if using an S3 clone such as Ceph.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
Provider: "AWS",
}, {
Name: "endpoint",
Help: "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
@ -265,14 +266,38 @@ func init() {
Value: "s3.tor01.objectstorage.service.networklayer.com",
Help: "Toronto Single Site Private Endpoint",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS",
Examples: []fs.OptionExample{{
Value: "objects-us-west-1.dream.io",
Help: "Dream Objects endpoint",
Provider: "Dreamhost",
}, {
Value: "nyc3.digitaloceanspaces.com",
Help: "Digital Ocean Spaces New York 3",
Provider: "DigitalOcean",
}, {
Value: "ams3.digitaloceanspaces.com",
Help: "Digital Ocean Spaces Amsterdam 3",
Provider: "DigitalOcean",
}, {
Value: "sgp1.digitaloceanspaces.com",
Help: "Digital Ocean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi Object Storage",
Provider: "Wasabi",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
Provider: "AWS,Ceph,Dreamhost,Minio",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region.",
@ -414,34 +439,37 @@ func init() {
Value: "tor01-flex",
Help: "Toronto Flex",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region. Leave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS",
}, {
Name: "acl",
Help: "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl",
Provider: "AWS,Ceph,Dreamhost,IBMCOS,Minio,Other",
Examples: []fs.OptionExample{{
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Provider: "!IBMCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Provider: "!IBMCOS",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Provider: "!IBMCOS",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Provider: "!IBMCOS",
}, {
Value: "bucket-owner-read",
Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Provider: "!IBMCOS",
}, {
Value: "bucket-owner-full-control",
Help: "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Provider: "!IBMCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
@ -462,7 +490,7 @@ func init() {
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
@ -473,7 +501,7 @@ func init() {
}, {
Name: "storage_class",
Help: "The storage class to use when storing objects in S3.",
Provider: "AWS,Ceph,Dreamhost,Minio,Other",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",

@ -6,19 +6,42 @@ date: "2016-07-11"
<i class="fa fa-amazon"></i> Amazon S3 Storage Providers
--------------------------------------------------------
* {{< provider name="AWS S3" home="https://aws.amazon.com/s3/" config="/s3/" >}}
The S3 backend can be used with a number of different providers:
* {{< provider name="AWS S3" home="https://aws.amazon.com/s3/" config="/s3/#amazon-s3" >}}
* {{< provider name="Ceph" home="http://ceph.com/" config="/s3/#ceph" >}}
* {{< provider name="DigitalOcean Spaces" home="https://www.digitalocean.com/products/object-storage/" config="/s3/#digitalocean-spaces" >}}
* {{< provider name="Dreamhost" home="https://www.dreamhost.com/cloud/storage/" config="/s3/#dreamhost" >}}
* {{< provider name="IBM COS S3" home="http://www.ibm.com/cloud/object-storage" config="/s3/#ibm-cos-s3" >}}
* {{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
* {{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
## AWS S3 {#amazon-s3}
Paths are specified as `remote:bucket` (or `remote:` for the `lsd`
command.) You may put subdirectories in too, eg `remote:bucket/path/to/dir`.
Once you have made a remote (see the provider specific section above)
you can use it like this:
See all buckets
rclone lsd remote:
Make a new bucket
rclone mkdir remote:bucket
List the contents of a bucket
rclone ls remote:bucket
Sync `/home/local/directory` to the remote bucket, deleting any excess
files in the bucket.
rclone sync /home/local/directory remote:bucket
## AWS S3 {#amazon-s3}
Here is an example of making an s3 configuration. First run
rclone config
@ -38,27 +61,33 @@ Choose a number from below, or type in your own value
\ "alias"
2 / Amazon Drive
\ "amazon cloud drive"
3 / Amazon S3 Complaint Storage Providers (Dreamhost, Ceph, Minio, IBM COS)
3 / Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)
\ "s3"
4 / Backblaze B2
\ "b2"
[snip]
23 / http Connection
\ "http"
Storage> 3
Choose the S3 provider.
Storage> s3
Choose your S3 provider.
Choose a number from below, or type in your own value
1 / Choose this option to configure Storage to AWS S3
1 / Amazon Web Services (AWS) S3
\ "AWS"
2 / Choose this option to configure Storage to Ceph Systems
2 / Ceph Object Storage
\ "Ceph"
3 / Choose this option to configure Storage to Dreamhost
3 / Digital Ocean Spaces
\ "DigitalOcean"
4 / Dreamhost DreamObjects
\ "Dreamhost"
4 / Choose this option to the configure Storage to IBM COS S3
5 / IBM COS S3
\ "IBMCOS"
5 / Choose this option to the configure Storage to Minio
6 / Minio Object Storage
\ "Minio"
Provider>1
7 / Wasabi Object Storage
\ "Wasabi"
8 / Any other S3 compatible provider
\ "Other"
provider> 1
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own value
1 / Enter AWS credentials in the next step
@ -70,7 +99,7 @@ AWS Access Key ID - leave blank for anonymous access or runtime credentials.
access_key_id> XXX
AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.
secret_access_key> YYY
Region to connect to. Leave blank if you are using an S3 clone and you don't have a region.
Region to connect to.
Choose a number from below, or type in your own value
/ The default endpoint - a good choice if you are unsure.
1 | US Region, Northern Virginia or Pacific Northwest.
@ -115,13 +144,9 @@ Choose a number from below, or type in your own value
/ South America (Sao Paulo) Region
14 | Needs location constraint sa-east-1.
\ "sa-east-1"
/ Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.
15 | Set this and make sure you set the endpoint.
\ "other-v2-signature"
region> 1
Endpoint for S3 API.
Leave blank if using AWS to use the default endpoint for the region.
Specify if using an S3 clone such as Ceph.
endpoint>
Location constraint - must be set to match the Region. Used when creating buckets only.
Choose a number from below, or type in your own value
@ -196,6 +221,8 @@ storage_class> 1
Remote config
--------------------
[remote]
type = s3
provider = AWS
env_auth = false
access_key_id = XXX
secret_access_key = YYY
@ -209,28 +236,9 @@ storage_class =
y) Yes this is OK
e) Edit this remote
d) Delete this remote
y/e/d> y
y/e/d>
```
This remote is called `remote` and can now be used like this
See all buckets
rclone lsd remote:
Make a new bucket
rclone mkdir remote:bucket
List the contents of a bucket
rclone ls remote:bucket
Sync `/home/local/directory` to the remote bucket, deleting any excess
files in the bucket.
rclone sync /home/local/directory remote:bucket
### --fast-list ###
This remote supports `--fast-list` which allows you to use fewer
@ -270,6 +278,7 @@ you will get an error, `incorrect region, the bucket is not in 'XXX'
region`.
### Authentication ###
There are two ways to supply `rclone` with a set of AWS
credentials. In order of precedence:
@ -282,8 +291,8 @@ credentials. In order of precedence:
- Access Key ID: `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY`
- Secret Access Key: `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`
- Session Token: `AWS_SESSION_TOKEN`
- Running `rclone` in an ECS task with an IAM role
- Running `rclone` on an EC2 instance with an IAM role
- Running `rclone` in an ECS task with an IAM role (AWS only)
- Running `rclone` on an EC2 instance with an IAM role (AWS only)
If none of these option actually end up providing `rclone` with AWS
credentials then S3 interaction will be non-authenticated (see below).
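For example, a remote that relies entirely on runtime credentials sets `env_auth = true` and leaves both keys blank. A minimal sketch of such a config (the remote name `envs3` is just illustrative):
```
[envs3]
type = s3
provider = AWS
env_auth = true
access_key_id =
secret_access_key =
```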
@ -387,44 +396,25 @@ Note that 2 chunks of this size are buffered in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.
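As a concrete illustration (assuming the chunk size is exposed via the `--s3-chunk-size` flag; 64M is an arbitrary value):
    rclone copy --s3-chunk-size 64M /path/to/large/files remote:bucket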
### Anonymous access to public buckets ###
If you want to use rclone to access a public bucket, configure with a
blank `access_key_id` and `secret_access_key`. Eg
blank `access_key_id` and `secret_access_key`. Your config should end
up looking like this:
```
No remotes found - make a new one
n) New remote
q) Quit config
n/q> n
name> anons3
What type of source is it?
Choose a number from below
1) amazon cloud drive
2) b2
3) drive
4) dropbox
5) google cloud storage
6) swift
7) hubic
8) local
9) onedrive
10) s3
11) yandex
type> 10
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). Only applies if access_key_id and secret_access_key is blank.
Choose a number from below, or type in your own value
* Enter AWS credentials in the next step
1) false
* Get AWS credentials from the environment (env vars or IAM)
2) true
env_auth> 1
AWS Access Key ID - leave blank for anonymous access or runtime credentials.
access_key_id>
AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.
secret_access_key>
...
[anons3]
type = s3
provider = AWS
env_auth = false
access_key_id =
secret_access_key =
region = us-east-1
endpoint =
location_constraint =
acl = private
server_side_encryption =
storage_class =
```
Then use it as normal with the name of the public bucket, eg
@ -447,6 +437,7 @@ your config:
```
[ceph]
type = s3
provider = Ceph
env_auth = false
access_key_id = XXX
secret_access_key = YYY
@ -494,6 +485,8 @@ your config:
```
[dreamobjects]
type = s3
provider = DreamHost
env_auth = false
access_key_id = your_access_key
secret_access_key = your_secret_key
@ -505,7 +498,6 @@ server_side_encryption =
storage_class =
```
### DigitalOcean Spaces ###
[Spaces](https://www.digitalocean.com/products/object-storage/) is an [S3-interoperable](https://developers.digitalocean.com/documentation/spaces/) object storage service from cloud provider DigitalOcean.
@ -533,6 +525,7 @@ The resulting configuration file should look like:
```
[spaces]
type = s3
provider = DigitalOcean
env_auth = false
access_key_id = YOUR_ACCESS_KEY
secret_access_key = YOUR_SECRET_KEY
@ -552,6 +545,7 @@ rclone copy /path/to/files spaces:my-new-space
```
### IBM COS (S3) ###
Information stored with IBM Cloud Object Storage is encrypted and dispersed across multiple geographic locations, and accessed through an implementation of the S3 API. This service makes use of the distributed storage technologies provided by IBMs Cloud Object Storage System (formerly Cleversafe). For more information visit: (http://www.ibm.com/cloud/object-storage)
To configure access to IBM COS S3, follow the steps below:
@ -772,6 +766,8 @@ Which makes the config file look like this
```
[minio]
type = s3
provider = Minio
env_auth = false
access_key_id = USWUXHGYZQYFYFFIT3RE
secret_access_key = MOJRH0mkL1IPauahWITSVvyDrQbEEIwljvmxdq03
@ -888,10 +884,12 @@ This will leave the config file looking like this.
```
[wasabi]
type = s3
provider = Wasabi
env_auth = false
access_key_id = YOURACCESSKEY
secret_access_key = YOURSECRETACCESSKEY
region = us-east-1
region =
endpoint = s3.wasabisys.com
location_constraint =
acl =