// Package s3 provides an interface to Amazon S3 object storage
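//
// A minimal example remote configuration using options registered below
// (a sketch - the remote name "s3remote" and the chosen region are
// illustrative only):
//
//	[s3remote]
//	type = s3
//	provider = AWS
//	env_auth = true
//	region = us-east-1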
package s3
import (
	"bytes"
	"context"
	"crypto/md5"
	"crypto/tls"
	"encoding/base64"
	"encoding/hex"
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"path"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/ncw/swift"
	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/pool"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/lib/rest"
	"github.com/rclone/rclone/lib/structs"
	"golang.org/x/sync/errgroup"
)

// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
2018-04-13 17:08:00 +02:00
Name : fs . ConfigProvider ,
Help : "Choose your S3 provider." ,
Examples : [ ] fs . OptionExample { {
Value : "AWS" ,
Help : "Amazon Web Services (AWS) S3" ,
2019-01-12 17:46:45 +01:00
} , {
Value : "Alibaba" ,
Help : "Alibaba Cloud Object Storage System (OSS) formerly Aliyun" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "Ceph" ,
Help : "Ceph Object Storage" ,
} , {
Value : "DigitalOcean" ,
Help : "Digital Ocean Spaces" ,
} , {
Value : "Dreamhost" ,
Help : "Dreamhost DreamObjects" ,
} , {
Value : "IBMCOS" ,
Help : "IBM COS S3" ,
} , {
Value : "Minio" ,
Help : "Minio Object Storage" ,
2019-01-12 17:46:45 +01:00
} , {
Value : "Netease" ,
Help : "Netease Object Storage (NOS)" ,
2020-06-12 17:04:16 +02:00
} , {
Value : "Scaleway" ,
Help : "Scaleway Object Storage" ,
2020-01-31 00:21:24 +01:00
} , {
Value : "StackPath" ,
Help : "StackPath Object Storage" ,
2020-09-08 17:34:25 +02:00
} , {
Value : "TencentCOS" ,
Help : "Tencent Cloud Object Storage (COS)" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "Wasabi" ,
Help : "Wasabi Object Storage" ,
} , {
Value : "Other" ,
Help : "Any other S3 compatible provider" ,
} } ,
} , {
2018-05-14 19:06:57 +02:00
Name : "env_auth" ,
Help : "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank." ,
Default : false ,
2018-04-13 17:08:00 +02:00
Examples : [ ] fs . OptionExample { {
Value : "false" ,
Help : "Enter AWS credentials in the next step" ,
} , {
Value : "true" ,
Help : "Get AWS credentials from the environment (env vars or IAM)" ,
} } ,
} , {
Name : "access_key_id" ,
2018-05-14 19:06:57 +02:00
Help : "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials." ,
2018-04-13 17:08:00 +02:00
} , {
Name : "secret_access_key" ,
2018-05-14 19:06:57 +02:00
Help : "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials." ,
2018-04-13 17:08:00 +02:00
} , {
2020-10-03 10:44:25 +02:00
// References:
// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
2018-04-13 17:08:00 +02:00
Name : "region" ,
Help : "Region to connect to." ,
Provider : "AWS" ,
Examples : [ ] fs . OptionExample { {
Value : "us-east-1" ,
2020-10-13 23:43:00 +02:00
Help : "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia, or Pacific Northwest.\nLeave location constraint empty." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "us-east-2" ,
Help : "US East (Ohio) Region\nNeeds location constraint us-east-2." ,
} , {
Value : "us-west-1" ,
Help : "US West (Northern California) Region\nNeeds location constraint us-west-1." ,
2020-10-03 10:44:25 +02:00
} , {
Value : "us-west-2" ,
Help : "US West (Oregon) Region\nNeeds location constraint us-west-2." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "ca-central-1" ,
Help : "Canada (Central) Region\nNeeds location constraint ca-central-1." ,
} , {
Value : "eu-west-1" ,
Help : "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1." ,
} , {
Value : "eu-west-2" ,
Help : "EU (London) Region\nNeeds location constraint eu-west-2." ,
2020-10-03 10:44:25 +02:00
} , {
Value : "eu-west-3" ,
Help : "EU (Paris) Region\nNeeds location constraint eu-west-3." ,
2019-02-02 20:44:26 +01:00
} , {
Value : "eu-north-1" ,
Help : "EU (Stockholm) Region\nNeeds location constraint eu-north-1." ,
2020-10-03 10:44:25 +02:00
} , {
Value : "eu-south-1" ,
Help : "EU (Milan) Region\nNeeds location constraint eu-south-1." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "eu-central-1" ,
Help : "EU (Frankfurt) Region\nNeeds location constraint eu-central-1." ,
} , {
Value : "ap-southeast-1" ,
Help : "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1." ,
} , {
Value : "ap-southeast-2" ,
Help : "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2." ,
} , {
Value : "ap-northeast-1" ,
Help : "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1." ,
} , {
Value : "ap-northeast-2" ,
Help : "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2." ,
2020-10-03 10:44:25 +02:00
} , {
Value : "ap-northeast-3" ,
Help : "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "ap-south-1" ,
Help : "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1." ,
2020-01-02 12:10:48 +01:00
} , {
Value : "ap-east-1" ,
2020-09-18 13:03:13 +02:00
Help : "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "sa-east-1" ,
Help : "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1." ,
2020-10-03 10:44:25 +02:00
} , {
Value : "me-south-1" ,
Help : "Middle East (Bahrain) Region\nNeeds location constraint me-south-1." ,
} , {
Value : "af-south-1" ,
Help : "Africa (Cape Town) Region\nNeeds location constraint af-south-1." ,
} , {
Value : "cn-north-1" ,
Help : "China (Beijing) Region\nNeeds location constraint cn-north-1." ,
} , {
Value : "cn-northwest-1" ,
Help : "China (Ningxia) Region\nNeeds location constraint cn-northwest-1." ,
} , {
Value : "us-gov-east-1" ,
Help : "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1." ,
} , {
Value : "us-gov-west-1" ,
Help : "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1." ,
2018-04-13 17:08:00 +02:00
} } ,
2020-06-12 17:04:16 +02:00
} , {
Name : "region" ,
Help : "Region to connect to." ,
Provider : "Scaleway" ,
Examples : [ ] fs . OptionExample { {
Value : "nl-ams" ,
Help : "Amsterdam, The Netherlands" ,
} , {
Value : "fr-par" ,
Help : "Paris, France" ,
} } ,
2018-04-13 17:08:00 +02:00
} , {
Name : "region" ,
2018-05-14 19:06:57 +02:00
Help : "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region." ,
2020-09-08 17:34:25 +02:00
Provider : "!AWS,Alibaba,Scaleway,TencentCOS" ,
2018-04-13 17:08:00 +02:00
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "Use this if unsure. Will use v4 signatures and an empty region." ,
} , {
Value : "other-v2-signature" ,
2020-10-13 23:49:58 +02:00
Help : "Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH." ,
2018-04-13 17:08:00 +02:00
} } ,
} , {
Name : "endpoint" ,
Help : "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region." ,
Provider : "AWS" ,
} , {
Name : "endpoint" ,
Help : "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise." ,
Provider : "IBMCOS" ,
Examples : [ ] fs . OptionExample { {
2020-08-30 18:21:11 +02:00
Value : "s3.us.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Cross Region Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.dal.us.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Cross Region Dallas Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.wdc.us.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Cross Region Washington DC Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.sjc.us.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Cross Region San Jose Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.us.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Cross Region Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.dal.us.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Cross Region Dallas Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.wdc.us.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Cross Region Washington DC Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.sjc.us.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Cross Region San Jose Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.us-east.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Region East Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.us-east.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Region East Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.us-south.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Region South Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.us-south.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "US Region South Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.eu.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "EU Cross Region Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.fra.eu.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "EU Cross Region Frankfurt Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.mil.eu.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "EU Cross Region Milan Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.ams.eu.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "EU Cross Region Amsterdam Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.eu.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "EU Cross Region Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.fra.eu.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "EU Cross Region Frankfurt Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.mil.eu.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "EU Cross Region Milan Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.ams.eu.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "EU Cross Region Amsterdam Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.eu-gb.cloud-object-storage.appdomain.cloud" ,
2019-02-07 18:41:17 +01:00
Help : "Great Britain Endpoint" ,
2018-04-13 17:08:00 +02:00
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.eu-gb.cloud-object-storage.appdomain.cloud" ,
2019-02-07 18:41:17 +01:00
Help : "Great Britain Private Endpoint" ,
2018-04-13 17:08:00 +02:00
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.eu-de.cloud-object-storage.appdomain.cloud" ,
Help : "EU Region DE Endpoint" ,
} , {
Value : "s3.private.eu-de.cloud-object-storage.appdomain.cloud" ,
Help : "EU Region DE Private Endpoint" ,
} , {
Value : "s3.ap.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "APAC Cross Regional Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.tok.ap.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "APAC Cross Regional Tokyo Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.hkg.ap.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "APAC Cross Regional HongKong Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.seo.ap.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "APAC Cross Regional Seoul Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.ap.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "APAC Cross Regional Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.tok.ap.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "APAC Cross Regional Tokyo Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.hkg.ap.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "APAC Cross Regional HongKong Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.seo.ap.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "APAC Cross Regional Seoul Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.jp-tok.cloud-object-storage.appdomain.cloud" ,
Help : "APAC Region Japan Endpoint" ,
} , {
Value : "s3.private.jp-tok.cloud-object-storage.appdomain.cloud" ,
Help : "APAC Region Japan Private Endpoint" ,
} , {
Value : "s3.au-syd.cloud-object-storage.appdomain.cloud" ,
Help : "APAC Region Australia Endpoint" ,
} , {
Value : "s3.private.au-syd.cloud-object-storage.appdomain.cloud" ,
Help : "APAC Region Australia Private Endpoint" ,
} , {
Value : "s3.ams03.cloud-object-storage.appdomain.cloud" ,
Help : "Amsterdam Single Site Endpoint" ,
} , {
Value : "s3.private.ams03.cloud-object-storage.appdomain.cloud" ,
Help : "Amsterdam Single Site Private Endpoint" ,
} , {
Value : "s3.che01.cloud-object-storage.appdomain.cloud" ,
Help : "Chennai Single Site Endpoint" ,
} , {
Value : "s3.private.che01.cloud-object-storage.appdomain.cloud" ,
Help : "Chennai Single Site Private Endpoint" ,
} , {
Value : "s3.mel01.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "Melbourne Single Site Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.mel01.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "Melbourne Single Site Private Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.osl01.cloud-object-storage.appdomain.cloud" ,
Help : "Oslo Single Site Endpoint" ,
} , {
Value : "s3.private.osl01.cloud-object-storage.appdomain.cloud" ,
Help : "Oslo Single Site Private Endpoint" ,
} , {
Value : "s3.tor01.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "Toronto Single Site Endpoint" ,
} , {
2020-08-30 18:21:11 +02:00
Value : "s3.private.tor01.cloud-object-storage.appdomain.cloud" ,
2018-04-13 17:08:00 +02:00
Help : "Toronto Single Site Private Endpoint" ,
2020-08-30 18:21:11 +02:00
} , {
Value : "s3.seo01.cloud-object-storage.appdomain.cloud" ,
Help : "Seoul Single Site Endpoint" ,
} , {
Value : "s3.private.seo01.cloud-object-storage.appdomain.cloud" ,
Help : "Seoul Single Site Private Endpoint" ,
} , {
Value : "s3.mon01.cloud-object-storage.appdomain.cloud" ,
Help : "Montreal Single Site Endpoint" ,
} , {
Value : "s3.private.mon01.cloud-object-storage.appdomain.cloud" ,
Help : "Montreal Single Site Private Endpoint" ,
} , {
Value : "s3.mex01.cloud-object-storage.appdomain.cloud" ,
Help : "Mexico Single Site Endpoint" ,
} , {
Value : "s3.private.mex01.cloud-object-storage.appdomain.cloud" ,
Help : "Mexico Single Site Private Endpoint" ,
} , {
Value : "s3.sjc04.cloud-object-storage.appdomain.cloud" ,
Help : "San Jose Single Site Endpoint" ,
} , {
Value : "s3.private.sjc04.cloud-object-storage.appdomain.cloud" ,
Help : "San Jose Single Site Private Endpoint" ,
} , {
Value : "s3.mil01.cloud-object-storage.appdomain.cloud" ,
Help : "Milan Single Site Endpoint" ,
} , {
Value : "s3.private.mil01.cloud-object-storage.appdomain.cloud" ,
Help : "Milan Single Site Private Endpoint" ,
} , {
Value : "s3.hkg02.cloud-object-storage.appdomain.cloud" ,
Help : "Hong Kong Single Site Endpoint" ,
} , {
Value : "s3.private.hkg02.cloud-object-storage.appdomain.cloud" ,
Help : "Hong Kong Single Site Private Endpoint" ,
} , {
Value : "s3.par01.cloud-object-storage.appdomain.cloud" ,
Help : "Paris Single Site Endpoint" ,
} , {
Value : "s3.private.par01.cloud-object-storage.appdomain.cloud" ,
Help : "Paris Single Site Private Endpoint" ,
} , {
Value : "s3.sng01.cloud-object-storage.appdomain.cloud" ,
Help : "Singapore Single Site Endpoint" ,
} , {
Value : "s3.private.sng01.cloud-object-storage.appdomain.cloud" ,
Help : "Singapore Single Site Private Endpoint" ,
2018-04-13 17:08:00 +02:00
} } ,
2019-01-12 17:46:45 +01:00
} , {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name : "endpoint" ,
Help : "Endpoint for OSS API." ,
Provider : "Alibaba" ,
Examples : [ ] fs . OptionExample { {
Value : "oss-cn-hangzhou.aliyuncs.com" ,
Help : "East China 1 (Hangzhou)" ,
} , {
Value : "oss-cn-shanghai.aliyuncs.com" ,
Help : "East China 2 (Shanghai)" ,
} , {
Value : "oss-cn-qingdao.aliyuncs.com" ,
Help : "North China 1 (Qingdao)" ,
} , {
Value : "oss-cn-beijing.aliyuncs.com" ,
Help : "North China 2 (Beijing)" ,
} , {
Value : "oss-cn-zhangjiakou.aliyuncs.com" ,
Help : "North China 3 (Zhangjiakou)" ,
} , {
Value : "oss-cn-huhehaote.aliyuncs.com" ,
Help : "North China 5 (Huhehaote)" ,
} , {
Value : "oss-cn-shenzhen.aliyuncs.com" ,
Help : "South China 1 (Shenzhen)" ,
} , {
Value : "oss-cn-hongkong.aliyuncs.com" ,
Help : "Hong Kong (Hong Kong)" ,
} , {
Value : "oss-us-west-1.aliyuncs.com" ,
Help : "US West 1 (Silicon Valley)" ,
} , {
Value : "oss-us-east-1.aliyuncs.com" ,
Help : "US East 1 (Virginia)" ,
} , {
Value : "oss-ap-southeast-1.aliyuncs.com" ,
Help : "Southeast Asia Southeast 1 (Singapore)" ,
} , {
Value : "oss-ap-southeast-2.aliyuncs.com" ,
Help : "Asia Pacific Southeast 2 (Sydney)" ,
} , {
Value : "oss-ap-southeast-3.aliyuncs.com" ,
Help : "Southeast Asia Southeast 3 (Kuala Lumpur)" ,
} , {
Value : "oss-ap-southeast-5.aliyuncs.com" ,
Help : "Asia Pacific Southeast 5 (Jakarta)" ,
} , {
Value : "oss-ap-northeast-1.aliyuncs.com" ,
Help : "Asia Pacific Northeast 1 (Japan)" ,
} , {
Value : "oss-ap-south-1.aliyuncs.com" ,
Help : "Asia Pacific South 1 (Mumbai)" ,
} , {
Value : "oss-eu-central-1.aliyuncs.com" ,
Help : "Central Europe 1 (Frankfurt)" ,
} , {
Value : "oss-eu-west-1.aliyuncs.com" ,
Help : "West Europe (London)" ,
} , {
Value : "oss-me-east-1.aliyuncs.com" ,
Help : "Middle East 1 (Dubai)" ,
} } ,
2020-06-12 17:04:16 +02:00
} , {
Name : "endpoint" ,
Help : "Endpoint for Scaleway Object Storage." ,
Provider : "Scaleway" ,
Examples : [ ] fs . OptionExample { {
Value : "s3.nl-ams.scw.cloud" ,
Help : "Amsterdam Endpoint" ,
} , {
Value : "s3.fr-par.scw.cloud" ,
Help : "Paris Endpoint" ,
} } ,
2020-01-31 00:21:24 +01:00
} , {
Name : "endpoint" ,
Help : "Endpoint for StackPath Object Storage." ,
Provider : "StackPath" ,
Examples : [ ] fs . OptionExample { {
Value : "s3.us-east-2.stackpathstorage.com" ,
Help : "US East Endpoint" ,
} , {
Value : "s3.us-west-1.stackpathstorage.com" ,
Help : "US West Endpoint" ,
} , {
Value : "s3.eu-central-1.stackpathstorage.com" ,
Help : "EU Endpoint" ,
} } ,
2020-09-08 17:34:25 +02:00
} , {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
Name : "endpoint" ,
Help : "Endpoint for Tencent COS API." ,
Provider : "TencentCOS" ,
Examples : [ ] fs . OptionExample { {
Value : "cos.ap-beijing.myqcloud.com" ,
Help : "Beijing Region." ,
} , {
Value : "cos.ap-nanjing.myqcloud.com" ,
Help : "Nanjing Region." ,
} , {
Value : "cos.ap-shanghai.myqcloud.com" ,
Help : "Shanghai Region." ,
} , {
Value : "cos.ap-guangzhou.myqcloud.com" ,
Help : "Guangzhou Region." ,
} , {
Value : "cos.ap-chengdu.myqcloud.com" ,
Help : "Chengdu Region." ,
} , {
Value : "cos.ap-chongqing.myqcloud.com" ,
Help : "Chongqing Region." ,
} , {
Value : "cos.ap-hongkong.myqcloud.com" ,
Help : "Hong Kong (China) Region." ,
} , {
Value : "cos.ap-singapore.myqcloud.com" ,
Help : "Singapore Region." ,
} , {
Value : "cos.ap-mumbai.myqcloud.com" ,
Help : "Mumbai Region." ,
} , {
Value : "cos.ap-seoul.myqcloud.com" ,
Help : "Seoul Region." ,
} , {
Value : "cos.ap-bangkok.myqcloud.com" ,
Help : "Bangkok Region." ,
} , {
Value : "cos.ap-tokyo.myqcloud.com" ,
Help : "Tokyo Region." ,
} , {
Value : "cos.na-siliconvalley.myqcloud.com" ,
Help : "Silicon Valley Region." ,
} , {
Value : "cos.na-ashburn.myqcloud.com" ,
Help : "Virginia Region." ,
} , {
Value : "cos.na-toronto.myqcloud.com" ,
Help : "Toronto Region." ,
} , {
Value : "cos.eu-frankfurt.myqcloud.com" ,
Help : "Frankfurt Region." ,
} , {
Value : "cos.eu-moscow.myqcloud.com" ,
Help : "Moscow Region." ,
} , {
Value : "cos.accelerate.myqcloud.com" ,
Help : "Use Tencent COS Accelerate Endpoint." ,
} } ,
2018-04-13 17:08:00 +02:00
} , {
Name : "endpoint" ,
Help : "Endpoint for S3 API.\nRequired when using an S3 clone." ,
2020-09-08 17:34:25 +02:00
Provider : "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath" ,
2018-04-13 17:08:00 +02:00
Examples : [ ] fs . OptionExample { {
2019-02-13 22:10:43 +01:00
Value : "objects-us-east-1.dream.io" ,
2018-04-13 17:08:00 +02:00
Help : "Dream Objects endpoint" ,
Provider : "Dreamhost" ,
} , {
Value : "nyc3.digitaloceanspaces.com" ,
Help : "Digital Ocean Spaces New York 3" ,
Provider : "DigitalOcean" ,
} , {
Value : "ams3.digitaloceanspaces.com" ,
Help : "Digital Ocean Spaces Amsterdam 3" ,
Provider : "DigitalOcean" ,
} , {
Value : "sgp1.digitaloceanspaces.com" ,
Help : "Digital Ocean Spaces Singapore 1" ,
Provider : "DigitalOcean" ,
} , {
Value : "s3.wasabisys.com" ,
2018-11-17 17:24:00 +01:00
Help : "Wasabi US East endpoint" ,
Provider : "Wasabi" ,
} , {
Value : "s3.us-west-1.wasabisys.com" ,
Help : "Wasabi US West endpoint" ,
2018-04-13 17:08:00 +02:00
Provider : "Wasabi" ,
2019-05-15 10:22:06 +02:00
} , {
Value : "s3.eu-central-1.wasabisys.com" ,
Help : "Wasabi EU Central endpoint" ,
Provider : "Wasabi" ,
2018-04-13 17:08:00 +02:00
} } ,
} , {
Name : "location_constraint" ,
2018-05-14 19:06:57 +02:00
Help : "Location constraint - must be set to match the Region.\nUsed when creating buckets only." ,
2018-04-13 17:08:00 +02:00
Provider : "AWS" ,
Examples : [ ] fs . OptionExample { {
Value : "" ,
2020-10-13 23:43:00 +02:00
Help : "Empty for US Region, Northern Virginia, or Pacific Northwest." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "us-east-2" ,
Help : "US East (Ohio) Region." ,
} , {
Value : "us-west-1" ,
Help : "US West (Northern California) Region." ,
2020-10-03 10:44:25 +02:00
} , {
Value : "us-west-2" ,
Help : "US West (Oregon) Region." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "ca-central-1" ,
Help : "Canada (Central) Region." ,
} , {
Value : "eu-west-1" ,
Help : "EU (Ireland) Region." ,
} , {
Value : "eu-west-2" ,
Help : "EU (London) Region." ,
2020-10-03 10:44:25 +02:00
} , {
Value : "eu-west-3" ,
Help : "EU (Paris) Region." ,
2019-02-02 20:44:26 +01:00
} , {
Value : "eu-north-1" ,
Help : "EU (Stockholm) Region." ,
2020-10-03 10:44:25 +02:00
} , {
Value : "eu-south-1" ,
Help : "EU (Milan) Region." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "EU" ,
Help : "EU Region." ,
} , {
Value : "ap-southeast-1" ,
Help : "Asia Pacific (Singapore) Region." ,
} , {
Value : "ap-southeast-2" ,
Help : "Asia Pacific (Sydney) Region." ,
} , {
Value : "ap-northeast-1" ,
Help : "Asia Pacific (Tokyo) Region." ,
} , {
Value : "ap-northeast-2" ,
2020-10-03 10:44:25 +02:00
Help : "Asia Pacific (Seoul) Region." ,
} , {
Value : "ap-northeast-3" ,
Help : "Asia Pacific (Osaka-Local) Region." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "ap-south-1" ,
2020-10-03 10:44:25 +02:00
Help : "Asia Pacific (Mumbai) Region." ,
2020-01-02 12:10:48 +01:00
} , {
Value : "ap-east-1" ,
2020-10-03 10:44:25 +02:00
Help : "Asia Pacific (Hong Kong) Region." ,
2018-04-13 17:08:00 +02:00
} , {
Value : "sa-east-1" ,
Help : "South America (Sao Paulo) Region." ,
2020-10-03 10:44:25 +02:00
} , {
Value : "me-south-1" ,
Help : "Middle East (Bahrain) Region." ,
} , {
Value : "af-south-1" ,
Help : "Africa (Cape Town) Region." ,
} , {
Value : "cn-north-1" ,
Help : "China (Beijing) Region" ,
} , {
Value : "cn-northwest-1" ,
Help : "China (Ningxia) Region." ,
} , {
Value : "us-gov-east-1" ,
Help : "AWS GovCloud (US-East) Region." ,
} , {
Value : "us-gov-west-1" ,
Help : "AWS GovCloud (US) Region." ,
2018-04-13 17:08:00 +02:00
} } ,
} , {
Name : "location_constraint" ,
2018-05-14 19:06:57 +02:00
Help : "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter" ,
2018-04-13 17:08:00 +02:00
Provider : "IBMCOS" ,
Examples : [ ] fs . OptionExample { {
Value : "us-standard" ,
Help : "US Cross Region Standard" ,
} , {
Value : "us-vault" ,
Help : "US Cross Region Vault" ,
} , {
Value : "us-cold" ,
Help : "US Cross Region Cold" ,
} , {
Value : "us-flex" ,
Help : "US Cross Region Flex" ,
} , {
Value : "us-east-standard" ,
Help : "US East Region Standard" ,
} , {
Value : "us-east-vault" ,
Help : "US East Region Vault" ,
} , {
Value : "us-east-cold" ,
Help : "US East Region Cold" ,
} , {
Value : "us-east-flex" ,
Help : "US East Region Flex" ,
} , {
Value : "us-south-standard" ,
2019-02-07 18:41:17 +01:00
Help : "US South Region Standard" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "us-south-vault" ,
Help : "US South Region Vault" ,
} , {
Value : "us-south-cold" ,
Help : "US South Region Cold" ,
} , {
Value : "us-south-flex" ,
Help : "US South Region Flex" ,
} , {
Value : "eu-standard" ,
Help : "EU Cross Region Standard" ,
} , {
Value : "eu-vault" ,
Help : "EU Cross Region Vault" ,
} , {
Value : "eu-cold" ,
Help : "EU Cross Region Cold" ,
} , {
Value : "eu-flex" ,
Help : "EU Cross Region Flex" ,
} , {
Value : "eu-gb-standard" ,
2019-02-07 18:41:17 +01:00
Help : "Great Britain Standard" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "eu-gb-vault" ,
2019-02-07 18:41:17 +01:00
Help : "Great Britain Vault" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "eu-gb-cold" ,
2019-02-07 18:41:17 +01:00
Help : "Great Britain Cold" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "eu-gb-flex" ,
2019-02-07 18:41:17 +01:00
Help : "Great Britain Flex" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "ap-standard" ,
Help : "APAC Standard" ,
} , {
Value : "ap-vault" ,
Help : "APAC Vault" ,
} , {
Value : "ap-cold" ,
Help : "APAC Cold" ,
} , {
Value : "ap-flex" ,
Help : "APAC Flex" ,
} , {
Value : "mel01-standard" ,
Help : "Melbourne Standard" ,
} , {
Value : "mel01-vault" ,
Help : "Melbourne Vault" ,
} , {
Value : "mel01-cold" ,
Help : "Melbourne Cold" ,
} , {
Value : "mel01-flex" ,
Help : "Melbourne Flex" ,
} , {
Value : "tor01-standard" ,
Help : "Toronto Standard" ,
} , {
Value : "tor01-vault" ,
Help : "Toronto Vault" ,
} , {
Value : "tor01-cold" ,
Help : "Toronto Cold" ,
} , {
Value : "tor01-flex" ,
Help : "Toronto Flex" ,
} } ,
} , {
Name : "location_constraint" ,
2018-05-14 19:06:57 +02:00
Help : "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only." ,
2020-09-08 17:34:25 +02:00
Provider : "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS" ,
2018-04-13 17:08:00 +02:00
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.

This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.

For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "TencentCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "!IBMCOS,TencentCOS",
2018-04-13 17:08:00 +02:00
} , {
Value : "public-read" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ access." ,
Provider : "!IBMCOS" ,
} , {
Value : "public-read-write" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended." ,
Provider : "!IBMCOS" ,
} , {
Value : "authenticated-read" ,
Help : "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access." ,
Provider : "!IBMCOS" ,
} , {
Value : "bucket-owner-read" ,
Help : "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it." ,
Provider : "!IBMCOS" ,
} , {
Value : "bucket-owner-full-control" ,
Help : "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it." ,
Provider : "!IBMCOS" ,
} , {
Value : "private" ,
Help : "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS" ,
2018-04-12 18:05:53 +02:00
Provider : "IBMCOS" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "public-read" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS" ,
Provider : "IBMCOS" ,
} , {
Value : "public-read-write" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS" ,
2018-04-12 18:05:53 +02:00
Provider : "IBMCOS" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "authenticated-read" ,
Help : "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS" ,
Provider : "IBMCOS" ,
} } ,
}, {
Name: "bucket_acl",
Help: `Canned ACL used when creating buckets.

For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied only when creating buckets. If it
isn't set then "acl" is used instead.`,
Advanced: true,
Examples : [ ] fs . OptionExample { {
Value : "private" ,
Help : "Owner gets FULL_CONTROL. No one else has access rights (default)." ,
} , {
Value : "public-read" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ access." ,
} , {
Value : "public-read-write" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended." ,
} , {
Value : "authenticated-read" ,
Help : "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access." ,
} } ,
2018-04-13 17:08:00 +02:00
} , {
Name : "server_side_encryption" ,
Help : "The server-side encryption algorithm used when storing this object in S3." ,
2020-03-30 12:26:52 +02:00
Provider : "AWS,Ceph,Minio" ,
2018-04-13 17:08:00 +02:00
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "None" ,
} , {
Value : "AES256" ,
Help : "AES256" ,
2018-08-30 18:08:27 +02:00
} , {
Value : "aws:kms" ,
Help : "aws:kms" ,
} } ,
2020-03-30 12:26:52 +02:00
} , {
Name : "sse_customer_algorithm" ,
Help : "If using SSE-C, the server-side encryption algorithm used when storing this object in S3." ,
Provider : "AWS,Ceph,Minio" ,
Advanced : true ,
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "None" ,
} , {
Value : "AES256" ,
Help : "AES256" ,
} } ,
2018-08-30 18:08:27 +02:00
} , {
Name : "sse_kms_key_id" ,
Help : "If using KMS ID you must provide the ARN of Key." ,
2020-03-30 12:26:52 +02:00
Provider : "AWS,Ceph,Minio" ,
2018-08-30 18:08:27 +02:00
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "None" ,
} , {
Value : "arn:aws:kms:us-east-1:*" ,
Help : "arn:aws:kms:*" ,
2018-04-13 17:08:00 +02:00
} } ,
2020-03-30 12:26:52 +02:00
} , {
Name : "sse_customer_key" ,
2020-05-20 12:39:20 +02:00
Help : "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data." ,
2020-03-30 12:26:52 +02:00
Provider : "AWS,Ceph,Minio" ,
Advanced : true ,
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "None" ,
} } ,
} , {
Name: "sse_customer_key_md5",
Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).

If you leave it blank, this is calculated automatically from the sse_customer_key provided.
`,
Provider: "AWS,Ceph,Minio",
Advanced: true,
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "None" ,
} } ,
2018-04-13 17:08:00 +02:00
} , {
Name : "storage_class" ,
2018-10-01 19:36:15 +02:00
Help : "The storage class to use when storing new objects in S3." ,
2018-04-13 17:08:00 +02:00
Provider : "AWS" ,
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "Default" ,
} , {
Value : "STANDARD" ,
Help : "Standard storage class" ,
} , {
Value : "REDUCED_REDUNDANCY" ,
Help : "Reduced redundancy storage class" ,
} , {
Value : "STANDARD_IA" ,
Help : "Standard Infrequent Access storage class" ,
} , {
Value : "ONEZONE_IA" ,
Help : "One Zone Infrequent Access storage class" ,
2018-12-06 22:53:05 +01:00
} , {
Value : "GLACIER" ,
Help : "Glacier storage class" ,
2019-04-05 05:14:05 +02:00
} , {
Value : "DEEP_ARCHIVE" ,
Help : "Glacier Deep Archive storage class" ,
2019-04-04 19:04:28 +02:00
} , {
Value : "INTELLIGENT_TIERING" ,
Help : "Intelligent-Tiering storage class" ,
2018-04-13 17:08:00 +02:00
} } ,
2019-01-12 17:46:45 +01:00
} , {
2019-01-12 21:41:47 +01:00
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
2019-01-12 17:46:45 +01:00
Name : "storage_class" ,
Help : "The storage class to use when storing new objects in OSS." ,
Provider : "Alibaba" ,
Examples : [ ] fs . OptionExample { {
2019-01-12 21:41:47 +01:00
Value : "" ,
Help : "Default" ,
} , {
Value : "STANDARD" ,
2019-01-12 17:46:45 +01:00
Help : "Standard storage class" ,
} , {
2019-01-12 21:41:47 +01:00
Value : "GLACIER" ,
2019-01-12 17:46:45 +01:00
Help : "Archive storage mode." ,
} , {
2019-01-12 21:41:47 +01:00
Value : "STANDARD_IA" ,
2019-01-12 17:46:45 +01:00
Help : "Infrequent access storage mode." ,
} } ,
2020-09-08 17:34:25 +02:00
} , {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
Name : "storage_class" ,
Help : "The storage class to use when storing new objects in Tencent COS." ,
Provider : "TencentCOS" ,
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "Default" ,
} , {
Value : "STANDARD" ,
Help : "Standard storage class" ,
} , {
Value : "ARCHIVE" ,
Help : "Archive storage mode." ,
} , {
Value : "STANDARD_IA" ,
Help : "Infrequent access storage mode." ,
} } ,
2020-06-12 17:04:16 +02:00
} , {
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
Name : "storage_class" ,
Help : "The storage class to use when storing new objects in S3." ,
Provider : "Scaleway" ,
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "Default" ,
} , {
Value : "STANDARD" ,
Help : "The Standard class for any upload; suitable for on-demand content like streaming or CDN." ,
} , {
Value : "GLACIER" ,
Help : "Archived storage; prices are lower, but it needs to be restored first to be accessed." ,
} } ,
2018-11-26 22:09:23 +01:00
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff or files with unknown
size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
photos or google docs) they will be uploaded as multipart uploads
using this chunk size.

Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.

Rclone will automatically increase the chunk size when uploading a
large file of known size to stay below the 10,000 chunks limit.

Files of unknown size are uploaded with the configured
chunk_size. Since the default chunk size is 5 MB and there can be at
most 10,000 chunks, this means that by default the maximum size of
a file you can stream upload is 48 GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "max_upload_parts",
Help: `Maximum number of parts in a multipart upload.

This option defines the maximum number of multipart chunks to use
when doing a multipart upload.

This can be useful if a service does not support the AWS S3
specification of 10,000 chunks.

Rclone will automatically increase the chunk size when uploading a
large file of a known size to stay below this number of chunks limit.
`,
Default: maxUploadParts,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy

Any files larger than this that need to be server-side copied will be
copied in chunks of this size.

The minimum is 0 and the maximum is 5 GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: `Don't store MD5 checksum with object metadata

Normally rclone will calculate the MD5 checksum of the input before
uploading it so it can add it to metadata on the object. This is great
for data integrity checking but can cause long delays for large files
to start uploading.`,
Default: false,
Advanced: true,
}, {
Name: "shared_credentials_file",
Help: `Path to the shared credentials file

If env_auth = true then rclone can use a shared credentials file.

If this variable is empty rclone will look for the
"AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty
it will default to the current user's home directory.

Linux/OSX: "$HOME/.aws/credentials"
Windows:   "%USERPROFILE%\.aws\credentials"
`,
Advanced: true,
}, {
Name: "profile",
Help: `Profile to use in the shared credentials file

If env_auth = true then rclone can use a shared credentials file. This
variable controls which profile is used in that file.

If empty it will default to the environment variable "AWS_PROFILE" or
"default" if that environment variable is also not set.
`,
Advanced: true,
}, {
Name: "session_token",
Help: "An AWS session token",
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 4,
Advanced: true,
}, {
Name: "force_path_style",
Help: `If true use path style access if false use virtual hosted style.

If this is true (the default) then rclone will use path style access,
if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.

Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
Default: true,
Advanced: true,
}, {
Name: "v2_auth",
Help: `If true use v2 authentication.

If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.

Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.`,
Default: false,
Advanced: true,
}, {
Name: "use_accelerate_endpoint",
Provider: "AWS",
Help: `If true use the AWS S3 accelerated endpoint.

See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
Default: false,
Advanced: true,
}, {
Name: "leave_parts_on_error",
Provider: "AWS",
Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.

It should be set to true for resuming uploads across different sessions.

WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up.
`,
Default: false,
Advanced: true,
}, {
Name: "list_chunk",
Help: `Size of listing chunk (response list for each ListObject S3 request).

This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification.
Most services truncate the response list to 1000 objects even if requested more than that.
In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html).
In Ceph, this can be increased with the "rgw list buckets max chunk" option.
`,
Default: 1000,
Advanced: true,
}, {
Name: "no_check_bucket",
Help: `If set, don't attempt to check the bucket exists or create it

This can be useful when trying to minimise the number of transactions
rclone does if you know the bucket exists already.
`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Any UTF-8 character is valid in a key, however it can't handle
// invalid UTF-8 and / have a special meaning.
//
// The SDK can't seem to handle uploading files called '.'
//
// FIXME would be nice to add
// - initial / encoding
// - doubled / encoding
// - trailing / encoding
// so that AWS keys are always valid file names
Default: encoder.EncodeInvalidUtf8 |
	encoder.EncodeSlash |
	encoder.EncodeDot,
}, {
Name: "memory_pool_flush_time",
Default: memoryPoolFlushTime,
Advanced: true,
Help: `How often internal memory buffer pools will be flushed.

Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
This option controls how often unused buffers will be removed from the pool.`,
}, {
Name: "memory_pool_use_mmap",
Default: memoryPoolUseMmap,
Advanced: true,
Help: `Whether to use mmap buffers in internal memory pool.`,
}, {
Name: "disable_http2",
Default: false,
Advanced: true,
Help: `Disable usage of http2 for S3 backends

There is currently an unsolved issue with the s3 (specifically minio) backend
and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be
disabled here. When the issue is solved this flag will be removed.

See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
`,
},
}})
}

// Constants
const (
	metaMtime   = "Mtime"     // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
	metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
	// The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility
	// See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
	maxSizeForCopy      = 4768 * 1024 * 1024
	maxUploadParts      = 10000                                 // maximum allowed number of parts in a multi-part upload
	minChunkSize        = fs.SizeSuffix(1024 * 1024 * 5)        // default (and minimum) multipart chunk size
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)      // default cutoff for switching to multipart upload
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024) // largest allowed upload_cutoff (5 GB)
	minSleep            = 10 * time.Millisecond                 // In case of error, start at 10ms sleep.

	memoryPoolFlushTime = fs.Duration(time.Minute)        // flush the cached buffers after this long
	memoryPoolUseMmap   = false                           // default for the memory_pool_use_mmap option
	maxExpireDuration   = fs.Duration(7 * 24 * time.Hour) // max expiry is 1 week
)

// Options defines the configuration for this backend
type Options struct {
	Provider              string               `config:"provider"`
	EnvAuth               bool                 `config:"env_auth"`
	AccessKeyID           string               `config:"access_key_id"`
	SecretAccessKey       string               `config:"secret_access_key"`
	Region                string               `config:"region"`
	Endpoint              string               `config:"endpoint"`
	LocationConstraint    string               `config:"location_constraint"`
	ACL                   string               `config:"acl"`
	BucketACL             string               `config:"bucket_acl"`
	ServerSideEncryption  string               `config:"server_side_encryption"`
	SSEKMSKeyID           string               `config:"sse_kms_key_id"`
	SSECustomerAlgorithm  string               `config:"sse_customer_algorithm"`
	SSECustomerKey        string               `config:"sse_customer_key"`
	SSECustomerKeyMD5     string               `config:"sse_customer_key_md5"`
	StorageClass          string               `config:"storage_class"`
	UploadCutoff          fs.SizeSuffix        `config:"upload_cutoff"`
	CopyCutoff            fs.SizeSuffix        `config:"copy_cutoff"`
	ChunkSize             fs.SizeSuffix        `config:"chunk_size"`
	MaxUploadParts        int64                `config:"max_upload_parts"`
	DisableChecksum       bool                 `config:"disable_checksum"`
	SharedCredentialsFile string               `config:"shared_credentials_file"`
	Profile               string               `config:"profile"`
	SessionToken          string               `config:"session_token"`
	UploadConcurrency     int                  `config:"upload_concurrency"`
	ForcePathStyle        bool                 `config:"force_path_style"`
	V2Auth                bool                 `config:"v2_auth"`
	UseAccelerateEndpoint bool                 `config:"use_accelerate_endpoint"`
	LeavePartsOnError     bool                 `config:"leave_parts_on_error"`
	ListChunk             int64                `config:"list_chunk"`
	NoCheckBucket         bool                 `config:"no_check_bucket"`
	Enc                   encoder.MultiEncoder `config:"encoding"`
	MemoryPoolFlushTime   fs.Duration          `config:"memory_pool_flush_time"`
	MemoryPoolUseMmap     bool                 `config:"memory_pool_use_mmap"`
	DisableHTTP2          bool                 `config:"disable_http2"`
}

// Fs represents a remote s3 server
type Fs struct {
	name          string           // the name of the remote
	root          string           // root of the bucket - ignore all objects above this
	opt           Options          // parsed options
	features      *fs.Features     // optional features
	c             *s3.S3           // the connection to the s3 server
	ses           *session.Session // the s3 session
	rootBucket    string           // bucket part of root (if any)
	rootDirectory string           // directory part of root (if any)
	cache         *bucket.Cache    // cache for bucket creation status
	pacer         *fs.Pacer        // To pace the API calls
	srv           *http.Client     // a plain http client
	pool          *pool.Pool       // memory pool
	etagIsNotMD5  bool             // if set ETags are not MD5s
}

// Object describes an s3 object
type Object struct {
	// Will definitely have everything but meta which may be nil
	//
	// List will read everything but meta & mimeType - to fill
	// that in you need to call readMetaData
	fs           *Fs                // what this object is part of
	remote       string             // The remote path
	etag         string             // md5sum of the object
	bytes        int64              // size of the object
	lastModified time.Time          // Last modified
	meta         map[string]*string // The object metadata if known - may be nil
	mimeType     string             // MimeType of object - may be ""
	storageClass string             // e.g. GLACIER
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return fmt.Sprintf("S3 root")
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("S3 bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("S3 bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
	500, // Internal Server Error - "We encountered an internal error. Please try again."
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
}

// S3 is pretty resilient, and the built in retry handling is probably sufficient
// as it should notice closed connections and timeouts which are the most likely
// sort of failure modes
func (f *Fs) shouldRetry(err error) (bool, error) {
	// If this is an awserr object, try and extract more useful information to determine if we should retry
	if awsError, ok := err.(awserr.Error); ok {
		// Simple case, check the original embedded error in case it's generically retryable
		if fserrors.ShouldRetry(awsError.OrigErr()) {
			return true, err
		}
		// Failing that, if it's a RequestFailure it's probably got an http status code we can check
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			// 301 if wrong region for bucket - can only update if running from a bucket
			if f.rootBucket != "" {
				if reqErr.StatusCode() == http.StatusMovedPermanently {
					urfbErr := f.updateRegionForBucket(f.rootBucket)
					if urfbErr != nil {
						fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
						return false, err
					}
					return true, err
				}
			}
			for _, e := range retryErrorCodes {
				if reqErr.StatusCode() == e {
					return true, err
				}
			}
		}
	}
	// Ok, not an awserr, check for generic failure conditions
	return fserrors.ShouldRetry(err), err
}
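
// A typical call site (a sketch - the real calls elsewhere in this file use
// whichever S3 request they need; ctx and bucketName are placeholder
// variables) wraps the SDK call with the pacer and lets shouldRetry decide
// whether to try again:
//
//	err = f.pacer.Call(func() (bool, error) {
//		_, err := f.c.HeadBucketWithContext(ctx, &s3.HeadBucketInput{
//			Bucket: &bucketName,
//		})
//		return f.shouldRetry(err)
//	})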

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}
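
// As an illustration (hypothetical values): with f.root set to
// "mybucket/photos", f.split("2020/pic.jpg") returns
// ("mybucket", "photos/2020/pic.jpg"), both parts passed through the
// configured encoder; o.split() does the same using o.remote.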

// getClient makes an http client according to the options
func getClient(opt *Options) *http.Client {
	// TODO: Do we need cookies too?
	t := fshttp.NewTransportCustom(fs.Config, func(t *http.Transport) {
		if opt.DisableHTTP2 {
			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
		}
	})
	return &http.Client{
		Transport: t,
	}
}
2013-01-08 19:53:35 +01:00
// s3Connection makes a connection to s3
2018-05-14 19:06:57 +02:00
func s3Connection ( opt * Options ) ( * s3 . S3 , * session . Session , error ) {
2013-01-08 19:53:35 +01:00
// Make the auth
2016-02-01 14:11:27 +01:00
v := credentials . Value {
2018-05-14 19:06:57 +02:00
AccessKeyID : opt . AccessKeyID ,
SecretAccessKey : opt . SecretAccessKey ,
SessionToken : opt . SessionToken ,
2016-02-01 14:11:27 +01:00
}
2017-11-22 22:21:36 +01:00
lowTimeoutClient := & http . Client { Timeout : 1 * time . Second } // low timeout to ec2 metadata service
2020-10-13 18:11:22 +02:00
2017-11-22 22:21:36 +01:00
def := defaults . Get ( )
def . Config . HTTPClient = lowTimeoutClient
2020-03-04 01:19:59 +01:00
// start a new AWS session
awsSession , err := session . NewSession ( )
if err != nil {
return nil , nil , errors . Wrap ( err , "NewSession" )
}
2016-02-01 14:11:27 +01:00
// first provider to supply a credential set "wins"
providers := [ ] credentials . Provider {
// use static credentials if they're present (checked by provider)
& credentials . StaticProvider { Value : v } ,
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
& credentials . EnvProvider { } ,
2018-04-16 13:14:35 +02:00
// A SharedCredentialsProvider retrieves credentials
// from the current user's home directory. It checks
// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
2020-06-26 12:14:40 +02:00
& credentials . SharedCredentialsProvider {
Filename : opt . SharedCredentialsFile , // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable.
Profile : opt . Profile , // If empty will look for "AWS_PROFILE" env var or "default" if not set.
} ,
2018-04-16 13:14:35 +02:00
2017-11-22 22:21:36 +01:00
// Pick up IAM role if we're in an ECS task
defaults . RemoteCredProvider ( * def . Config , def . Handlers ) ,
2016-02-01 14:11:27 +01:00
// Pick up IAM role in case we're on EC2
& ec2rolecreds . EC2RoleProvider {
2020-03-04 01:19:59 +01:00
Client : ec2metadata . New ( awsSession , & aws . Config {
2017-11-22 22:21:36 +01:00
HTTPClient : lowTimeoutClient ,
2016-02-01 14:11:27 +01:00
} ) ,
2019-10-31 16:42:16 +01:00
ExpiryWindow : 3 * time . Minute ,
2016-02-01 14:11:27 +01:00
} ,
2020-01-05 20:49:31 +01:00
// Pick up IAM role if we are in EKS
& stscreds . WebIdentityRoleProvider {
ExpiryWindow : 3 * time . Minute ,
} ,
2016-02-01 14:11:27 +01:00
}
cred := credentials . NewChainCredentials ( providers )
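// In practice this means explicit keys from the config beat the
// environment variables, which beat the shared credentials file, which
// beats the ECS/EC2/EKS instance roles - the first provider in the slice
// able to supply credentials wins.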
2015-09-29 10:58:03 +02:00
switch {
2018-05-14 19:06:57 +02:00
case opt . EnvAuth :
2016-02-09 18:19:33 +01:00
// No need for empty checks if "env_auth" is true
case v . AccessKeyID == "" && v . SecretAccessKey == "" :
2016-02-01 14:11:27 +01:00
// if no access key/secret and IAM is explicitly disabled then fall back to anonymous access
cred = credentials . AnonymousCredentials
case v . AccessKeyID == "" :
2015-10-30 12:50:45 +01:00
return nil , nil , errors . New ( "access_key_id not found" )
2016-02-01 14:11:27 +01:00
case v . SecretAccessKey == "" :
2015-10-30 12:50:45 +01:00
return nil , nil , errors . New ( "secret_access_key not found" )
2013-01-08 19:53:35 +01:00
}
2018-05-14 19:06:57 +02:00
if opt . Region == "" {
opt . Region = "us-east-1"
2013-01-08 19:53:35 +01:00
}
2020-09-08 17:34:25 +02:00
if opt . Provider == "AWS" || opt . Provider == "Alibaba" || opt . Provider == "Netease" || opt . Provider == "Scaleway" || opt . Provider == "TencentCOS" || opt . UseAccelerateEndpoint {
2019-01-12 17:46:45 +01:00
opt . ForcePathStyle = false
}
2020-06-12 17:04:16 +02:00
if opt . Provider == "Scaleway" && opt . MaxUploadParts > 1000 {
opt . MaxUploadParts = 1000
}
2014-12-23 13:09:02 +01:00
awsConfig := aws . NewConfig ( ) .
2020-03-14 18:28:29 +01:00
WithMaxRetries ( 0 ) . // Rely on rclone's retry logic
2016-02-01 14:11:27 +01:00
WithCredentials ( cred ) .
2020-10-13 18:11:22 +02:00
WithHTTPClient ( getClient ( opt ) ) .
2019-04-26 11:19:00 +02:00
WithS3ForcePathStyle ( opt . ForcePathStyle ) .
2020-06-26 15:09:29 +02:00
WithS3UseAccelerate ( opt . UseAccelerateEndpoint ) .
WithS3UsEast1RegionalEndpoint ( endpoints . RegionalS3UsEast1Endpoint )
2019-01-16 14:35:19 +01:00
if opt . Region != "" {
awsConfig . WithRegion ( opt . Region )
}
if opt . Endpoint != "" {
awsConfig . WithEndpoint ( opt . Endpoint )
}
2015-08-10 12:02:34 +02:00
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
2018-11-07 03:50:28 +01:00
awsSessionOpts := session . Options {
Config : * awsConfig ,
}
if opt . EnvAuth && opt . AccessKeyID == "" && opt . SecretAccessKey == "" {
// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
awsSessionOpts . SharedConfigState = session . SharedConfigEnable
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
awsSessionOpts . Config . Credentials = nil
}
ses , err := session . NewSessionWithOptions ( awsSessionOpts )
if err != nil {
return nil , nil , err
}
c := s3 . New ( ses )
2018-10-09 14:03:37 +02:00
if opt . V2Auth || opt . Region == "other-v2-signature" {
2018-05-14 19:06:57 +02:00
fs . Debugf ( nil , "Using v2 auth" )
2015-08-28 09:47:41 +02:00
signer := func ( req * request . Request ) {
2015-08-10 12:02:34 +02:00
// Ignore AnonymousCredentials object
2015-10-30 12:50:45 +01:00
if req . Config . Credentials == credentials . AnonymousCredentials {
2015-08-10 12:02:34 +02:00
return
}
2016-02-01 14:11:27 +01:00
sign ( v . AccessKeyID , v . SecretAccessKey , req . HTTPRequest )
2015-08-10 12:02:34 +02:00
}
c . Handlers . Sign . Clear ( )
2015-08-28 09:47:41 +02:00
c . Handlers . Sign . PushBackNamed ( corehandlers . BuildContentLengthHandler )
2015-08-10 12:02:34 +02:00
c . Handlers . Sign . PushBack ( signer )
2013-01-08 19:53:35 +01:00
}
2015-10-30 12:50:45 +01:00
return c , ses , nil
2013-01-08 19:53:35 +01:00
}
2018-09-07 13:02:27 +02:00
func checkUploadChunkSize ( cs fs . SizeSuffix ) error {
if cs < minChunkSize {
return errors . Errorf ( "%s is less than %s" , cs , minChunkSize )
}
return nil
}
func ( f * Fs ) setUploadChunkSize ( cs fs . SizeSuffix ) ( old fs . SizeSuffix , err error ) {
err = checkUploadChunkSize ( cs )
if err == nil {
old , f . opt . ChunkSize = f . opt . ChunkSize , cs
}
return
}
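// setUploadChunkSize is written as a swap so callers (e.g. tests that vary
// the chunk size) can restore the previous value afterwards; an invalid
// size is rejected up front and leaves f.opt.ChunkSize untouched.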
2018-11-26 22:09:23 +01:00
func checkUploadCutoff ( cs fs . SizeSuffix ) error {
if cs > maxUploadCutoff {
return errors . Errorf ( "%s is greater than %s" , cs , maxUploadCutoff )
}
return nil
}
func ( f * Fs ) setUploadCutoff ( cs fs . SizeSuffix ) ( old fs . SizeSuffix , err error ) {
err = checkUploadCutoff ( cs )
if err == nil {
old , f . opt . UploadCutoff = f . opt . UploadCutoff , cs
}
return
}
2019-08-09 12:29:36 +02:00
// setRoot changes the root of the Fs
func ( f * Fs ) setRoot ( root string ) {
f . root = parsePath ( root )
f . rootBucket , f . rootDirectory = bucket . Split ( f . root )
}
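// Illustrative only:
//
//	f.setRoot("bucket/path/to/dir")
//	// f.root == "bucket/path/to/dir", f.rootBucket == "bucket", f.rootDirectory == "path/to/dir"
//	f.setRoot("bucket")
//	// f.rootBucket == "bucket", f.rootDirectory == ""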
2016-09-01 23:27:50 +02:00
// NewFs constructs an Fs from the path, bucket:path
2020-11-05 16:18:51 +01:00
func NewFs ( ctx context . Context , name , root string , m configmap . Mapper ) ( fs . Fs , error ) {
2018-05-14 19:06:57 +02:00
// Parse config into Options struct
opt := new ( Options )
err := configstruct . Set ( m , opt )
if err != nil {
return nil , err
}
2018-09-07 13:02:27 +02:00
err = checkUploadChunkSize ( opt . ChunkSize )
if err != nil {
return nil , errors . Wrap ( err , "s3: chunk size" )
2018-05-14 19:06:57 +02:00
}
2018-11-26 22:09:23 +01:00
err = checkUploadCutoff ( opt . UploadCutoff )
if err != nil {
return nil , errors . Wrap ( err , "s3: upload cutoff" )
}
2019-01-16 18:23:37 +01:00
if opt . ACL == "" {
opt . ACL = "private"
}
if opt . BucketACL == "" {
opt . BucketACL = opt . ACL
}
2020-11-20 12:15:48 +01:00
if opt . SSECustomerKey != "" && opt . SSECustomerKeyMD5 == "" {
// calculate CustomerKeyMD5 if not supplied
md5sumBinary := md5 . Sum ( [ ] byte ( opt . SSECustomerKey ) )
opt . SSECustomerKeyMD5 = base64 . StdEncoding . EncodeToString ( md5sumBinary [ : ] )
}
2018-05-14 19:06:57 +02:00
c , ses , err := s3Connection ( opt )
2013-01-08 19:53:35 +01:00
if err != nil {
return nil , err
}
2020-02-19 11:17:25 +01:00
2015-11-07 12:14:46 +01:00
f := & Fs {
2019-08-09 12:29:36 +02:00
name : name ,
opt : * opt ,
c : c ,
ses : ses ,
2020-03-14 18:28:29 +01:00
pacer : fs . NewPacer ( pacer . NewS3 ( pacer . MinSleep ( minSleep ) ) ) ,
2019-08-09 12:29:36 +02:00
cache : bucket . NewCache ( ) ,
2020-10-13 18:11:22 +02:00
srv : getClient ( opt ) ,
2020-04-09 12:18:58 +02:00
pool : pool . New (
time . Duration ( opt . MemoryPoolFlushTime ) ,
int ( opt . ChunkSize ) ,
opt . UploadConcurrency * fs . Config . Transfers ,
opt . MemoryPoolUseMmap ,
) ,
2019-08-09 12:29:36 +02:00
}
2020-11-20 13:15:56 +01:00
if opt . ServerSideEncryption == "aws:kms" || opt . SSECustomerAlgorithm != "" {
// From: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
//
// Objects encrypted by SSE-S3 or plaintext have ETags that are an MD5
// digest of their data.
//
// Objects encrypted by SSE-C or SSE-KMS have ETags that are not an
// MD5 digest of their object data.
f . etagIsNotMD5 = true
}
2019-08-09 12:29:36 +02:00
f . setRoot ( root )
2017-08-09 16:27:43 +02:00
f . features = ( & fs . Features {
2019-08-09 12:29:36 +02:00
ReadMimeType : true ,
WriteMimeType : true ,
BucketBased : true ,
BucketBasedRootOK : true ,
2019-09-09 21:44:50 +02:00
SetTier : true ,
GetTier : true ,
2020-06-19 11:28:34 +02:00
SlowModTime : true ,
2020-11-05 17:00:40 +01:00
} ) . Fill ( ctx , f )
2019-08-09 12:29:36 +02:00
if f . rootBucket != "" && f . rootDirectory != "" {
2020-11-20 12:15:48 +01:00
// Check to see if the (bucket,directory) is actually an existing file
oldRoot := f . root
newRoot , leaf := path . Split ( oldRoot )
f . setRoot ( newRoot )
_ , err := f . NewObject ( ctx , leaf )
if err != nil {
if err == fs . ErrorObjectNotFound || err == fs . ErrorNotAFile {
// File doesn't exist or is a directory so return old f
f . setRoot ( oldRoot )
return f , nil
2014-05-05 20:52:52 +02:00
}
2020-11-20 12:15:48 +01:00
return nil , err
2014-05-05 20:52:52 +02:00
}
2020-11-20 12:15:48 +01:00
// return an error with an fs which points to the parent
return f , fs . ErrorIsFile
2014-05-05 20:52:52 +02:00
}
2014-12-23 13:09:02 +01:00
// f.listMultipartUploads()
2013-01-08 19:53:35 +01:00
return f , nil
}
2016-06-25 22:58:34 +02:00
// Return an Object from a path
2013-01-08 19:53:35 +01:00
//
2016-06-25 22:23:20 +02:00
// If it can't be found it returns the error fs.ErrorObjectNotFound.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) newObjectWithInfo ( ctx context . Context , remote string , info * s3 . Object ) ( fs . Object , error ) {
2015-11-07 12:14:46 +01:00
o := & Object {
fs : f ,
2013-01-08 19:53:35 +01:00
remote : remote ,
}
if info != nil {
// Set info but not meta
2014-12-23 13:09:02 +01:00
if info . LastModified == nil {
2017-02-09 12:01:20 +01:00
fs . Logf ( o , "Failed to read last modified" )
2013-06-27 21:13:07 +02:00
o . lastModified = time . Now ( )
2014-12-23 13:09:02 +01:00
} else {
o . lastModified = * info . LastModified
2013-01-08 19:53:35 +01:00
}
2020-11-20 13:15:56 +01:00
if o . fs . etagIsNotMD5 {
o . etag = ""
} else {
o . etag = aws . StringValue ( info . ETag )
}
2014-12-23 13:09:02 +01:00
o . bytes = aws . Int64Value ( info . Size )
2019-09-09 21:44:50 +02:00
o . storageClass = aws . StringValue ( info . StorageClass )
2013-01-08 19:53:35 +01:00
} else {
2019-06-17 10:34:30 +02:00
err := o . readMetaData ( ctx ) // reads info and meta, returning an error
2013-01-08 19:53:35 +01:00
if err != nil {
2016-06-25 22:23:20 +02:00
return nil , err
2013-01-08 19:53:35 +01:00
}
}
2016-06-25 22:23:20 +02:00
return o , nil
2013-01-08 19:53:35 +01:00
}
2016-06-25 22:23:20 +02:00
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) NewObject ( ctx context . Context , remote string ) ( fs . Object , error ) {
return f . newObjectWithInfo ( ctx , remote , nil )
2013-01-08 19:53:35 +01:00
}
2019-01-16 14:35:19 +01:00
// Gets the bucket location
2019-08-09 12:29:36 +02:00
func ( f * Fs ) getBucketLocation ( bucket string ) ( string , error ) {
2019-01-16 14:35:19 +01:00
req := s3 . GetBucketLocationInput {
2019-08-09 12:29:36 +02:00
Bucket : & bucket ,
2019-01-16 14:35:19 +01:00
}
var resp * s3 . GetBucketLocationOutput
var err error
err = f . pacer . Call ( func ( ) ( bool , error ) {
resp , err = f . c . GetBucketLocation ( & req )
return f . shouldRetry ( err )
} )
if err != nil {
return "" , err
}
return s3 . NormalizeBucketLocation ( aws . StringValue ( resp . LocationConstraint ) ) , nil
}
// Updates the region for the bucket by reading the region from the
// bucket then updating the session.
2019-08-09 12:29:36 +02:00
func ( f * Fs ) updateRegionForBucket ( bucket string ) error {
region , err := f . getBucketLocation ( bucket )
2019-01-16 14:35:19 +01:00
if err != nil {
return errors . Wrap ( err , "reading bucket location failed" )
}
if aws . StringValue ( f . c . Config . Endpoint ) != "" {
return errors . Errorf ( "can't set region to %q as endpoint is set" , region )
}
if aws . StringValue ( f . c . Config . Region ) == region {
return errors . Errorf ( "region is already %q - not updating" , region )
}
// Make a new session with the new region
oldRegion := f . opt . Region
f . opt . Region = region
c , ses , err := s3Connection ( & f . opt )
if err != nil {
return errors . Wrap ( err , "creating new session failed" )
}
f . c = c
f . ses = ses
fs . Logf ( f , "Switched region to %q from %q" , region , oldRegion )
return nil
}
2016-04-21 21:06:21 +02:00
// listFn is called from list to handle an object.
type listFn func ( remote string , object * s3 . Object , isDirectory bool ) error
2019-08-09 12:29:36 +02:00
// list lists the objects into the function supplied from
// the bucket and directory supplied. The remote has prefix
// removed from it and if addBucket is set then it adds the
// bucket to the start.
2016-04-23 22:46:52 +02:00
//
2017-06-11 23:43:31 +02:00
// Set recurse to read sub directories
2019-08-09 12:29:36 +02:00
func ( f * Fs ) list ( ctx context . Context , bucket , directory , prefix string , addBucket bool , recurse bool , fn listFn ) error {
if prefix != "" {
prefix += "/"
}
if directory != "" {
directory += "/"
2016-04-23 22:46:52 +02:00
}
2014-05-05 19:25:32 +02:00
delimiter := ""
2017-06-11 23:43:31 +02:00
if ! recurse {
2014-05-05 19:25:32 +02:00
delimiter = "/"
}
2014-12-23 13:09:02 +01:00
var marker * string
2019-09-16 21:25:55 +02:00
// URL encode the listings so we can use control characters in object names
// See: https://github.com/aws/aws-sdk-go/issues/1914
//
// However this doesn't work perfectly under Ceph (and hence DigitalOcean/Dreamhost) because
// it doesn't encode CommonPrefixes.
// See: https://tracker.ceph.com/issues/41870
//
// This does not work under IBM COS also: See https://github.com/rclone/rclone/issues/3345
// though maybe it does on some versions.
//
// This does work with minio but was only added relatively recently
// https://github.com/minio/minio/pull/7265
//
// So we only enable this on providers we know support it properly, all others can retry when an
// XML Syntax error is detected.
2020-09-08 17:34:25 +02:00
var urlEncodeListings = ( f . opt . Provider == "AWS" || f . opt . Provider == "Wasabi" || f . opt . Provider == "Alibaba" || f . opt . Provider == "Minio" || f . opt . Provider == "TencentCOS" )
2015-02-10 18:58:29 +01:00
for {
2014-12-23 13:09:02 +01:00
// FIXME need to implement ALL loop
req := s3 . ListObjectsInput {
2019-09-16 21:25:55 +02:00
Bucket : & bucket ,
Delimiter : & delimiter ,
Prefix : & directory ,
2019-12-26 12:05:00 +01:00
MaxKeys : & f . opt . ListChunk ,
2019-09-16 21:25:55 +02:00
Marker : marker ,
}
if urlEncodeListings {
req . EncodingType = aws . String ( s3 . EncodingTypeUrl )
2014-12-23 13:09:02 +01:00
}
2018-09-03 06:41:04 +02:00
var resp * s3 . ListObjectsOutput
var err error
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-06-17 10:34:30 +02:00
resp , err = f . c . ListObjectsWithContext ( ctx , & req )
2019-09-16 21:25:55 +02:00
if err != nil && ! urlEncodeListings {
if awsErr , ok := err . ( awserr . RequestFailure ) ; ok {
if origErr := awsErr . OrigErr ( ) ; origErr != nil {
if _ , ok := origErr . ( * xml . SyntaxError ) ; ok {
// Retry the listing with URL encoding as there were characters that XML can't encode
urlEncodeListings = true
req . EncodingType = aws . String ( s3 . EncodingTypeUrl )
fs . Debugf ( f , "Retrying listing because of characters which can't be XML encoded" )
return true , err
}
}
}
}
2019-01-16 14:35:19 +01:00
return f . shouldRetry ( err )
2018-09-03 06:41:04 +02:00
} )
2015-02-10 18:58:29 +01:00
if err != nil {
2017-06-11 23:43:31 +02:00
if awsErr , ok := err . ( awserr . RequestFailure ) ; ok {
if awsErr . StatusCode ( ) == http . StatusNotFound {
err = fs . ErrorDirNotFound
}
}
2019-08-09 12:29:36 +02:00
if f . rootBucket == "" {
// if listing from the root ignore wrong region requests returning
// empty directory
if reqErr , ok := err . ( awserr . RequestFailure ) ; ok {
// 301 if wrong region for bucket
if reqErr . StatusCode ( ) == http . StatusMovedPermanently {
fs . Errorf ( f , "Can't change region for bucket %q with no bucket specified" , bucket )
return nil
}
}
}
2016-04-21 21:06:21 +02:00
return err
}
2017-06-11 23:43:31 +02:00
if ! recurse {
2016-04-21 21:06:21 +02:00
for _ , commonPrefix := range resp . CommonPrefixes {
if commonPrefix . Prefix == nil {
2017-02-09 12:01:20 +01:00
fs . Logf ( f , "Nil common prefix received" )
2016-04-21 21:06:21 +02:00
continue
2015-02-10 18:58:29 +01:00
}
2016-04-21 21:06:21 +02:00
remote := * commonPrefix . Prefix
2019-09-16 21:25:55 +02:00
if urlEncodeListings {
remote , err = url . QueryUnescape ( remote )
if err != nil {
fs . Logf ( f , "failed to URL decode %q in listing common prefix: %v" , * commonPrefix . Prefix , err )
continue
}
2019-07-23 13:24:10 +02:00
}
2020-01-14 18:33:35 +01:00
remote = f . opt . Enc . ToStandardPath ( remote )
2019-08-09 12:29:36 +02:00
if ! strings . HasPrefix ( remote , prefix ) {
2017-02-09 12:01:20 +01:00
fs . Logf ( f , "Odd name received %q" , remote )
2016-04-21 21:06:21 +02:00
continue
}
2019-08-09 12:29:36 +02:00
remote = remote [ len ( prefix ) : ]
if addBucket {
remote = path . Join ( bucket , remote )
}
2016-04-21 21:06:21 +02:00
if strings . HasSuffix ( remote , "/" ) {
remote = remote [ : len ( remote ) - 1 ]
}
err = fn ( remote , & s3 . Object { Key : & remote } , true )
if err != nil {
return err
2013-01-08 19:53:35 +01:00
}
}
2016-04-21 21:06:21 +02:00
}
for _ , object := range resp . Contents {
2019-08-09 12:29:36 +02:00
remote := aws . StringValue ( object . Key )
2019-09-16 21:25:55 +02:00
if urlEncodeListings {
remote , err = url . QueryUnescape ( remote )
if err != nil {
fs . Logf ( f , "failed to URL decode %q in listing: %v" , aws . StringValue ( object . Key ) , err )
continue
}
2019-07-23 13:24:10 +02:00
}
2020-01-14 18:33:35 +01:00
remote = f . opt . Enc . ToStandardPath ( remote )
2019-08-09 12:29:36 +02:00
if ! strings . HasPrefix ( remote , prefix ) {
fs . Logf ( f , "Odd name received %q" , remote )
2016-04-21 21:06:21 +02:00
continue
2014-12-23 13:09:02 +01:00
}
2019-08-09 12:29:36 +02:00
remote = remote [ len ( prefix ) : ]
2020-03-31 12:43:51 +02:00
isDirectory := remote == "" || strings . HasSuffix ( remote , "/" )
2019-08-09 12:29:36 +02:00
if addBucket {
remote = path . Join ( bucket , remote )
}
2018-03-19 18:41:46 +01:00
// is this a directory marker?
2019-08-09 12:29:36 +02:00
if isDirectory && object . Size != nil && * object . Size == 0 {
2018-03-19 18:41:46 +01:00
continue // skip directory marker
}
2016-04-21 21:06:21 +02:00
err = fn ( remote , object , false )
if err != nil {
return err
2014-12-23 13:09:02 +01:00
}
2015-02-10 18:58:29 +01:00
}
2016-04-21 21:06:21 +02:00
if ! aws . BoolValue ( resp . IsTruncated ) {
break
}
// Use NextMarker if set, otherwise use last Key
if resp . NextMarker == nil || * resp . NextMarker == "" {
2017-12-20 17:40:41 +01:00
if len ( resp . Contents ) == 0 {
return errors . New ( "s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents" )
}
2016-04-21 21:06:21 +02:00
marker = resp . Contents [ len ( resp . Contents ) - 1 ] . Key
} else {
marker = resp . NextMarker
}
2019-12-11 18:23:52 +01:00
if urlEncodeListings {
* marker , err = url . QueryUnescape ( * marker )
if err != nil {
return errors . Wrapf ( err , "failed to URL decode NextMarker %q" , * marker )
}
}
2014-05-05 19:25:32 +02:00
}
2016-04-21 21:06:21 +02:00
return nil
2014-05-05 19:25:32 +02:00
}
2017-06-30 11:54:14 +02:00
// Convert a list item into a DirEntry
2019-06-17 10:34:30 +02:00
func ( f * Fs ) itemToDirEntry ( ctx context . Context , remote string , object * s3 . Object , isDirectory bool ) ( fs . DirEntry , error ) {
2017-06-11 23:43:31 +02:00
if isDirectory {
size := int64 ( 0 )
if object . Size != nil {
size = * object . Size
}
2017-06-30 14:37:29 +02:00
d := fs . NewDir ( remote , time . Time { } ) . SetSize ( size )
2017-06-11 23:43:31 +02:00
return d , nil
2016-04-21 21:06:21 +02:00
}
2019-06-17 10:34:30 +02:00
o , err := f . newObjectWithInfo ( ctx , remote , object )
2017-06-11 23:43:31 +02:00
if err != nil {
return nil , err
}
return o , nil
}
// listDir lists files and directories to out
2019-08-09 12:29:36 +02:00
func ( f * Fs ) listDir ( ctx context . Context , bucket , directory , prefix string , addBucket bool ) ( entries fs . DirEntries , err error ) {
2016-04-21 21:06:21 +02:00
// List the objects and directories
2019-08-09 12:29:36 +02:00
err = f . list ( ctx , bucket , directory , prefix , addBucket , false , func ( remote string , object * s3 . Object , isDirectory bool ) error {
2019-06-17 10:34:30 +02:00
entry , err := f . itemToDirEntry ( ctx , remote , object , isDirectory )
2017-06-11 23:43:31 +02:00
if err != nil {
return err
}
if entry != nil {
entries = append ( entries , entry )
2016-04-21 21:06:21 +02:00
}
return nil
} )
if err != nil {
2017-06-11 23:43:31 +02:00
return nil , err
2016-04-21 21:06:21 +02:00
}
2018-03-01 13:11:34 +01:00
// bucket must be present if listing succeeded
2019-08-09 12:29:36 +02:00
f . cache . MarkOK ( bucket )
2017-06-11 23:43:31 +02:00
return entries , nil
2016-04-21 21:06:21 +02:00
}
// listBuckets lists the buckets to out
2019-08-22 22:30:55 +02:00
func ( f * Fs ) listBuckets ( ctx context . Context ) ( entries fs . DirEntries , err error ) {
2016-04-21 21:06:21 +02:00
req := s3 . ListBucketsInput { }
2018-09-03 06:41:04 +02:00
var resp * s3 . ListBucketsOutput
err = f . pacer . Call ( func ( ) ( bool , error ) {
2019-06-17 10:34:30 +02:00
resp , err = f . c . ListBucketsWithContext ( ctx , & req )
2019-01-16 14:35:19 +01:00
return f . shouldRetry ( err )
2018-09-03 06:41:04 +02:00
} )
2016-04-21 21:06:21 +02:00
if err != nil {
2017-06-11 23:43:31 +02:00
return nil , err
2016-04-21 21:06:21 +02:00
}
for _ , bucket := range resp . Buckets {
2020-01-14 18:33:35 +01:00
bucketName := f . opt . Enc . ToStandardName ( aws . StringValue ( bucket . Name ) )
2019-08-09 12:29:36 +02:00
f . cache . MarkOK ( bucketName )
d := fs . NewDir ( bucketName , aws . TimeValue ( bucket . CreationDate ) )
2017-06-11 23:43:31 +02:00
entries = append ( entries , d )
2014-05-05 19:25:32 +02:00
}
2017-06-11 23:43:31 +02:00
return entries , nil
2013-01-08 19:53:35 +01:00
}
2017-06-11 23:43:31 +02:00
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) List ( ctx context . Context , dir string ) ( entries fs . DirEntries , err error ) {
2019-08-09 12:29:36 +02:00
bucket , directory := f . split ( dir )
if bucket == "" {
2019-08-22 22:30:55 +02:00
if directory != "" {
return nil , fs . ErrorListBucketRequired
}
return f . listBuckets ( ctx )
2014-05-05 19:25:32 +02:00
}
2019-08-09 12:29:36 +02:00
return f . listDir ( ctx , bucket , directory , f . rootDirectory , f . rootBucket == "" )
2013-01-23 23:43:20 +01:00
}
2017-06-05 17:14:24 +02:00
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
2017-06-11 23:43:31 +02:00
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
2019-08-09 12:29:36 +02:00
// of listing recursively than doing a directory traversal.
2019-06-17 10:34:30 +02:00
func ( f * Fs ) ListR ( ctx context . Context , dir string , callback fs . ListRCallback ) ( err error ) {
2019-08-09 12:29:36 +02:00
bucket , directory := f . split ( dir )
2018-01-12 17:30:54 +01:00
list := walk . NewListRHelper ( callback )
2019-08-09 12:29:36 +02:00
listR := func ( bucket , directory , prefix string , addBucket bool ) error {
return f . list ( ctx , bucket , directory , prefix , addBucket , true , func ( remote string , object * s3 . Object , isDirectory bool ) error {
entry , err := f . itemToDirEntry ( ctx , remote , object , isDirectory )
if err != nil {
return err
}
return list . Add ( entry )
} )
}
if bucket == "" {
2019-08-22 22:30:55 +02:00
entries , err := f . listBuckets ( ctx )
2019-08-09 12:29:36 +02:00
if err != nil {
return err
}
for _ , entry := range entries {
err = list . Add ( entry )
if err != nil {
return err
}
bucket := entry . Remote ( )
err = listR ( bucket , "" , f . rootDirectory , true )
if err != nil {
return err
}
2019-08-22 22:30:55 +02:00
// bucket must be present if listing succeeded
f . cache . MarkOK ( bucket )
2019-08-09 12:29:36 +02:00
}
} else {
err = listR ( bucket , directory , f . rootDirectory , f . rootBucket == "" )
2017-06-11 23:43:31 +02:00
if err != nil {
return err
}
2019-08-22 22:30:55 +02:00
// bucket must be present if listing succeeded
f . cache . MarkOK ( bucket )
2017-06-11 23:43:31 +02:00
}
return list . Flush ( )
2017-06-05 17:14:24 +02:00
}
2016-06-25 22:58:34 +02:00
// Put the Object into the bucket
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Put ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( fs . Object , error ) {
2015-11-07 12:14:46 +01:00
// Temporary Object under construction
fs := & Object {
fs : f ,
2016-02-18 12:35:25 +01:00
remote : src . Remote ( ) ,
2015-11-07 12:14:46 +01:00
}
2019-06-17 10:34:30 +02:00
return fs , fs . Update ( ctx , in , src , options ... )
2013-01-08 19:53:35 +01:00
}
2017-09-15 20:20:32 +02:00
// PutStream uploads to the remote path with the modTime given but of indeterminate size
2019-06-17 10:34:30 +02:00
func ( f * Fs ) PutStream ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( fs . Object , error ) {
return f . Put ( ctx , in , src , options ... )
2017-09-15 20:20:32 +02:00
}
2016-02-24 02:58:55 +01:00
// Check if the bucket exists
2017-06-29 13:26:14 +02:00
//
// NB this can return incorrect results if called immediately after bucket deletion
2019-08-09 12:29:36 +02:00
func ( f * Fs ) bucketExists ( ctx context . Context , bucket string ) ( bool , error ) {
2016-02-24 02:58:55 +01:00
req := s3 . HeadBucketInput {
2019-08-09 12:29:36 +02:00
Bucket : & bucket ,
2016-02-24 02:58:55 +01:00
}
2018-09-03 06:41:04 +02:00
err := f . pacer . Call ( func ( ) ( bool , error ) {
2019-06-17 10:34:30 +02:00
_ , err := f . c . HeadBucketWithContext ( ctx , & req )
2019-01-16 14:35:19 +01:00
return f . shouldRetry ( err )
2018-09-03 06:41:04 +02:00
} )
2016-02-24 02:58:55 +01:00
if err == nil {
return true , nil
}
if err , ok := err . ( awserr . RequestFailure ) ; ok {
if err . StatusCode ( ) == http . StatusNotFound {
return false , nil
}
}
return false , err
}
2013-01-08 19:53:35 +01:00
// Mkdir creates the bucket if it doesn't exist
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Mkdir ( ctx context . Context , dir string ) error {
2019-08-09 12:29:36 +02:00
bucket , _ := f . split ( dir )
2019-08-22 22:30:55 +02:00
return f . makeBucket ( ctx , bucket )
}
// makeBucket creates the bucket if it doesn't exist
func ( f * Fs ) makeBucket ( ctx context . Context , bucket string ) error {
2020-07-22 13:02:17 +02:00
if f . opt . NoCheckBucket {
return nil
}
2019-08-09 12:29:36 +02:00
return f . cache . Create ( bucket , func ( ) error {
req := s3 . CreateBucketInput {
Bucket : & bucket ,
ACL : & f . opt . BucketACL ,
2017-06-29 13:26:14 +02:00
}
2019-08-09 12:29:36 +02:00
if f . opt . LocationConstraint != "" {
req . CreateBucketConfiguration = & s3 . CreateBucketConfiguration {
LocationConstraint : & f . opt . LocationConstraint ,
}
2017-06-29 13:26:14 +02:00
}
2019-08-09 12:29:36 +02:00
err := f . pacer . Call ( func ( ) ( bool , error ) {
_ , err := f . c . CreateBucketWithContext ( ctx , & req )
return f . shouldRetry ( err )
} )
if err == nil {
fs . Infof ( f , "Bucket %q created with ACL %q" , bucket , f . opt . BucketACL )
2014-12-23 13:09:02 +01:00
}
2020-04-22 19:01:59 +02:00
if awsErr , ok := err . ( awserr . Error ) ; ok {
if code := awsErr . Code ( ) ; code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
2019-08-09 12:29:36 +02:00
err = nil
}
2013-01-08 23:31:16 +01:00
}
2020-04-15 14:13:13 +02:00
return err
2019-08-09 12:29:36 +02:00
} , func ( ) ( bool , error ) {
return f . bucketExists ( ctx , bucket )
} )
2013-01-08 19:53:35 +01:00
}
2015-11-07 16:31:04 +01:00
// Rmdir deletes the bucket if the fs is at the root
2013-01-08 19:53:35 +01:00
//
// Returns an error if it isn't empty
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Rmdir ( ctx context . Context , dir string ) error {
2019-08-09 12:29:36 +02:00
bucket , directory := f . split ( dir )
if bucket == "" || directory != "" {
2015-11-07 16:31:04 +01:00
return nil
}
2019-08-09 12:29:36 +02:00
return f . cache . Remove ( bucket , func ( ) error {
req := s3 . DeleteBucketInput {
Bucket : & bucket ,
}
err := f . pacer . Call ( func ( ) ( bool , error ) {
_ , err := f . c . DeleteBucketWithContext ( ctx , & req )
return f . shouldRetry ( err )
} )
if err == nil {
fs . Infof ( f , "Bucket %q deleted" , bucket )
}
return err
2018-09-03 06:41:04 +02:00
} )
2013-01-08 19:53:35 +01:00
}
2015-09-22 19:47:16 +02:00
// Precision of the remote
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Precision ( ) time . Duration {
2013-01-19 00:21:02 +01:00
return time . Nanosecond
}
2018-01-23 11:50:50 +01:00
// pathEscape escapes s as for a URL path. It uses rest.URLPathEscape
// but also escapes '+' for S3 and Digital Ocean spaces compatibility
func pathEscape ( s string ) string {
return strings . Replace ( rest . URLPathEscape ( s ) , "+" , "%2B" , - 1 )
}
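// For example pathEscape("dir/file name+1.txt") should come out as
// "dir/file%20name%2B1.txt" - the "/" separators survive, but the "+" is
// percent-encoded so S3 and Digital Ocean Spaces don't interpret it as a
// space in the copy source.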
2020-10-13 23:43:40 +02:00
// copy does a server-side copy
2019-09-09 21:44:50 +02:00
//
// It adds the boiler plate to the req passed in and calls the s3
// method
2020-07-30 11:52:32 +02:00
func ( f * Fs ) copy ( ctx context . Context , req * s3 . CopyObjectInput , dstBucket , dstPath , srcBucket , srcPath string , src * Object ) error {
2019-09-09 21:44:50 +02:00
req . Bucket = & dstBucket
req . ACL = & f . opt . ACL
req . Key = & dstPath
source := pathEscape ( path . Join ( srcBucket , srcPath ) )
req . CopySource = & source
if f . opt . ServerSideEncryption != "" {
req . ServerSideEncryption = & f . opt . ServerSideEncryption
}
2020-11-20 12:15:48 +01:00
if f . opt . SSECustomerAlgorithm != "" {
req . SSECustomerAlgorithm = & f . opt . SSECustomerAlgorithm
req . CopySourceSSECustomerAlgorithm = & f . opt . SSECustomerAlgorithm
}
if f . opt . SSECustomerKey != "" {
req . SSECustomerKey = & f . opt . SSECustomerKey
req . CopySourceSSECustomerKey = & f . opt . SSECustomerKey
}
if f . opt . SSECustomerKeyMD5 != "" {
req . SSECustomerKeyMD5 = & f . opt . SSECustomerKeyMD5
req . CopySourceSSECustomerKeyMD5 = & f . opt . SSECustomerKeyMD5
}
2019-09-09 21:44:50 +02:00
if f . opt . SSEKMSKeyID != "" {
req . SSEKMSKeyId = & f . opt . SSEKMSKeyID
}
if req . StorageClass == nil && f . opt . StorageClass != "" {
req . StorageClass = & f . opt . StorageClass
}
2019-10-04 17:49:06 +02:00
2020-07-30 11:52:32 +02:00
if src . bytes >= int64 ( f . opt . CopyCutoff ) {
return f . copyMultipart ( ctx , req , dstBucket , dstPath , srcBucket , srcPath , src )
2019-10-04 17:49:06 +02:00
}
2019-09-09 21:44:50 +02:00
return f . pacer . Call ( func ( ) ( bool , error ) {
_ , err := f . c . CopyObjectWithContext ( ctx , req )
return f . shouldRetry ( err )
} )
}
2019-10-04 17:49:06 +02:00
func calculateRange ( partSize , partIndex , numParts , totalSize int64 ) string {
start := partIndex * partSize
var ends string
if partIndex == numParts - 1 {
2019-12-02 18:00:54 +01:00
if totalSize >= 1 {
ends = strconv . FormatInt ( totalSize - 1 , 10 )
2019-10-04 17:49:06 +02:00
}
} else {
ends = strconv . FormatInt ( start + partSize - 1 , 10 )
}
return fmt . Sprintf ( "bytes=%v-%v" , start , ends )
}
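// Worked example: with partSize=5, numParts=3 and totalSize=12 the three
// parts come out as:
//
//	calculateRange(5, 0, 3, 12) // "bytes=0-4"
//	calculateRange(5, 1, 3, 12) // "bytes=5-9"
//	calculateRange(5, 2, 3, 12) // "bytes=10-11"
//
// For a zero length object the final range is left open as "bytes=0-"
// because ends stays empty.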
2020-07-30 11:52:32 +02:00
func ( f * Fs ) copyMultipart ( ctx context . Context , copyReq * s3 . CopyObjectInput , dstBucket , dstPath , srcBucket , srcPath string , src * Object ) ( err error ) {
info , err := src . headObject ( ctx )
if err != nil {
return err
}
req := & s3 . CreateMultipartUploadInput { }
// Fill in the request from the head info
structs . SetFrom ( req , info )
// If copy metadata was set then set the Metadata to that read
// from the head request
if aws . StringValue ( copyReq . MetadataDirective ) == s3 . MetadataDirectiveCopy {
copyReq . Metadata = info . Metadata
}
// Overwrite any from the copyReq
structs . SetFrom ( req , copyReq )
req . Bucket = & dstBucket
req . Key = & dstPath
2019-10-04 17:49:06 +02:00
var cout * s3 . CreateMultipartUploadOutput
if err := f . pacer . Call ( func ( ) ( bool , error ) {
var err error
2020-07-30 11:52:32 +02:00
cout , err = f . c . CreateMultipartUploadWithContext ( ctx , req )
2019-10-04 17:49:06 +02:00
return f . shouldRetry ( err )
} ) ; err != nil {
return err
}
uid := cout . UploadId
2020-06-04 12:09:27 +02:00
defer atexit . OnError ( & err , func ( ) {
// Try to abort the upload, but ignore the error.
2020-07-30 11:52:32 +02:00
fs . Debugf ( src , "Cancelling multipart copy" )
2020-06-04 12:09:27 +02:00
_ = f . pacer . Call ( func ( ) ( bool , error ) {
_ , err := f . c . AbortMultipartUploadWithContext ( context . Background ( ) , & s3 . AbortMultipartUploadInput {
Bucket : & dstBucket ,
Key : & dstPath ,
UploadId : uid ,
RequestPayer : req . RequestPayer ,
2019-10-04 17:49:06 +02:00
} )
2020-06-04 12:09:27 +02:00
return f . shouldRetry ( err )
} )
} ) ( )
2019-10-04 17:49:06 +02:00
2020-07-30 11:52:32 +02:00
srcSize := src . bytes
2019-12-02 18:14:57 +01:00
partSize := int64 ( f . opt . CopyCutoff )
2019-10-04 17:49:06 +02:00
numParts := ( srcSize - 1 ) / partSize + 1
2020-07-30 11:52:32 +02:00
fs . Debugf ( src , "Starting multipart copy with %d parts" , numParts )
2019-10-04 17:49:06 +02:00
var parts [ ] * s3 . CompletedPart
for partNum := int64 ( 1 ) ; partNum <= numParts ; partNum ++ {
if err := f . pacer . Call ( func ( ) ( bool , error ) {
partNum := partNum
2020-07-30 11:52:32 +02:00
uploadPartReq := & s3 . UploadPartCopyInput { }
structs . SetFrom ( uploadPartReq , copyReq )
uploadPartReq . Bucket = & dstBucket
uploadPartReq . Key = & dstPath
uploadPartReq . PartNumber = & partNum
uploadPartReq . UploadId = uid
uploadPartReq . CopySourceRange = aws . String ( calculateRange ( partSize , partNum - 1 , numParts , srcSize ) )
2019-10-04 17:49:06 +02:00
uout , err := f . c . UploadPartCopyWithContext ( ctx , uploadPartReq )
if err != nil {
return f . shouldRetry ( err )
}
parts = append ( parts , & s3 . CompletedPart {
PartNumber : & partNum ,
ETag : uout . CopyPartResult . ETag ,
} )
return false , nil
} ) ; err != nil {
return err
}
}
return f . pacer . Call ( func ( ) ( bool , error ) {
_ , err := f . c . CompleteMultipartUploadWithContext ( ctx , & s3 . CompleteMultipartUploadInput {
Bucket : & dstBucket ,
Key : & dstPath ,
MultipartUpload : & s3 . CompletedMultipartUpload {
Parts : parts ,
} ,
RequestPayer : req . RequestPayer ,
UploadId : uid ,
} )
return f . shouldRetry ( err )
} )
}
2020-10-13 23:43:40 +02:00
// Copy src to this remote using server-side copy operations.
2015-02-14 19:48:08 +01:00
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
2019-06-17 10:34:30 +02:00
func ( f * Fs ) Copy ( ctx context . Context , src fs . Object , remote string ) ( fs . Object , error ) {
2019-08-09 12:29:36 +02:00
dstBucket , dstPath := f . split ( remote )
2019-08-22 22:30:55 +02:00
err := f . makeBucket ( ctx , dstBucket )
2017-06-28 22:14:53 +02:00
if err != nil {
return nil , err
}
2015-11-07 12:14:46 +01:00
srcObj , ok := src . ( * Object )
2015-02-14 19:48:08 +01:00
if ! ok {
2017-02-09 12:01:20 +01:00
fs . Debugf ( src , "Can't copy - not same remote type" )
2015-02-14 19:48:08 +01:00
return nil , fs . ErrorCantCopy
}
2019-08-09 12:29:36 +02:00
srcBucket , srcPath := srcObj . split ( )
2015-02-14 19:48:08 +01:00
req := s3 . CopyObjectInput {
MetadataDirective : aws . String ( s3 . MetadataDirectiveCopy ) ,
}
2020-07-30 11:52:32 +02:00
err = f . copy ( ctx , & req , dstBucket , dstPath , srcBucket , srcPath , srcObj )
2015-02-14 19:48:08 +01:00
if err != nil {
return nil , err
}
2019-06-17 10:34:30 +02:00
return f . NewObject ( ctx , remote )
2015-02-14 19:48:08 +01:00
}
2016-01-11 13:39:33 +01:00
// Hashes returns the supported hash sets.
2018-01-12 17:30:54 +01:00
func ( f * Fs ) Hashes ( ) hash . Set {
2018-01-18 21:27:52 +01:00
return hash . Set ( hash . MD5 )
2016-01-11 13:39:33 +01:00
}
2020-02-19 11:17:25 +01:00
func ( f * Fs ) getMemoryPool ( size int64 ) * pool . Pool {
2020-04-09 12:18:58 +02:00
if size == int64 ( f . opt . ChunkSize ) {
return f . pool
}
2020-02-19 11:17:25 +01:00
2020-04-09 12:18:58 +02:00
return pool . New (
time . Duration ( f . opt . MemoryPoolFlushTime ) ,
int ( size ) ,
f . opt . UploadConcurrency * fs . Config . Transfers ,
f . opt . MemoryPoolUseMmap ,
)
2020-02-19 11:17:25 +01:00
}
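// The shared f.pool is only handed out when the requested size matches the
// configured chunk size; any other size (for example when the part size
// has been scaled up for a very large upload) gets its own short lived
// pool so differently sized buffers never end up mixed in the same pool.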
2020-06-04 23:03:12 +02:00
// PublicLink generates a public link to the remote path (usually readable by anyone)
func ( f * Fs ) PublicLink ( ctx context . Context , remote string , expire fs . Duration , unlink bool ) ( link string , err error ) {
2020-06-18 18:50:50 +02:00
if strings . HasSuffix ( remote , "/" ) {
return "" , fs . ErrorCantShareDirectories
}
2020-06-04 23:03:12 +02:00
if _ , err := f . NewObject ( ctx , remote ) ; err != nil {
return "" , err
}
2020-06-18 18:50:50 +02:00
if expire > maxExpireDuration {
fs . Logf ( f , "Public Link: Reducing expiry to %v as %v is greater than the max time allowed" , maxExpireDuration , expire )
expire = maxExpireDuration
}
2020-06-04 23:03:12 +02:00
bucket , bucketPath := f . split ( remote )
httpReq , _ := f . c . GetObjectRequest ( & s3 . GetObjectInput {
Bucket : & bucket ,
Key : & bucketPath ,
} )
return httpReq . Presign ( time . Duration ( expire ) )
}
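// This is what backs "rclone link" for S3 remotes, e.g. (illustrative)
// "rclone link --expire 1d s3:bucket/path/file.txt", and it hands back a
// presigned GET URL rather than changing any bucket policy.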
2020-06-24 12:02:34 +02:00
var commandHelp = [ ] fs . CommandHelp { {
Name : "restore" ,
Short : "Restore objects from GLACIER to normal storage" ,
Long : ` This command can be used to restore one or more objects from GLACIER
to normal storage .
Usage Examples :
rclone backend restore s3 : bucket / path / to / object [ - o priority = PRIORITY ] [ - o lifetime = DAYS ]
rclone backend restore s3 : bucket / path / to / directory [ - o priority = PRIORITY ] [ - o lifetime = DAYS ]
rclone backend restore s3 : bucket [ - o priority = PRIORITY ] [ - o lifetime = DAYS ]
This flag also obeys the filters . Test first with - i / -- interactive or -- dry - run flags
rclone - i backend restore -- include "*.txt" s3 : bucket / path - o priority = Standard
All the objects shown will be marked for restore , then
rclone backend restore -- include "*.txt" s3 : bucket / path - o priority = Standard
It returns a list of status dictionaries with Remote and Status
keys . The Status will be OK if it was successful or an error message
2020-06-24 12:02:34 +02:00
if not .
[
{
"Status" : "OK" ,
"Path" : "test.txt"
} ,
{
"Status" : "OK" ,
"Path" : "test/file4.txt"
}
]
` ,
Opts : map [ string ] string {
"priority" : "Priority of restore: Standard|Expedited|Bulk" ,
"lifetime" : "Lifetime of the active copy in days" ,
"description" : "The optional description for the job." ,
} ,
2020-06-25 17:11:05 +02:00
} , {
Name : "list-multipart-uploads" ,
Short : "List the unfinished multipart uploads" ,
Long : ` This command lists the unfinished multipart uploads in JSON format .
rclone backend list - multipart - uploads s3 : bucket / path / to / object
It returns a dictionary of buckets with values as lists of unfinished
multipart uploads .
You can call it with no bucket in which case it lists all buckets , with
a bucket or with a bucket and path .
{
"rclone" : [
{
"Initiated" : "2020-06-26T14:20:36Z" ,
"Initiator" : {
"DisplayName" : "XXX" ,
"ID" : "arn:aws:iam::XXX:user/XXX"
} ,
"Key" : "KEY" ,
"Owner" : {
"DisplayName" : null ,
"ID" : "XXX"
} ,
"StorageClass" : "STANDARD" ,
"UploadId" : "XXX"
}
] ,
"rclone-1000files" : [ ] ,
"rclone-dst" : [ ]
}
` ,
} , {
Name : "cleanup" ,
Short : "Remove unfinished multipart uploads." ,
Long : ` This command removes unfinished multipart uploads of age greater than
max - age which defaults to 24 hours .
Note that you can use - i / -- dry - run with this command to see what it
would do .
rclone backend cleanup s3 : bucket / path / to / object
rclone backend cleanup - o max - age = 7 w s3 : bucket / path / to / object
Durations are parsed as per the rest of rclone , 2 h , 7 d , 7 w etc .
` ,
Opts : map [ string ] string {
"max-age" : "Max age of upload to delete" ,
} ,
2020-06-24 12:02:34 +02:00
} }
// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func ( f * Fs ) Command ( ctx context . Context , name string , arg [ ] string , opt map [ string ] string ) ( out interface { } , err error ) {
switch name {
case "restore" :
req := s3 . RestoreObjectInput {
//Bucket: &f.rootBucket,
//Key: &encodedDirectory,
RestoreRequest : & s3 . RestoreRequest { } ,
}
if lifetime := opt [ "lifetime" ] ; lifetime != "" {
ilifetime , err := strconv . ParseInt ( lifetime , 10 , 64 )
if err != nil {
return nil , errors . Wrap ( err , "bad lifetime" )
}
req . RestoreRequest . Days = & ilifetime
}
if priority := opt [ "priority" ] ; priority != "" {
req . RestoreRequest . GlacierJobParameters = & s3 . GlacierJobParameters {
Tier : & priority ,
}
}
if description := opt [ "description" ] ; description != "" {
req . RestoreRequest . Description = & description
}
type status struct {
Status string
Remote string
}
var (
outMu sync . Mutex
out = [ ] status { }
)
err = operations . ListFn ( ctx , f , func ( obj fs . Object ) {
// Remember this is run --checkers times concurrently
o , ok := obj . ( * Object )
st := status { Status : "OK" , Remote : obj . Remote ( ) }
defer func ( ) {
outMu . Lock ( )
out = append ( out , st )
outMu . Unlock ( )
} ( )
if operations . SkipDestructive ( ctx , obj , "restore" ) {
return
}
if ! ok {
st . Status = "Not an S3 object"
return
}
bucket , bucketPath := o . split ( )
reqCopy := req
reqCopy . Bucket = & bucket
reqCopy . Key = & bucketPath
err = f . pacer . Call ( func ( ) ( bool , error ) {
_ , err = f . c . RestoreObject ( & reqCopy )
return f . shouldRetry ( err )
} )
if err != nil {
st . Status = err . Error ( )
}
} )
if err != nil {
return out , err
}
return out , nil
2020-06-25 17:11:05 +02:00
case "list-multipart-uploads" :
return f . listMultipartUploadsAll ( ctx )
case "cleanup" :
maxAge := 24 * time . Hour
if opt [ "max-age" ] != "" {
maxAge , err = fs . ParseDuration ( opt [ "max-age" ] )
if err != nil {
return nil , errors . Wrap ( err , "bad max-age" )
}
}
return nil , f . cleanUp ( ctx , maxAge )
2020-06-24 12:02:34 +02:00
default :
return nil , fs . ErrorCommandNotFound
}
}
2020-06-25 17:11:05 +02:00
// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
//
// Note that rather lazily we treat key as a prefix so it matches
// directories and objects. This could surprise the user if they ask
2020-06-25 17:11:05 +02:00
// for "dir" and it returns "dirKey"
func ( f * Fs ) listMultipartUploads ( ctx context . Context , bucket , key string ) ( uploads [ ] * s3 . MultipartUpload , err error ) {
var (
keyMarker * string
uploadIDMarker * string
)
uploads = [ ] * s3 . MultipartUpload { }
for {
req := s3 . ListMultipartUploadsInput {
Bucket : & bucket ,
MaxUploads : & f . opt . ListChunk ,
KeyMarker : keyMarker ,
UploadIdMarker : uploadIDMarker ,
Prefix : & key ,
}
var resp * s3 . ListMultipartUploadsOutput
err = f . pacer . Call ( func ( ) ( bool , error ) {
resp , err = f . c . ListMultipartUploads ( & req )
return f . shouldRetry ( err )
} )
if err != nil {
return nil , errors . Wrapf ( err , "list multipart uploads bucket %q key %q" , bucket , key )
}
uploads = append ( uploads , resp . Uploads ... )
if ! aws . BoolValue ( resp . IsTruncated ) {
break
}
keyMarker = resp . NextKeyMarker
uploadIDMarker = resp . NextUploadIdMarker
}
return uploads , nil
}
func ( f * Fs ) listMultipartUploadsAll ( ctx context . Context ) ( uploadsMap map [ string ] [ ] * s3 . MultipartUpload , err error ) {
uploadsMap = make ( map [ string ] [ ] * s3 . MultipartUpload )
bucket , directory := f . split ( "" )
if bucket != "" {
uploads , err := f . listMultipartUploads ( ctx , bucket , directory )
if err != nil {
return uploadsMap , err
}
uploadsMap [ bucket ] = uploads
return uploadsMap , nil
}
entries , err := f . listBuckets ( ctx )
if err != nil {
return uploadsMap , err
}
for _ , entry := range entries {
bucket := entry . Remote ( )
uploads , listErr := f . listMultipartUploads ( ctx , bucket , "" )
if listErr != nil {
err = listErr
fs . Errorf ( f , "%v" , err )
}
uploadsMap [ bucket ] = uploads
}
return uploadsMap , err
}
// cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
func ( f * Fs ) cleanUpBucket ( ctx context . Context , bucket string , maxAge time . Duration , uploads [ ] * s3 . MultipartUpload ) ( err error ) {
fs . Infof ( f , "cleaning bucket %q of pending multipart uploads older than %v" , bucket , maxAge )
for _ , upload := range uploads {
if upload . Initiated != nil && upload . Key != nil && upload . UploadId != nil {
age := time . Since ( * upload . Initiated )
what := fmt . Sprintf ( "pending multipart upload for bucket %q key %q dated %v (%v ago)" , bucket , * upload . Key , upload . Initiated , age )
if age > maxAge {
fs . Infof ( f , "removing %s" , what )
if operations . SkipDestructive ( ctx , what , "remove pending upload" ) {
continue
}
req := s3 . AbortMultipartUploadInput {
Bucket : & bucket ,
UploadId : upload . UploadId ,
Key : upload . Key ,
}
_ , abortErr := f . c . AbortMultipartUpload ( & req )
if abortErr != nil {
err = errors . Wrapf ( abortErr , "failed to remove %s" , what )
fs . Errorf ( f , "%v" , err )
}
} else {
fs . Debugf ( f , "ignoring %s" , what )
}
}
}
return err
}
// cleanUp removes all pending multipart uploads older than maxAge
func ( f * Fs ) cleanUp ( ctx context . Context , maxAge time . Duration ) ( err error ) {
uploadsMap , err := f . listMultipartUploadsAll ( ctx )
if err != nil {
return err
}
for bucket , uploads := range uploadsMap {
cleanErr := f . cleanUpBucket ( ctx , bucket , maxAge , uploads )
if cleanErr != nil {
fs . Errorf ( f , "Failed to cleanup bucket %q: %v" , bucket , cleanErr )
err = cleanErr
}
}
return err
}
// CleanUp removes all pending multipart uploads older than 24 hours
func ( f * Fs ) CleanUp ( ctx context . Context ) ( err error ) {
return f . cleanUp ( ctx , 24 * time . Hour )
}
2013-01-08 19:53:35 +01:00
// ------------------------------------------------------------
2015-09-22 19:47:16 +02:00
// Fs returns the parent Fs
2016-02-18 12:35:25 +01:00
func ( o * Object ) Fs ( ) fs . Info {
2015-11-07 12:14:46 +01:00
return o . fs
2014-03-28 18:56:04 +01:00
}
// Return a string version
2015-11-07 12:14:46 +01:00
func ( o * Object ) String ( ) string {
2014-03-28 18:56:04 +01:00
if o == nil {
return "<nil>"
}
return o . remote
}
2015-09-22 19:47:16 +02:00
// Remote returns the remote path
2015-11-07 12:14:46 +01:00
func ( o * Object ) Remote ( ) string {
2013-06-27 21:13:07 +02:00
return o . remote
2013-01-08 19:53:35 +01:00
}
2015-05-09 11:37:43 +02:00
var matchMd5 = regexp . MustCompile ( ` ^[0-9a-f] { 32}$ ` )
2016-01-11 13:39:33 +01:00
// Hash returns the Md5sum of an object returning a lowercase hex string
2019-06-17 10:34:30 +02:00
func ( o * Object ) Hash ( ctx context . Context , t hash . Type ) ( string , error ) {
2018-01-18 21:27:52 +01:00
if t != hash . MD5 {
return "" , hash . ErrUnsupported
2016-01-11 13:39:33 +01:00
}
2018-01-06 15:30:10 +01:00
hash := strings . Trim ( strings . ToLower ( o . etag ) , ` " ` )
2015-05-09 11:37:43 +02:00
// Check the etag is a valid md5sum
2018-01-06 15:30:10 +01:00
if ! matchMd5 . MatchString ( hash ) {
2019-06-17 10:34:30 +02:00
err := o . readMetaData ( ctx )
2018-01-06 15:30:10 +01:00
if err != nil {
return "" , err
}
if md5sum , ok := o . meta [ metaMD5Hash ] ; ok {
md5sumBytes , err := base64 . StdEncoding . DecodeString ( * md5sum )
if err != nil {
return "" , err
}
hash = hex . EncodeToString ( md5sumBytes )
} else {
hash = ""
}
2015-05-09 11:37:43 +02:00
}
2018-01-06 15:30:10 +01:00
return hash , nil
2013-01-08 19:53:35 +01:00
}
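// Note that objects uploaded as multipart have ETags of the form
// "<md5-of-part-md5s>-<number of parts>", which fails the matchMd5 check
// above; for those the MD5 comes from the metaMD5Hash metadata if rclone
// recorded it at upload time, otherwise the hash is reported as "".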
// Size returns the size of an object in bytes
2015-11-07 12:14:46 +01:00
func ( o * Object ) Size ( ) int64 {
2013-06-27 21:13:07 +02:00
return o . bytes
2013-01-08 19:53:35 +01:00
}
2020-07-30 11:52:32 +02:00
func ( o * Object ) headObject ( ctx context . Context ) ( resp * s3 . HeadObjectOutput , err error ) {
2019-08-09 12:29:36 +02:00
bucket , bucketPath := o . split ( )
2014-12-23 13:09:02 +01:00
req := s3 . HeadObjectInput {
2019-08-09 12:29:36 +02:00
Bucket : & bucket ,
Key : & bucketPath ,
2014-07-28 23:32:15 +02:00
}
2020-11-20 12:15:48 +01:00
if o . fs . opt . SSECustomerAlgorithm != "" {
req . SSECustomerAlgorithm = & o . fs . opt . SSECustomerAlgorithm
}
if o . fs . opt . SSECustomerKey != "" {
req . SSECustomerKey = & o . fs . opt . SSECustomerKey
}
if o . fs . opt . SSECustomerKeyMD5 != "" {
req . SSECustomerKeyMD5 = & o . fs . opt . SSECustomerKeyMD5
}
2018-09-03 06:41:04 +02:00
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
var err error
2019-06-17 10:34:30 +02:00
resp , err = o . fs . c . HeadObjectWithContext ( ctx , & req )
2019-01-16 14:35:19 +01:00
return o . fs . shouldRetry ( err )
2018-09-03 06:41:04 +02:00
} )
2013-01-08 19:53:35 +01:00
if err != nil {
2016-06-25 22:23:20 +02:00
if awsErr , ok := err . ( awserr . RequestFailure ) ; ok {
if awsErr . StatusCode ( ) == http . StatusNotFound {
2020-07-30 11:52:32 +02:00
return nil , fs . ErrorObjectNotFound
2016-06-25 22:23:20 +02:00
}
}
2020-07-30 11:52:32 +02:00
return nil , err
2013-01-08 19:53:35 +01:00
}
2020-06-02 15:29:42 +02:00
o . fs . cache . MarkOK ( bucket )
2020-07-30 11:52:32 +02:00
return resp , nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func ( o * Object ) readMetaData ( ctx context . Context ) ( err error ) {
if o . meta != nil {
return nil
}
resp , err := o . headObject ( ctx )
if err != nil {
return err
}
2014-05-16 17:27:53 +02:00
var size int64
// Ignore missing Content-Length assuming it is 0
// Some versions of ceph do this due to their apache proxies
2014-12-23 13:09:02 +01:00
if resp . ContentLength != nil {
size = * resp . ContentLength
2013-01-08 19:53:35 +01:00
}
2020-11-20 13:15:56 +01:00
if o . fs . etagIsNotMD5 {
o . etag = ""
} else {
o . etag = aws . StringValue ( resp . ETag )
}
2013-06-27 21:13:07 +02:00
o . bytes = size
2014-12-23 13:09:02 +01:00
o . meta = resp . Metadata
2019-10-23 09:16:22 +02:00
if o . meta == nil {
o . meta = map [ string ] * string { }
}
2019-09-09 21:44:50 +02:00
o . storageClass = aws . StringValue ( resp . StorageClass )
2014-12-23 13:09:02 +01:00
if resp . LastModified == nil {
2017-02-09 12:01:20 +01:00
fs . Logf ( o , "Failed to read last modified from HEAD: %v" , err )
2013-06-27 21:13:07 +02:00
o . lastModified = time . Now ( )
2014-12-23 13:09:02 +01:00
} else {
o . lastModified = * resp . LastModified
2013-01-08 19:53:35 +01:00
}
2016-09-21 23:13:24 +02:00
o . mimeType = aws . StringValue ( resp . ContentType )
2013-01-08 19:53:35 +01:00
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
2019-06-17 10:34:30 +02:00
func ( o * Object ) ModTime ( ctx context . Context ) time . Time {
2018-04-13 14:32:17 +02:00
if fs . Config . UseServerModTime {
return o . lastModified
}
2019-06-17 10:34:30 +02:00
err := o . readMetaData ( ctx )
2013-01-08 19:53:35 +01:00
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Logf ( o , "Failed to read metadata: %v" , err )
2013-01-08 19:53:35 +01:00
return time . Now ( )
}
// read mtime out of metadata if available
2013-06-27 21:13:07 +02:00
d , ok := o . meta [ metaMtime ]
2014-12-23 13:09:02 +01:00
if ! ok || d == nil {
2017-02-09 12:01:20 +01:00
// fs.Debugf(o, "No metadata")
2013-06-27 21:13:07 +02:00
return o . lastModified
2013-01-08 19:53:35 +01:00
}
2014-12-23 13:09:02 +01:00
modTime , err := swift . FloatStringToTime ( * d )
2013-01-08 19:53:35 +01:00
if err != nil {
2017-02-09 12:01:20 +01:00
fs . Logf ( o , "Failed to read mtime from object: %v" , err )
2013-06-27 21:13:07 +02:00
return o . lastModified
2013-01-08 19:53:35 +01:00
}
return modTime
}
2015-09-22 19:47:16 +02:00
// SetModTime sets the modification time of the local fs object
2019-06-17 10:34:30 +02:00
func ( o * Object ) SetModTime ( ctx context . Context , modTime time . Time ) error {
err := o . readMetaData ( ctx )
2013-01-08 23:31:16 +01:00
if err != nil {
2016-03-22 16:07:10 +01:00
return err
2013-01-08 23:31:16 +01:00
}
2014-12-23 13:09:02 +01:00
o . meta [ metaMtime ] = aws . String ( swift . TimeToFloatString ( modTime ) )
2019-09-09 21:44:50 +02:00
// Can't update metadata here, so return this error to force a recopy
if o . storageClass == "GLACIER" || o . storageClass == "DEEP_ARCHIVE" {
return fs . ErrorCantSetModTime
}
2016-01-02 09:58:48 +01:00
2014-12-23 13:09:02 +01:00
// Copy the object to itself to update the metadata
2019-08-09 12:29:36 +02:00
bucket , bucketPath := o . split ( )
2014-12-23 13:09:02 +01:00
req := s3 . CopyObjectInput {
2019-09-09 21:44:50 +02:00
ContentType : aws . String ( fs . MimeType ( ctx , o ) ) , // Guess the content type
2014-12-23 13:09:02 +01:00
Metadata : o . meta ,
2019-09-09 21:44:50 +02:00
MetadataDirective : aws . String ( s3 . MetadataDirectiveReplace ) , // replace metadata with that passed in
2019-06-03 16:28:19 +02:00
}
2020-07-30 11:52:32 +02:00
return o . fs . copy ( ctx , & req , bucket , bucketPath , bucket , bucketPath , o )
2013-01-08 19:53:35 +01:00
}
2015-09-22 19:47:16 +02:00
// Storable returns a boolean indicating if this object is storable
2015-11-07 12:14:46 +01:00
func ( o * Object ) Storable ( ) bool {
return true
}
// Open an object for read
func ( o * Object ) Open ( ctx context . Context , options ... fs . OpenOption ) ( in io . ReadCloser , err error ) {
bucket , bucketPath := o . split ( )
req := s3 . GetObjectInput {
Bucket : & bucket ,
Key : & bucketPath ,
}
if o . fs . opt . SSECustomerAlgorithm != "" {
req . SSECustomerAlgorithm = & o . fs . opt . SSECustomerAlgorithm
}
if o . fs . opt . SSECustomerKey != "" {
req . SSECustomerKey = & o . fs . opt . SSECustomerKey
}
if o . fs . opt . SSECustomerKeyMD5 != "" {
req . SSECustomerKeyMD5 = & o . fs . opt . SSECustomerKeyMD5
}
httpReq , resp := o . fs . c . GetObjectRequest ( & req )
fs . FixRangeOption ( options , o . bytes )
for _ , option := range options {
switch option . ( type ) {
case * fs . RangeOption , * fs . SeekOption :
_ , value := option . Header ( )
req . Range = & value
case * fs . HTTPOption :
key , value := option . Header ( )
httpReq . HTTPRequest . Header . Add ( key , value )
default :
if option . Mandatory ( ) {
fs . Logf ( o , "Unsupported mandatory option: %v" , option )
}
}
}
err = o . fs . pacer . Call ( func ( ) ( bool , error ) {
var err error
httpReq . HTTPRequest = httpReq . HTTPRequest . WithContext ( ctx )
err = httpReq . Send ( )
return o . fs . shouldRetry ( err )
} )
if err , ok := err . ( awserr . RequestFailure ) ; ok {
if err . Code ( ) == "InvalidObjectState" {
return nil , errors . Errorf ( "Object in GLACIER, restore first: bucket=%q, key=%q" , bucket , bucketPath )
}
}
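// Illustrative sketch (not part of the original source): a GLACIER object
// has to be restored before it can be read. With aws-sdk-go that looks
// roughly like the following (names such as f, bucket and bucketPath refer
// to the surrounding code):
//
//     _, err := f.c.RestoreObjectWithContext(ctx, &s3.RestoreObjectInput{
//         Bucket: &bucket,
//         Key:    &bucketPath,
//         RestoreRequest: &s3.RestoreRequest{
//             Days: aws.Int64(1),
//             GlacierJobParameters: &s3.GlacierJobParameters{
//                 Tier: aws.String("Standard"),
//             },
//         },
//     })
//
// Once the restore completes the GetObject below succeeds.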
if err != nil {
return nil , err
}
return resp . Body , nil
}
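// Usage sketch (not part of the original source): ranged reads are passed
// as fs.OpenOption values which Open converts into a Range header on the
// GetObject request, for example:
//
//     rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
//     if err == nil {
//         defer func() { _ = rc.Close() }()
//         // rc now streams just the first 1 KiB of the object
//     }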
var warnStreamUpload sync . Once
func ( o * Object ) uploadMultipart ( ctx context . Context , req * s3 . PutObjectInput , size int64 , in io . Reader ) ( err error ) {
f := o . fs
// make concurrency machinery
concurrency := f . opt . UploadConcurrency
if concurrency < 1 {
concurrency = 1
}
tokens := pacer . NewTokenDispenser ( concurrency )
uploadParts := f . opt . MaxUploadParts
if uploadParts < 1 {
uploadParts = 1
} else if uploadParts > maxUploadParts {
uploadParts = maxUploadParts
}
// calculate size of parts
partSize := int ( f . opt . ChunkSize )
// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
// buffers here (default 5 MiB). With the maximum number of parts (10,000) this limits streamed
// files to about 48.8 GiB, which seems a reasonable limit.
if size == - 1 {
warnStreamUpload . Do ( func ( ) {
fs . Logf ( f , "Streaming uploads using chunk size %v will have maximum file size of %v" ,
f . opt . ChunkSize , fs . SizeSuffix ( int64 ( partSize ) * uploadParts ) )
} )
} else {
// Adjust partSize until the number of parts is small enough.
if size / int64 ( partSize ) >= uploadParts {
// Calculate partition size rounded up to the nearest MB
partSize = int ( ( ( ( size / uploadParts ) >> 20 ) + 1 ) << 20 )
}
}
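// Worked example (not part of the original source): with the default 5 MiB
// chunk size and the 10,000 part limit, a streamed upload of unknown size
// tops out at
//
//     5 MiB * 10,000 = 50,000 MiB ≈ 48.8 GiB
//
// while for a known size, say 100 GiB, the adjustment above picks the
// smallest whole number of MiB per part that keeps the part count under
// the limit:
//
//     partSize = (((100 GiB / 10,000) >> 20) + 1) << 20 = 11 MiB   // about 9,310 parts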
memPool := f . getMemoryPool ( int64 ( partSize ) )
var mReq s3 . CreateMultipartUploadInput
structs . SetFrom ( & mReq , req )
var cout * s3 . CreateMultipartUploadOutput
err = f . pacer . Call ( func ( ) ( bool , error ) {
var err error
cout , err = f . c . CreateMultipartUploadWithContext ( ctx , & mReq )
return f . shouldRetry ( err )
} )
if err != nil {
return errors . Wrap ( err , "multipart upload failed to initialise" )
}
uid := cout . UploadId
defer atexit . OnError ( & err , func ( ) {
if o . fs . opt . LeavePartsOnError {
return
}
fs . Debugf ( o , "Cancelling multipart upload" )
errCancel := f . pacer . Call ( func ( ) ( bool , error ) {
_ , err := f . c . AbortMultipartUploadWithContext ( context . Background ( ) , & s3 . AbortMultipartUploadInput {
Bucket : req . Bucket ,
Key : req . Key ,
UploadId : uid ,
RequestPayer : req . RequestPayer ,
} )
return f . shouldRetry ( err )
} )
if errCancel != nil {
fs . Debugf ( o , "Failed to cancel multipart upload: %v" , errCancel )
}
} ) ( )
var (
g , gCtx = errgroup . WithContext ( ctx )
finished = false
partsMu sync . Mutex // to protect parts
parts [ ] * s3 . CompletedPart
off int64
)
for partNum := int64 ( 1 ) ; ! finished ; partNum ++ {
// Get a block of memory from the pool and a token which limits concurrency.
tokens . Get ( )
buf := memPool . Get ( )
free := func ( ) {
// return the memory and token
memPool . Put ( buf )
tokens . Put ( )
}
// Fail fast: if an errgroup managed function has returned an error, gCtx
// is cancelled and there is no point in uploading the remaining parts.
if gCtx . Err ( ) != nil {
free ( )
break
}
// Read the chunk
var n int
n , err = readers . ReadFill ( in , buf ) // this can never return 0, nil
if err == io . EOF {
if n == 0 && partNum != 1 { // end if no data and if not first chunk
free ( )
break
}
finished = true
} else if err != nil {
free ( )
return errors . Wrap ( err , "multipart upload failed to read source" )
}
buf = buf [ : n ]
partNum := partNum
fs . Debugf ( o , "multipart upload starting chunk %d size %v offset %v/%v" , partNum , fs . SizeSuffix ( n ) , fs . SizeSuffix ( off ) , fs . SizeSuffix ( size ) )
off += int64 ( n )
g . Go ( func ( ) ( err error ) {
defer free ( )
partLength := int64 ( len ( buf ) )
// create checksum of buffer for integrity checking
md5sumBinary := md5 . Sum ( buf )
md5sum := base64 . StdEncoding . EncodeToString ( md5sumBinary [ : ] )
err = f . pacer . Call ( func ( ) ( bool , error ) {
uploadPartReq := & s3 . UploadPartInput {
Body : bytes . NewReader ( buf ) ,
Bucket : req . Bucket ,
Key : req . Key ,
PartNumber : & partNum ,
UploadId : uid ,
ContentMD5 : & md5sum ,
ContentLength : & partLength ,
RequestPayer : req . RequestPayer ,
SSECustomerAlgorithm : req . SSECustomerAlgorithm ,
SSECustomerKey : req . SSECustomerKey ,
SSECustomerKeyMD5 : req . SSECustomerKeyMD5 ,
}
uout , err := f . c . UploadPartWithContext ( gCtx , uploadPartReq )
if err != nil {
if partNum <= int64 ( concurrency ) {
return f . shouldRetry ( err )
}
// retry all chunks once we have done the first batch
return true , err
}
partsMu . Lock ( )
parts = append ( parts , & s3 . CompletedPart {
PartNumber : & partNum ,
ETag : uout . ETag ,
} )
partsMu . Unlock ( )
return false , nil
} )
if err != nil {
return errors . Wrap ( err , "multipart upload failed to upload part" )
}
return nil
} )
}
err = g . Wait ( )
if err != nil {
return err
}
// sort the completed parts by part number
sort . Slice ( parts , func ( i , j int ) bool {
return * parts [ i ] . PartNumber < * parts [ j ] . PartNumber
} )
err = f . pacer . Call ( func ( ) ( bool , error ) {
_ , err := f . c . CompleteMultipartUploadWithContext ( ctx , & s3 . CompleteMultipartUploadInput {
Bucket : req . Bucket ,
Key : req . Key ,
MultipartUpload : & s3 . CompletedMultipartUpload {
Parts : parts ,
} ,
RequestPayer : req . RequestPayer ,
UploadId : uid ,
} )
return f . shouldRetry ( err )
} )
if err != nil {
return errors . Wrap ( err , "multipart upload failed to finalise" )
}
return nil
}
// Update the Object from in with modTime and size
func ( o * Object ) Update ( ctx context . Context , in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) error {
bucket , bucketPath := o . split ( )
err := o . fs . makeBucket ( ctx , bucket )
if err != nil {
return err
}
modTime := src . ModTime ( ctx )
size := src . Size ( )
multipart := size < 0 || size >= int64 ( o . fs . opt . UploadCutoff )
// Set the mtime in the metadata
metadata := map [ string ] * string {
metaMtime : aws . String ( swift . TimeToFloatString ( modTime ) ) ,
}
// read the md5sum if available
// - for non multipart
// - so we can add a ContentMD5
// - for multipart provided checksums aren't disabled
// - so we can add the md5sum in the metadata as metaMD5Hash
var md5sum string
if ! multipart || ! o . fs . opt . DisableChecksum {
hash , err := src . Hash ( ctx , hash . MD5 )
if err == nil && matchMd5 . MatchString ( hash ) {
hashBytes , err := hex . DecodeString ( hash )
if err == nil {
md5sum = base64 . StdEncoding . EncodeToString ( hashBytes )
if multipart {
metadata [ metaMD5Hash ] = & md5sum
}
}
}
}
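// Illustrative sketch (not part of the original source): S3 expects
// Content-MD5 as the base64 encoding of the raw 16 byte digest rather than
// the usual hex string, hence the hex.DecodeString/base64 conversion above:
//
//     hexMD5 := "d41d8cd98f00b204e9800998ecf8427e" // MD5 of the empty string
//     raw, _ := hex.DecodeString(hexMD5)
//     md5sum := base64.StdEncoding.EncodeToString(raw) // "1B2M2Y8AsgTpgAmY7PhCfg=="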
// Guess the content type
mimeType := fs . MimeType ( ctx , src )
req := s3 . PutObjectInput {
Bucket : & bucket ,
ACL : & o . fs . opt . ACL ,
Key : & bucketPath ,
ContentType : & mimeType ,
Metadata : metadata ,
}
if md5sum != "" {
req . ContentMD5 = & md5sum
}
if o . fs . opt . ServerSideEncryption != "" {
req . ServerSideEncryption = & o . fs . opt . ServerSideEncryption
}
if o . fs . opt . SSECustomerAlgorithm != "" {
req . SSECustomerAlgorithm = & o . fs . opt . SSECustomerAlgorithm
}
if o . fs . opt . SSECustomerKey != "" {
req . SSECustomerKey = & o . fs . opt . SSECustomerKey
}
if o . fs . opt . SSECustomerKeyMD5 != "" {
req . SSECustomerKeyMD5 = & o . fs . opt . SSECustomerKeyMD5
}
if o . fs . opt . SSEKMSKeyID != "" {
req . SSEKMSKeyId = & o . fs . opt . SSEKMSKeyID
}
if o . fs . opt . StorageClass != "" {
req . StorageClass = & o . fs . opt . StorageClass
}
// Apply upload options
for _ , option := range options {
key , value := option . Header ( )
lowerKey := strings . ToLower ( key )
switch lowerKey {
case "" :
// ignore
case "cache-control" :
req . CacheControl = aws . String ( value )
case "content-disposition" :
req . ContentDisposition = aws . String ( value )
case "content-encoding" :
req . ContentEncoding = aws . String ( value )
case "content-language" :
req . ContentLanguage = aws . String ( value )
case "content-type" :
req . ContentType = aws . String ( value )
case "x-amz-tagging" :
req . Tagging = aws . String ( value )
default :
const amzMetaPrefix = "x-amz-meta-"
if strings . HasPrefix ( lowerKey , amzMetaPrefix ) {
metaKey := lowerKey [ len ( amzMetaPrefix ) : ]
req . Metadata [ metaKey ] = aws . String ( value )
} else {
fs . Errorf ( o , "Don't know how to set key %q on upload" , key )
}
}
}
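// Usage sketch (not part of the original source): upload headers arrive as
// fs.OpenOption values (e.g. fs.HTTPOption) and the switch above maps them
// onto the PutObjectInput, with x-amz-meta-* keys becoming object metadata:
//
//     err := o.Update(ctx, in, src,
//         &fs.HTTPOption{Key: "Cache-Control", Value: "max-age=3600"},
//         &fs.HTTPOption{Key: "X-Amz-Meta-Colour", Value: "blue"}, // stored under metadata key "colour"
//     )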
if multipart {
err = o . uploadMultipart ( ctx , & req , size , in )
if err != nil {
return err
}
} else {
// Create the request
putObj , _ := o . fs . c . PutObjectRequest ( & req )
// Sign it so we can upload using a presigned request.
//
// Note the SDK doesn't currently support streaming to
// PutObject so we'll use this work-around.
url , headers , err := putObj . PresignRequest ( 15 * time . Minute )
if err != nil {
return errors . Wrap ( err , "s3 upload: sign request" )
}
if o . fs . opt . V2Auth && headers == nil {
headers = putObj . HTTPRequest . Header
}
// Set in to nil if empty so the request is not sent with chunked encoding
if size == 0 {
in = nil
}
// create the vanilla http request
httpReq , err := http . NewRequest ( "PUT" , url , in )
if err != nil {
return errors . Wrap ( err , "s3 upload: new request" )
}
httpReq = httpReq . WithContext ( ctx ) // go1.13 can use NewRequestWithContext
// set the headers we signed and the length
httpReq . Header = headers
httpReq . ContentLength = size
err = o . fs . pacer . CallNoRetry ( func ( ) ( bool , error ) {
resp , err := o . fs . srv . Do ( httpReq )
if err != nil {
return o . fs . shouldRetry ( err )
}
body , err := rest . ReadBody ( resp )
if err != nil {
return o . fs . shouldRetry ( err )
}
if resp . StatusCode >= 200 && resp . StatusCode < 299 {
return false , nil
}
err = errors . Errorf ( "s3 upload: %s: %s" , resp . Status , body )
return fserrors . ShouldRetryHTTP ( resp , retryErrorCodes ) , err
} )
if err != nil {
return err
}
}
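// Design note with sketch (not part of the original source): single part
// uploads go through a presigned plain HTTP PUT because, as noted above,
// the SDK's PutObject can't stream a body of unknown content. The essential
// flow used here is (names such as req, in and size refer to the code above):
//
//     putObj, _ := o.fs.c.PutObjectRequest(&req)            // build but don't send
//     url, headers, _ := putObj.PresignRequest(15 * time.Minute)
//     httpReq, _ := http.NewRequest("PUT", url, in)          // stream the body
//     httpReq.Header = headers
//     httpReq.ContentLength = size
//     resp, _ := o.fs.srv.Do(httpReq)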
// Read the metadata from the newly created object
o . meta = nil // wipe old metadata
err = o . readMetaData ( ctx )
2014-04-18 18:04:21 +02:00
return err
}
// Remove an object
func ( o * Object ) Remove ( ctx context . Context ) error {
bucket , bucketPath := o . split ( )
req := s3 . DeleteObjectInput {
Bucket : & bucket ,
Key : & bucketPath ,
}
err := o . fs . pacer . Call ( func ( ) ( bool , error ) {
_ , err := o . fs . c . DeleteObjectWithContext ( ctx , & req )
return o . fs . shouldRetry ( err )
} )
return err
}
// MimeType of an Object if known, "" otherwise
func ( o * Object ) MimeType ( ctx context . Context ) string {
err := o . readMetaData ( ctx )
if err != nil {
fs . Logf ( o , "Failed to read metadata: %v" , err )
return ""
}
return o . mimeType
}
// SetTier changes the storage class of the object
func ( o * Object ) SetTier ( tier string ) ( err error ) {
ctx := context . TODO ( )
tier = strings . ToUpper ( tier )
bucket , bucketPath := o . split ( )
req := s3 . CopyObjectInput {
MetadataDirective : aws . String ( s3 . MetadataDirectiveCopy ) ,
StorageClass : aws . String ( tier ) ,
}
err = o . fs . copy ( ctx , & req , bucket , bucketPath , bucket , bucketPath , o )
if err != nil {
return err
}
o . storageClass = tier
return err
}
// GetTier returns the storage class as a string
func ( o * Object ) GetTier ( ) string {
if o . storageClass == "" {
return "STANDARD"
}
return o . storageClass
}
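// Usage sketch (not part of the original source): tiers map directly onto
// S3 storage classes, and changing one is a server side copy onto itself
// with MetadataDirectiveCopy, so:
//
//     if o.GetTier() != "DEEP_ARCHIVE" {
//         if err := o.SetTier("DEEP_ARCHIVE"); err != nil { // or "STANDARD", "STANDARD_IA", "GLACIER", ...
//             // handle error
//         }
//     }
//
// GetTier reports "STANDARD" when the object carries no explicit storage class.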
// Check the interfaces are satisfied
var (
_ fs . Fs = & Fs { }
_ fs . Copier = & Fs { }
_ fs . PutStreamer = & Fs { }
_ fs . ListRer = & Fs { }
_ fs . Commander = & Fs { }
_ fs . CleanUpper = & Fs { }
_ fs . Object = & Object { }
_ fs . MimeTyper = & Object { }
_ fs . GetTierer = & Object { }
_ fs . SetTierer = & Object { }
)