// Package s3 provides an interface to Amazon S3 object storage
package s3

// FIXME need to prevent anything but ListDir working for s3://

/*
Progress of port to aws-sdk

 * Don't really need o.meta at all?

What happens if you CTRL-C a multipart upload

 * get an incomplete upload
 * disappears when you delete the bucket
*/

import (
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
	"path"
	"regexp"
	"strings"
	"sync"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/corehandlers"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/walk"
	"github.com/ncw/rclone/lib/rest"
	"github.com/ncw/swift"
	"github.com/pkg/errors"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "s3",
		Description: "Amazon S3 Compliant Storage Providers (AWS, Ceph, Dreamhost, IBM COS, Minio)",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: fs.ConfigProvider,
			Help: "Choose your S3 provider.",
			Examples: []fs.OptionExample{{
				Value: "AWS",
				Help:  "Amazon Web Services (AWS) S3",
			}, {
				Value: "Ceph",
				Help:  "Ceph Object Storage",
			}, {
				Value: "DigitalOcean",
				Help:  "Digital Ocean Spaces",
			}, {
				Value: "Dreamhost",
				Help:  "Dreamhost DreamObjects",
			}, {
				Value: "IBMCOS",
				Help:  "IBM COS S3",
			}, {
				Value: "Minio",
				Help:  "Minio Object Storage",
			}, {
				Value: "Wasabi",
				Help:  "Wasabi Object Storage",
			}, {
				Value: "Other",
				Help:  "Any other S3 compatible provider",
			}},
		}, {
			Name:    "env_auth",
			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key are blank.",
			Default: false,
			Examples: []fs.OptionExample{{
				Value: "false",
				Help:  "Enter AWS credentials in the next step",
			}, {
				Value: "true",
				Help:  "Get AWS credentials from the environment (env vars or IAM)",
			}},
		}, {
			Name: "access_key_id",
			Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
		}, {
			Name: "secret_access_key",
			Help: "AWS Secret Access Key (password).\nLeave blank for anonymous access or runtime credentials.",
		}, {
Name : "region" ,
Help : "Region to connect to." ,
Provider : "AWS" ,
Examples : [ ] fs . OptionExample { {
Value : "us-east-1" ,
Help : "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty." ,
} , {
Value : "us-east-2" ,
Help : "US East (Ohio) Region\nNeeds location constraint us-east-2." ,
} , {
Value : "us-west-2" ,
Help : "US West (Oregon) Region\nNeeds location constraint us-west-2." ,
} , {
Value : "us-west-1" ,
Help : "US West (Northern California) Region\nNeeds location constraint us-west-1." ,
} , {
Value : "ca-central-1" ,
Help : "Canada (Central) Region\nNeeds location constraint ca-central-1." ,
} , {
Value : "eu-west-1" ,
Help : "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1." ,
} , {
Value : "eu-west-2" ,
Help : "EU (London) Region\nNeeds location constraint eu-west-2." ,
} , {
Value : "eu-central-1" ,
Help : "EU (Frankfurt) Region\nNeeds location constraint eu-central-1." ,
} , {
Value : "ap-southeast-1" ,
Help : "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1." ,
} , {
Value : "ap-southeast-2" ,
Help : "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2." ,
} , {
Value : "ap-northeast-1" ,
Help : "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1." ,
} , {
Value : "ap-northeast-2" ,
Help : "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2." ,
} , {
Value : "ap-south-1" ,
Help : "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1." ,
} , {
Value : "sa-east-1" ,
Help : "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1." ,
} } ,
} , {
Name : "region" ,
2018-05-14 19:06:57 +02:00
Help : "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region." ,
2018-04-13 17:08:00 +02:00
Provider : "!AWS" ,
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "Use this if unsure. Will use v4 signatures and an empty region." ,
} , {
Value : "other-v2-signature" ,
Help : "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH." ,
} } ,
} , {
Name : "endpoint" ,
Help : "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region." ,
Provider : "AWS" ,
} , {
Name : "endpoint" ,
Help : "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise." ,
Provider : "IBMCOS" ,
Examples : [ ] fs . OptionExample { {
Value : "s3-api.us-geo.objectstorage.softlayer.net" ,
Help : "US Cross Region Endpoint" ,
} , {
Value : "s3-api.dal.us-geo.objectstorage.softlayer.net" ,
Help : "US Cross Region Dallas Endpoint" ,
} , {
Value : "s3-api.wdc-us-geo.objectstorage.softlayer.net" ,
Help : "US Cross Region Washington DC Endpoint" ,
} , {
Value : "s3-api.sjc-us-geo.objectstorage.softlayer.net" ,
Help : "US Cross Region San Jose Endpoint" ,
} , {
Value : "s3-api.us-geo.objectstorage.service.networklayer.com" ,
Help : "US Cross Region Private Endpoint" ,
} , {
Value : "s3-api.dal-us-geo.objectstorage.service.networklayer.com" ,
Help : "US Cross Region Dallas Private Endpoint" ,
} , {
Value : "s3-api.wdc-us-geo.objectstorage.service.networklayer.com" ,
Help : "US Cross Region Washington DC Private Endpoint" ,
} , {
Value : "s3-api.sjc-us-geo.objectstorage.service.networklayer.com" ,
Help : "US Cross Region San Jose Private Endpoint" ,
} , {
Value : "s3.us-east.objectstorage.softlayer.net" ,
Help : "US Region East Endpoint" ,
} , {
Value : "s3.us-east.objectstorage.service.networklayer.com" ,
Help : "US Region East Private Endpoint" ,
} , {
Value : "s3.us-south.objectstorage.softlayer.net" ,
Help : "US Region South Endpoint" ,
} , {
Value : "s3.us-south.objectstorage.service.networklayer.com" ,
Help : "US Region South Private Endpoint" ,
} , {
Value : "s3.eu-geo.objectstorage.softlayer.net" ,
Help : "EU Cross Region Endpoint" ,
} , {
Value : "s3.fra-eu-geo.objectstorage.softlayer.net" ,
Help : "EU Cross Region Frankfurt Endpoint" ,
} , {
Value : "s3.mil-eu-geo.objectstorage.softlayer.net" ,
Help : "EU Cross Region Milan Endpoint" ,
} , {
Value : "s3.ams-eu-geo.objectstorage.softlayer.net" ,
Help : "EU Cross Region Amsterdam Endpoint" ,
} , {
Value : "s3.eu-geo.objectstorage.service.networklayer.com" ,
Help : "EU Cross Region Private Endpoint" ,
} , {
Value : "s3.fra-eu-geo.objectstorage.service.networklayer.com" ,
Help : "EU Cross Region Frankfurt Private Endpoint" ,
} , {
Value : "s3.mil-eu-geo.objectstorage.service.networklayer.com" ,
Help : "EU Cross Region Milan Private Endpoint" ,
} , {
Value : "s3.ams-eu-geo.objectstorage.service.networklayer.com" ,
Help : "EU Cross Region Amsterdam Private Endpoint" ,
} , {
Value : "s3.eu-gb.objectstorage.softlayer.net" ,
Help : "Great Britan Endpoint" ,
} , {
Value : "s3.eu-gb.objectstorage.service.networklayer.com" ,
Help : "Great Britan Private Endpoint" ,
} , {
Value : "s3.ap-geo.objectstorage.softlayer.net" ,
Help : "APAC Cross Regional Endpoint" ,
} , {
Value : "s3.tok-ap-geo.objectstorage.softlayer.net" ,
Help : "APAC Cross Regional Tokyo Endpoint" ,
} , {
Value : "s3.hkg-ap-geo.objectstorage.softlayer.net" ,
Help : "APAC Cross Regional HongKong Endpoint" ,
} , {
Value : "s3.seo-ap-geo.objectstorage.softlayer.net" ,
Help : "APAC Cross Regional Seoul Endpoint" ,
} , {
Value : "s3.ap-geo.objectstorage.service.networklayer.com" ,
Help : "APAC Cross Regional Private Endpoint" ,
} , {
Value : "s3.tok-ap-geo.objectstorage.service.networklayer.com" ,
Help : "APAC Cross Regional Tokyo Private Endpoint" ,
} , {
Value : "s3.hkg-ap-geo.objectstorage.service.networklayer.com" ,
Help : "APAC Cross Regional HongKong Private Endpoint" ,
} , {
Value : "s3.seo-ap-geo.objectstorage.service.networklayer.com" ,
Help : "APAC Cross Regional Seoul Private Endpoint" ,
} , {
Value : "s3.mel01.objectstorage.softlayer.net" ,
Help : "Melbourne Single Site Endpoint" ,
} , {
Value : "s3.mel01.objectstorage.service.networklayer.com" ,
Help : "Melbourne Single Site Private Endpoint" ,
} , {
Value : "s3.tor01.objectstorage.softlayer.net" ,
Help : "Toronto Single Site Endpoint" ,
} , {
Value : "s3.tor01.objectstorage.service.networklayer.com" ,
Help : "Toronto Single Site Private Endpoint" ,
} } ,
} , {
Name : "endpoint" ,
Help : "Endpoint for S3 API.\nRequired when using an S3 clone." ,
Provider : "!AWS,IBMCOS" ,
Examples : [ ] fs . OptionExample { {
Value : "objects-us-west-1.dream.io" ,
Help : "Dream Objects endpoint" ,
Provider : "Dreamhost" ,
} , {
Value : "nyc3.digitaloceanspaces.com" ,
Help : "Digital Ocean Spaces New York 3" ,
Provider : "DigitalOcean" ,
} , {
Value : "ams3.digitaloceanspaces.com" ,
Help : "Digital Ocean Spaces Amsterdam 3" ,
Provider : "DigitalOcean" ,
} , {
Value : "sgp1.digitaloceanspaces.com" ,
Help : "Digital Ocean Spaces Singapore 1" ,
Provider : "DigitalOcean" ,
} , {
Value : "s3.wasabisys.com" ,
Help : "Wasabi Object Storage" ,
Provider : "Wasabi" ,
} } ,
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for US Region, Northern Virginia or Pacific Northwest.",
			}, {
				Value: "us-east-2",
				Help:  "US East (Ohio) Region.",
			}, {
				Value: "us-west-2",
				Help:  "US West (Oregon) Region.",
			}, {
				Value: "us-west-1",
				Help:  "US West (Northern California) Region.",
			}, {
				Value: "ca-central-1",
				Help:  "Canada (Central) Region.",
			}, {
				Value: "eu-west-1",
				Help:  "EU (Ireland) Region.",
			}, {
				Value: "eu-west-2",
				Help:  "EU (London) Region.",
			}, {
				Value: "EU",
				Help:  "EU Region.",
			}, {
				Value: "ap-southeast-1",
				Help:  "Asia Pacific (Singapore) Region.",
			}, {
				Value: "ap-southeast-2",
				Help:  "Asia Pacific (Sydney) Region.",
			}, {
				Value: "ap-northeast-1",
				Help:  "Asia Pacific (Tokyo) Region.",
			}, {
				Value: "ap-northeast-2",
				Help:  "Asia Pacific (Seoul) Region.",
			}, {
				Value: "ap-south-1",
				Help:  "Asia Pacific (Mumbai) Region.",
			}, {
				Value: "sa-east-1",
				Help:  "South America (Sao Paulo) Region.",
			}},
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter.",
			Provider: "IBMCOS",
			Examples: []fs.OptionExample{{
				Value: "us-standard",
				Help:  "US Cross Region Standard",
			}, {
				Value: "us-vault",
				Help:  "US Cross Region Vault",
			}, {
				Value: "us-cold",
				Help:  "US Cross Region Cold",
			}, {
				Value: "us-flex",
				Help:  "US Cross Region Flex",
			}, {
				Value: "us-east-standard",
				Help:  "US East Region Standard",
			}, {
				Value: "us-east-vault",
				Help:  "US East Region Vault",
			}, {
				Value: "us-east-cold",
				Help:  "US East Region Cold",
			}, {
				Value: "us-east-flex",
				Help:  "US East Region Flex",
			}, {
				Value: "us-south-standard",
				Help:  "US South Region Standard",
			}, {
				Value: "us-south-vault",
				Help:  "US South Region Vault",
			}, {
				Value: "us-south-cold",
				Help:  "US South Region Cold",
			}, {
				Value: "us-south-flex",
				Help:  "US South Region Flex",
			}, {
				Value: "eu-standard",
				Help:  "EU Cross Region Standard",
			}, {
				Value: "eu-vault",
				Help:  "EU Cross Region Vault",
			}, {
				Value: "eu-cold",
				Help:  "EU Cross Region Cold",
			}, {
				Value: "eu-flex",
				Help:  "EU Cross Region Flex",
			}, {
				Value: "eu-gb-standard",
				Help:  "Great Britain Standard",
			}, {
				Value: "eu-gb-vault",
				Help:  "Great Britain Vault",
			}, {
				Value: "eu-gb-cold",
				Help:  "Great Britain Cold",
			}, {
				Value: "eu-gb-flex",
				Help:  "Great Britain Flex",
			}, {
				Value: "ap-standard",
				Help:  "APAC Standard",
			}, {
				Value: "ap-vault",
				Help:  "APAC Vault",
			}, {
				Value: "ap-cold",
				Help:  "APAC Cold",
			}, {
				Value: "ap-flex",
				Help:  "APAC Flex",
			}, {
				Value: "mel01-standard",
				Help:  "Melbourne Standard",
			}, {
				Value: "mel01-vault",
				Help:  "Melbourne Vault",
			}, {
				Value: "mel01-cold",
				Help:  "Melbourne Cold",
			}, {
				Value: "mel01-flex",
				Help:  "Melbourne Flex",
			}, {
				Value: "tor01-standard",
				Help:  "Toronto Standard",
			}, {
				Value: "tor01-vault",
				Help:  "Toronto Vault",
			}, {
				Value: "tor01-cold",
				Help:  "Toronto Cold",
			}, {
				Value: "tor01-flex",
				Help:  "Toronto Flex",
			}},
		}, {
			Name:     "location_constraint",
			Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
			Provider: "!AWS,IBMCOS",
		}, {
Name : "acl" ,
Help : "Canned ACL used when creating buckets and/or storing objects in S3.\nFor more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl" ,
Examples : [ ] fs . OptionExample { {
Value : "private" ,
Help : "Owner gets FULL_CONTROL. No one else has access rights (default)." ,
Provider : "!IBMCOS" ,
} , {
Value : "public-read" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ access." ,
Provider : "!IBMCOS" ,
} , {
Value : "public-read-write" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended." ,
Provider : "!IBMCOS" ,
} , {
Value : "authenticated-read" ,
Help : "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access." ,
Provider : "!IBMCOS" ,
} , {
Value : "bucket-owner-read" ,
Help : "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it." ,
Provider : "!IBMCOS" ,
} , {
Value : "bucket-owner-full-control" ,
Help : "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it." ,
Provider : "!IBMCOS" ,
} , {
Value : "private" ,
Help : "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS" ,
2018-04-12 18:05:53 +02:00
Provider : "IBMCOS" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "public-read" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS" ,
Provider : "IBMCOS" ,
} , {
Value : "public-read-write" ,
Help : "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS" ,
2018-04-12 18:05:53 +02:00
Provider : "IBMCOS" ,
2018-04-13 17:08:00 +02:00
} , {
Value : "authenticated-read" ,
Help : "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS" ,
Provider : "IBMCOS" ,
} } ,
} , {
Name : "server_side_encryption" ,
Help : "The server-side encryption algorithm used when storing this object in S3." ,
Provider : "AWS" ,
Examples : [ ] fs . OptionExample { {
Value : "" ,
Help : "None" ,
} , {
Value : "AES256" ,
Help : "AES256" ,
} } ,
		}, {
			Name:     "storage_class",
			Help:     "The storage class to use when storing objects in S3.",
			Provider: "AWS",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "STANDARD",
				Help:  "Standard storage class",
			}, {
				Value: "REDUCED_REDUNDANCY",
				Help:  "Reduced redundancy storage class",
			}, {
				Value: "STANDARD_IA",
				Help:  "Standard Infrequent Access storage class",
			}, {
				Value: "ONEZONE_IA",
				Help:  "One Zone Infrequent Access storage class",
			}},
		}, {
			Name:     "chunk_size",
			Help:     "Chunk size to use for uploading.",
			Default:  fs.SizeSuffix(s3manager.MinUploadPartSize),
			Advanced: true,
		}, {
			Name:     "disable_checksum",
			Help:     "Don't store MD5 checksum with object metadata.",
			Default:  false,
			Advanced: true,
		}, {
			Name:     "session_token",
			Help:     "An AWS session token.",
			Hide:     fs.OptionHideBoth,
			Advanced: true,
		}, {
			Name:     "upload_concurrency",
			Help:     "Concurrency for multipart uploads.",
			Default:  2,
			Advanced: true,
		}, {
			Name:     "force_path_style",
			Help:     "If true use path style access, if false use virtual hosted style.\nSome providers (eg Aliyun OSS or Netease COS) require this.",
			Default:  true,
			Advanced: true,
		}},
	})
}
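
// An illustrative rclone.conf section for this backend (key names
// match the options registered above, values are placeholders):
//
//	[remote]
//	type = s3
//	provider = AWS
//	env_auth = false
//	access_key_id = XXXXXXXX
//	secret_access_key = YYYYYYYY
//	region = us-east-1
//	acl = private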

// Constants
const (
	metaMtime      = "Mtime"                       // the meta key to store mtime in - eg X-Amz-Meta-Mtime
	metaMD5Hash    = "Md5chksum"                   // the meta key to store md5hash in
	listChunkSize  = 1000                          // number of items to read at once
	maxRetries     = 10                            // number of retries to make of operations
	maxSizeForCopy = 5 * 1024 * 1024 * 1024        // The maximum size of object we can COPY
	maxFileSize    = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
)

// Options defines the configuration for this backend
type Options struct {
	Provider             string        `config:"provider"`
	EnvAuth              bool          `config:"env_auth"`
	AccessKeyID          string        `config:"access_key_id"`
	SecretAccessKey      string        `config:"secret_access_key"`
	Region               string        `config:"region"`
	Endpoint             string        `config:"endpoint"`
	LocationConstraint   string        `config:"location_constraint"`
	ACL                  string        `config:"acl"`
	ServerSideEncryption string        `config:"server_side_encryption"`
	StorageClass         string        `config:"storage_class"`
	ChunkSize            fs.SizeSuffix `config:"chunk_size"`
	DisableChecksum      bool          `config:"disable_checksum"`
	SessionToken         string        `config:"session_token"`
	UploadConcurrency    int           `config:"upload_concurrency"`
	ForcePathStyle       bool          `config:"force_path_style"`
}
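
// The `config:"..."` tags are how configstruct.Set fills Options from
// the config map in NewFs. A minimal sketch (assuming a
// configmap.Simple literal, values illustrative):
//
//	m := configmap.Simple{"provider": "AWS", "chunk_size": "5M"}
//	opt := new(Options)
//	err := configstruct.Set(m, opt) // opt.Provider == "AWS"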

// Fs represents a remote s3 server
type Fs struct {
	name          string           // the name of the remote
	root          string           // root of the bucket - ignore all objects above this
	opt           Options          // parsed options
	features      *fs.Features     // optional features
	c             *s3.S3           // the connection to the s3 server
	ses           *session.Session // the s3 session
	bucket        string           // the bucket we are working on
	bucketOKMu    sync.Mutex       // mutex to protect bucketOK and bucketDeleted
	bucketOK      bool             // true if we have created the bucket
	bucketDeleted bool             // true if we have deleted the bucket
}

// Object describes an s3 object
type Object struct {
	// Will definitely have everything but meta which may be nil
	//
	// List will read everything but meta & mimeType - to fill
	// that in you need to call readMetaData
	fs           *Fs                // what this object is part of
	remote       string             // The remote path
	etag         string             // md5sum of the object
	bytes        int64              // size of the object
	lastModified time.Time          // Last modified
	meta         map[string]*string // The object metadata if known - may be nil
	mimeType     string             // MimeType of object - may be ""
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	if f.root == "" {
		return f.bucket
	}
	return f.bucket + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.root == "" {
		return fmt.Sprintf("S3 bucket %s", f.bucket)
	}
	return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Pattern to match an s3 path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// s3ParsePath parses an s3 'url' into bucket and directory
func s3ParsePath(path string) (bucket, directory string, err error) {
	parts := matcher.FindStringSubmatch(path)
	if parts == nil {
		err = errors.Errorf("couldn't parse bucket out of s3 path %q", path)
	} else {
		bucket, directory = parts[1], parts[2]
		directory = strings.Trim(directory, "/")
	}
	return
}
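
// For example (an illustrative call):
//
//	bucket, directory, _ := s3ParsePath("mybucket/path/to/dir/")
//	// bucket == "mybucket", directory == "path/to/dir"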

// s3Connection makes a connection to s3
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
	// Make the auth
	v := credentials.Value{
		AccessKeyID:     opt.AccessKeyID,
		SecretAccessKey: opt.SecretAccessKey,
		SessionToken:    opt.SessionToken,
	}

	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
	def := defaults.Get()
	def.Config.HTTPClient = lowTimeoutClient

	// first provider to supply a credential set "wins"
	providers := []credentials.Provider{
		// use static credentials if they're present (checked by provider)
		&credentials.StaticProvider{Value: v},

		// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
		// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
		&credentials.EnvProvider{},

		// A SharedCredentialsProvider retrieves credentials
		// from the current user's home directory. It checks
		// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
		&credentials.SharedCredentialsProvider{},

		// Pick up IAM role if we're in an ECS task
		defaults.RemoteCredProvider(*def.Config, def.Handlers),

		// Pick up IAM role in case we're on EC2
		&ec2rolecreds.EC2RoleProvider{
			Client: ec2metadata.New(session.New(), &aws.Config{
				HTTPClient: lowTimeoutClient,
			}),
			ExpiryWindow: 3,
		},
	}
	cred := credentials.NewChainCredentials(providers)

	switch {
	case opt.EnvAuth:
		// No need for empty checks if "env_auth" is true
	case v.AccessKeyID == "" && v.SecretAccessKey == "":
		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
		cred = credentials.AnonymousCredentials
	case v.AccessKeyID == "":
		return nil, nil, errors.New("access_key_id not found")
	case v.SecretAccessKey == "":
		return nil, nil, errors.New("secret_access_key not found")
	}

	if opt.Region == "" && opt.Endpoint == "" {
		opt.Endpoint = "https://s3.amazonaws.com/"
	}
	if opt.Region == "" {
		opt.Region = "us-east-1"
	}
	awsConfig := aws.NewConfig().
		WithRegion(opt.Region).
		WithMaxRetries(maxRetries).
		WithCredentials(cred).
		WithEndpoint(opt.Endpoint).
		WithHTTPClient(fshttp.NewClient(fs.Config)).
		WithS3ForcePathStyle(opt.ForcePathStyle)
	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
	ses := session.New()
	c := s3.New(ses, awsConfig)
	if opt.Region == "other-v2-signature" {
		fs.Debugf(nil, "Using v2 auth")
		signer := func(req *request.Request) {
			// Ignore AnonymousCredentials object
			if req.Config.Credentials == credentials.AnonymousCredentials {
				return
			}
			sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
		}
		c.Handlers.Sign.Clear()
		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
		c.Handlers.Sign.PushBack(signer)
	}
	return c, ses, nil
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	if opt.ChunkSize < fs.SizeSuffix(s3manager.MinUploadPartSize) {
		return nil, errors.Errorf("s3 chunk size (%v) must be >= %v", opt.ChunkSize, fs.SizeSuffix(s3manager.MinUploadPartSize))
	}
	bucket, directory, err := s3ParsePath(root)
	if err != nil {
		return nil, err
	}
	c, ses, err := s3Connection(opt)
	if err != nil {
		return nil, err
	}
	f := &Fs{
		name:   name,
		root:   directory,
		opt:    *opt,
		c:      c,
		bucket: bucket,
		ses:    ses,
	}
	f.features = (&fs.Features{
		ReadMimeType:  true,
		WriteMimeType: true,
		BucketBased:   true,
	}).Fill(f)
	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		req := s3.HeadObjectInput{
			Bucket: &f.bucket,
			Key:    &directory,
		}
		_, err = f.c.HeadObject(&req)
		if err == nil {
			f.root = path.Dir(directory)
			if f.root == "." {
				f.root = ""
			} else {
				f.root += "/"
			}
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	// f.listMultipartUploads()
	return f, nil
}

// Return an Object from a path
//
// If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info but not meta
		if info.LastModified == nil {
			fs.Logf(o, "Failed to read last modified")
			o.lastModified = time.Now()
		} else {
			o.lastModified = *info.LastModified
		}
		o.etag = aws.StringValue(info.ETag)
		o.bytes = aws.Int64Value(info.Size)
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *s3.Object, isDirectory bool) error

// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) error {
	root := f.root
	if dir != "" {
		root += dir + "/"
	}
	maxKeys := int64(listChunkSize)
	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	var marker *string
	for {
		// FIXME need to implement ALL loop
		req := s3.ListObjectsInput{
			Bucket:    &f.bucket,
			Delimiter: &delimiter,
			Prefix:    &root,
			MaxKeys:   &maxKeys,
			Marker:    marker,
		}
		resp, err := f.c.ListObjects(&req)
		if err != nil {
			if awsErr, ok := err.(awserr.RequestFailure); ok {
				if awsErr.StatusCode() == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			return err
		}
		rootLength := len(f.root)
		if !recurse {
			for _, commonPrefix := range resp.CommonPrefixes {
				if commonPrefix.Prefix == nil {
					fs.Logf(f, "Nil common prefix received")
					continue
				}
				remote := *commonPrefix.Prefix
				if !strings.HasPrefix(remote, f.root) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[rootLength:]
				if strings.HasSuffix(remote, "/") {
					remote = remote[:len(remote)-1]
				}
				err = fn(remote, &s3.Object{Key: &remote}, true)
				if err != nil {
					return err
				}
			}
		}
		for _, object := range resp.Contents {
			key := aws.StringValue(object.Key)
			if !strings.HasPrefix(key, f.root) {
				fs.Logf(f, "Odd name received %q", key)
				continue
			}
			remote := key[rootLength:]
			// is this a directory marker?
			if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 {
				if recurse && remote != "" {
					// add a directory in if --fast-list since will have no prefixes
					remote = remote[:len(remote)-1]
					err = fn(remote, &s3.Object{Key: &remote}, true)
					if err != nil {
						return err
					}
				}
				continue // skip directory marker
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		if !aws.BoolValue(resp.IsTruncated) {
			break
		}
		// Use NextMarker if set, otherwise use last Key
		if resp.NextMarker == nil || *resp.NextMarker == "" {
			if len(resp.Contents) == 0 {
				return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
			}
			marker = resp.Contents[len(resp.Contents)-1].Key
		} else {
			marker = resp.NextMarker
		}
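		// (S3 only guarantees NextMarker when a delimiter is set, so
		// with no delimiter the last Key of the page is used as the
		// next marker, as above.)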
	}
	return nil
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		size := int64(0)
		if object.Size != nil {
			size = *object.Size
		}
		d := fs.NewDir(remote, time.Time{}).SetSize(size)
		return d, nil
	}
	o, err := f.newObjectWithInfo(remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// mark the bucket as being OK
func (f *Fs) markBucketOK() {
	if f.bucket != "" {
		f.bucketOKMu.Lock()
		f.bucketOK = true
		f.bucketDeleted = false
		f.bucketOKMu.Unlock()
	}
}

// listDir lists files and directories in dir
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
	// List the objects and directories
	err = f.list(dir, false, func(remote string, object *s3.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return entries, nil
}

// listBuckets lists the buckets
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
	if dir != "" {
		return nil, fs.ErrorListBucketRequired
	}
	req := s3.ListBucketsInput{}
	resp, err := f.c.ListBuckets(&req)
	if err != nil {
		return nil, err
	}
	for _, bucket := range resp.Buckets {
		d := fs.NewDir(aws.StringValue(bucket.Name), aws.TimeValue(bucket.CreationDate))
		entries = append(entries, d)
	}
	return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	if f.bucket == "" {
		return f.listBuckets(dir)
	}
	return f.listDir(dir)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
	if f.bucket == "" {
		return fs.ErrorListBucketRequired
	}
	list := walk.NewListRHelper(callback)
	err = f.list(dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		return list.Add(entry)
	})
	if err != nil {
		return err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return list.Flush()
}

// Put the Object into the bucket
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	fs := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return fs, fs.Update(in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(in, src, options...)
}

// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) dirExists() (bool, error) {
	req := s3.HeadBucketInput{
		Bucket: &f.bucket,
	}
	_, err := f.c.HeadBucket(&req)
	if err == nil {
		return true, nil
	}
	if err, ok := err.(awserr.RequestFailure); ok {
		if err.StatusCode() == http.StatusNotFound {
			return false, nil
		}
	}
	return false, err
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.bucketOK {
		return nil
	}
	if !f.bucketDeleted {
		exists, err := f.dirExists()
		if err == nil {
			f.bucketOK = exists
		}
		if err != nil || exists {
			return err
		}
	}
	req := s3.CreateBucketInput{
		Bucket: &f.bucket,
		ACL:    &f.opt.ACL,
	}
	if f.opt.LocationConstraint != "" {
		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
			LocationConstraint: &f.opt.LocationConstraint,
		}
	}
	_, err := f.c.CreateBucket(&req)
	if err, ok := err.(awserr.Error); ok {
		if err.Code() == "BucketAlreadyOwnedByYou" {
			err = nil
		}
	}
	if err == nil {
		f.bucketOK = true
		f.bucketDeleted = false
	}
	return err
}

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.root != "" || dir != "" {
		return nil
	}
	req := s3.DeleteBucketInput{
		Bucket: &f.bucket,
	}
	_, err := f.c.DeleteBucket(&req)
	if err == nil {
		f.bucketOK = false
		f.bucketDeleted = true
	}
	return err
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// pathEscape escapes s as for a URL path. It uses rest.URLPathEscape
// but also escapes '+' for S3 and Digital Ocean spaces compatibility
func pathEscape(s string) string {
	return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
}
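
// A quick sketch of the effect (assuming rest.URLPathEscape escapes
// spaces but leaves '+' alone, as net/url path escaping does):
//
//	pathEscape("dir/a +b") // -> "dir/a%20%2Bb"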

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	err := f.Mkdir("")
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcFs := srcObj.fs
	key := f.root + remote
	source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
	req := s3.CopyObjectInput{
		Bucket:            &f.bucket,
		Key:               &key,
		CopySource:        &source,
		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
	}
	_, err = f.c.CopyObject(&req)
	if err != nil {
		return nil, err
	}
	return f.NewObject(remote)
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	hash := strings.Trim(strings.ToLower(o.etag), `"`)
	// Check the etag is a valid md5sum
	if !matchMd5.MatchString(hash) {
		err := o.readMetaData()
		if err != nil {
			return "", err
		}
		if md5sum, ok := o.meta[metaMD5Hash]; ok {
			md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
			if err != nil {
				return "", err
			}
			hash = hex.EncodeToString(md5sumBytes)
		} else {
			hash = ""
		}
	}
	return hash, nil
}
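
// The etag check above fails for multipart uploads, where S3 returns
// a value such as "9b2cf535f27731c974343645a3985328-10" (md5 of the
// part md5s plus a part count, value illustrative) instead of a plain
// md5sum - hence the fallback to the Md5chksum metadata written at
// upload time.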

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.bytes
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
	if o.meta != nil {
		return nil
	}
	key := o.fs.root + o.remote
	req := s3.HeadObjectInput{
		Bucket: &o.fs.bucket,
		Key:    &key,
	}
	resp, err := o.fs.c.HeadObject(&req)
	if err != nil {
		if awsErr, ok := err.(awserr.RequestFailure); ok {
			if awsErr.StatusCode() == http.StatusNotFound {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	var size int64
	// Ignore missing Content-Length assuming it is 0
	// Some versions of ceph do this due to their apache proxies
	if resp.ContentLength != nil {
		size = *resp.ContentLength
	}
	o.etag = aws.StringValue(resp.ETag)
	o.bytes = size
	o.meta = resp.Metadata
	if resp.LastModified == nil {
		fs.Logf(o, "Failed to read last modified from HEAD")
		o.lastModified = time.Now()
	} else {
		o.lastModified = *resp.LastModified
	}
	o.mimeType = aws.StringValue(resp.ContentType)
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
	if fs.Config.UseServerModTime {
		return o.lastModified
	}
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	// read mtime out of metadata if available
	d, ok := o.meta[metaMtime]
	if !ok || d == nil {
		// fs.Debugf(o, "No metadata")
		return o.lastModified
	}
	modTime, err := swift.FloatStringToTime(*d)
	if err != nil {
		fs.Logf(o, "Failed to read mtime from object: %v", err)
		return o.lastModified
	}
	return modTime
}
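
// The metaMtime value is a Unix epoch with fractional seconds, eg
// "1497269771.528556" (an illustrative value), the format that
// swift.TimeToFloatString and swift.FloatStringToTime round-trip.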

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
	err := o.readMetaData()
	if err != nil {
		return err
	}
	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
	if o.bytes >= maxSizeForCopy {
		fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
		return nil
	}

	// Guess the content type
	mimeType := fs.MimeType(o)

	// Copy the object to itself to update the metadata
	key := o.fs.root + o.remote
	sourceKey := o.fs.bucket + "/" + key
	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
	req := s3.CopyObjectInput{
		Bucket:            &o.fs.bucket,
		ACL:               &o.fs.opt.ACL,
		Key:               &key,
		ContentType:       &mimeType,
		CopySource:        aws.String(pathEscape(sourceKey)),
		Metadata:          o.meta,
		MetadataDirective: &directive,
	}
	_, err = o.fs.c.CopyObject(&req)
	return err
}

// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	key := o.fs.root + o.remote
	req := s3.GetObjectInput{
		Bucket: &o.fs.bucket,
		Key:    &key,
	}
	for _, option := range options {
		switch option.(type) {
		case *fs.RangeOption, *fs.SeekOption:
			_, value := option.Header()
			req.Range = &value
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	resp, err := o.fs.c.GetObject(&req)
	if err, ok := err.(awserr.RequestFailure); ok {
		if err.Code() == "InvalidObjectState" {
			return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
		}
	}
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
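
// For example, to read just the first KiB (an illustrative sketch):
//
//	in, err := o.Open(&fs.RangeOption{Start: 0, End: 1023})
//	// sends "Range: bytes=0-1023" with the GetObject request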

// Update the Object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	err := o.fs.Mkdir("")
	if err != nil {
		return err
	}
	modTime := src.ModTime()
	size := src.Size()

	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
		u.Concurrency = o.fs.opt.UploadConcurrency
		u.LeavePartsOnError = false
		u.S3 = o.fs.c
		u.PartSize = int64(o.fs.opt.ChunkSize)

		if size == -1 {
			// Make parts as small as possible while still being able to upload to the
			// S3 file size limit. Rounded up to nearest MB.
			u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
			return
		}
		// Adjust PartSize until the number of parts is small enough.
		if size/u.PartSize >= s3manager.MaxUploadParts {
			// Calculate partition size rounded up to the nearest MB
			u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
		}
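		// Worked example (illustrative): with MaxUploadParts == 10000,
		// a 100 GiB upload yields an 11 MiB PartSize (about 9310
		// parts), and the unknown-size branch above yields 525 MiB
		// parts for the 5 TiB maxFileSize limit.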
	})

	// Set the mtime in the meta data
	metadata := map[string]*string{
		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
	}

	if !o.fs.opt.DisableChecksum && size > uploader.PartSize {
		hash, err := src.Hash(hash.MD5)
		if err == nil && matchMd5.MatchString(hash) {
			hashBytes, err := hex.DecodeString(hash)
			if err == nil {
				metadata[metaMD5Hash] = aws.String(base64.StdEncoding.EncodeToString(hashBytes))
			}
		}
	}

	// Guess the content type
	mimeType := fs.MimeType(src)

	key := o.fs.root + o.remote
	req := s3manager.UploadInput{
		Bucket:      &o.fs.bucket,
		ACL:         &o.fs.opt.ACL,
		Key:         &key,
		Body:        in,
		ContentType: &mimeType,
		Metadata:    metadata,
		//ContentLength: &size,
	}
	if o.fs.opt.ServerSideEncryption != "" {
		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
	}
	if o.fs.opt.StorageClass != "" {
		req.StorageClass = &o.fs.opt.StorageClass
	}
	_, err = uploader.Upload(&req)
	if err != nil {
		return err
	}

	// Read the metadata from the newly created object
	o.meta = nil // wipe old metadata
	err = o.readMetaData()
	return err
}

// Remove an object
func (o *Object) Remove() error {
	key := o.fs.root + o.remote
	req := s3.DeleteObjectInput{
		Bucket: &o.fs.bucket,
		Key:    &key,
	}
	_, err := o.fs.c.DeleteObject(&req)
	return err
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return ""
	}
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)