// Package s3 provides an interface to Amazon S3 object storage
package s3

// FIXME need to prevent anything but ListDir working for s3://

/*
Progress of port to aws-sdk

 * Don't really need o.meta at all?

What happens if you CTRL-C a multipart upload

  * get an incomplete upload
  * disappears when you delete the bucket
*/

import (
    "errors"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "path"
    "regexp"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/corehandlers"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    "github.com/aws/aws-sdk-go/aws/ec2metadata"
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/swift"
)

// Register with Fs
func init() {
    fs.Register(&fs.Info{
        Name:  "s3",
        NewFs: NewFs,
        // AWS endpoints: http://docs.amazonwebservices.com/general/latest/gr/rande.html#s3_region
        Options: []fs.Option{{
            Name: "env_auth",
            Help: "Get AWS credentials from runtime (environment variables or EC2 meta data if no env vars). Only applies if access_key_id and secret_access_key are blank.",
            Examples: []fs.OptionExample{
                {
                    Value: "false",
                    Help:  "Enter AWS credentials in the next step",
                }, {
                    Value: "true",
                    Help:  "Get AWS credentials from the environment (env vars or IAM)",
                },
            },
        }, {
            Name: "access_key_id",
            Help: "AWS Access Key ID - leave blank for anonymous access or runtime credentials.",
        }, {
            Name: "secret_access_key",
            Help: "AWS Secret Access Key (password) - leave blank for anonymous access or runtime credentials.",
        }, {
            Name: "region",
            Help: "Region to connect to.",
            Examples: []fs.OptionExample{{
                Value: "us-east-1",
                Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
            }, {
                Value: "us-west-2",
                Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
            }, {
                Value: "us-west-1",
                Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
            }, {
                Value: "eu-west-1",
                Help:  "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
            }, {
                Value: "eu-central-1",
                Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
            }, {
                Value: "ap-southeast-1",
                Help:  "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
            }, {
                Value: "ap-southeast-2",
                Help:  "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
            }, {
                Value: "ap-northeast-1",
                Help:  "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
            }, {
                Value: "sa-east-1",
                Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
            }, {
                Value: "other-v2-signature",
                Help:  "If using an S3 clone that only understands v2 signatures - eg Ceph - set this and make sure you set the endpoint.",
            }, {
                Value: "other-v4-signature",
                Help:  "If using an S3 clone that understands v4 signatures set this and make sure you set the endpoint.",
            }},
        }, {
            Name: "endpoint",
            Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.\nSpecify if using an S3 clone such as Ceph.",
        }, {
            Name: "location_constraint",
            Help: "Location constraint - must be set to match the Region. Used when creating buckets only.",
            Examples: []fs.OptionExample{{
                Value: "",
                Help:  "Empty for US Region, Northern Virginia or Pacific Northwest.",
            }, {
                Value: "us-west-2",
                Help:  "US West (Oregon) Region.",
            }, {
                Value: "us-west-1",
                Help:  "US West (Northern California) Region.",
            }, {
                Value: "eu-west-1",
                Help:  "EU (Ireland) Region.",
            }, {
                Value: "EU",
                Help:  "EU Region.",
            }, {
                Value: "ap-southeast-1",
                Help:  "Asia Pacific (Singapore) Region.",
            }, {
                Value: "ap-southeast-2",
                Help:  "Asia Pacific (Sydney) Region.",
            }, {
                Value: "ap-northeast-1",
                Help:  "Asia Pacific (Tokyo) Region.",
            }, {
                Value: "sa-east-1",
                Help:  "South America (Sao Paulo) Region.",
            }},
        }},
    })
}

// Constants
const (
    metaMtime     = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
    listChunkSize = 1024    // number of items to read at once
    maxRetries    = 10      // number of retries to make of operations
)

// Fs represents a remote s3 server
type Fs struct {
    name               string           // the name of the remote
    c                  *s3.S3           // the connection to the s3 server
    ses                *session.Session // the s3 session
    bucket             string           // the bucket we are working on
    perm               string           // permissions for new buckets / objects
    root               string           // root of the bucket - ignore all objects above this
    locationConstraint string           // location constraint of new buckets
}

// Object describes an s3 object
type Object struct {
    // Will definitely have everything but meta which may be nil
    //
    // List will read everything but meta - to fill that in need to call
    // readMetaData
    fs           *Fs                // what this object is part of
    remote       string             // The remote path
    etag         string             // md5sum of the object
    bytes        int64              // size of the object
    lastModified time.Time          // Last modified
    meta         map[string]*string // The object metadata if known - may be nil
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    if f.root == "" {
        return f.bucket
    }
    return f.bucket + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
    if f.root == "" {
        return fmt.Sprintf("S3 bucket %s", f.bucket)
    }
    return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
}

// Pattern to match an s3 path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
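
// For example "mybucket/path/to/dir" splits into bucket "mybucket" and
// directory "path/to/dir" once s3ParsePath below has trimmed the slashes.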

// s3ParsePath parses an s3 'url'
func s3ParsePath(path string) (bucket, directory string, err error) {
    parts := matcher.FindStringSubmatch(path)
    if parts == nil {
        err = fmt.Errorf("Couldn't parse bucket out of s3 path %q", path)
    } else {
        bucket, directory = parts[1], parts[2]
        directory = strings.Trim(directory, "/")
    }
    return
}

// s3Connection makes a connection to s3
func s3Connection(name string) (*s3.S3, *session.Session, error) {
    // Make the auth
    v := credentials.Value{
        AccessKeyID:     fs.ConfigFile.MustValue(name, "access_key_id"),
        SecretAccessKey: fs.ConfigFile.MustValue(name, "secret_access_key"),
    }

    // first provider to supply a credential set "wins"
    providers := []credentials.Provider{
        // use static credentials if they're present (checked by provider)
        &credentials.StaticProvider{Value: v},

        // * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
        // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
        &credentials.EnvProvider{},

        // Pick up IAM role in case we're on EC2
        &ec2rolecreds.EC2RoleProvider{
            Client: ec2metadata.New(session.New(), &aws.Config{
                HTTPClient: &http.Client{Timeout: 1 * time.Second}, // low timeout to ec2 metadata service
            }),
            ExpiryWindow: 3,
        },
    }
    cred := credentials.NewChainCredentials(providers)

    switch {
    case fs.ConfigFile.MustBool(name, "env_auth", false):
        // No need for empty checks if "env_auth" is true
    case v.AccessKeyID == "" && v.SecretAccessKey == "":
        // if no access key/secret and iam is explicitly disabled then fall back to anon interaction
        cred = credentials.AnonymousCredentials
    case v.AccessKeyID == "":
        return nil, nil, errors.New("access_key_id not found")
    case v.SecretAccessKey == "":
        return nil, nil, errors.New("secret_access_key not found")
    }

    endpoint := fs.ConfigFile.MustValue(name, "endpoint")
    region := fs.ConfigFile.MustValue(name, "region")
    if region == "" && endpoint == "" {
        endpoint = "https://s3.amazonaws.com/"
    }
    if region == "" {
        region = "us-east-1"
    }
    awsConfig := aws.NewConfig().
        WithRegion(region).
        WithMaxRetries(maxRetries).
        WithCredentials(cred).
        WithEndpoint(endpoint).
        WithHTTPClient(fs.Config.Client()).
        WithS3ForcePathStyle(true)
    // awsConfig.WithLogLevel(aws.LogDebugWithSigning)
    ses := session.New()
    c := s3.New(ses, awsConfig)
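    // For S3 clones which only understand v2 signatures, swap the SDK's
    // default v4 signing handlers for this package's sign() helper.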
    if region == "other-v2-signature" {
        fs.Debug(name, "Using v2 auth")
        signer := func(req *request.Request) {
            // Ignore AnonymousCredentials object
            if req.Config.Credentials == credentials.AnonymousCredentials {
                return
            }
            sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
        }
        c.Handlers.Sign.Clear()
        c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
        c.Handlers.Sign.PushBack(signer)
    }
    // Add user agent
    c.Handlers.Build.PushBack(func(r *request.Request) {
        r.HTTPRequest.Header.Set("User-Agent", fs.UserAgent)
    })
    return c, ses, nil
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
    bucket, directory, err := s3ParsePath(root)
    if err != nil {
        return nil, err
    }
    c, ses, err := s3Connection(name)
    if err != nil {
        return nil, err
    }
    f := &Fs{
        name:   name,
        c:      c,
        bucket: bucket,
        ses:    ses,
        // FIXME perm: s3.Private, // FIXME need user to specify
        root:               directory,
        locationConstraint: fs.ConfigFile.MustValue(name, "location_constraint"),
    }
    if f.root != "" {
        f.root += "/"
        // Check to see if the object exists
        req := s3.HeadObjectInput{
            Bucket: &f.bucket,
            Key:    &directory,
        }
        _, err = f.c.HeadObject(&req)
        if err == nil {
            remote := path.Base(directory)
            f.root = path.Dir(directory)
            if f.root == "." {
                f.root = ""
            } else {
                f.root += "/"
            }
            obj := f.NewFsObject(remote)
            // return a Fs Limited to this object
            return fs.NewLimited(f, obj), nil
        }
    }
    // f.listMultipartUploads()
    return f, nil
}

// Return an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) newFsObjectWithInfo(remote string, info *s3.Object) fs.Object {
    o := &Object{
        fs:     f,
        remote: remote,
    }
    if info != nil {
        // Set info but not meta
        if info.LastModified == nil {
            fs.Log(o, "Failed to read last modified")
            o.lastModified = time.Now()
        } else {
            o.lastModified = *info.LastModified
        }
        o.etag = aws.StringValue(info.ETag)
        o.bytes = aws.Int64Value(info.Size)
    } else {
        err := o.readMetaData() // reads info and meta, returning an error
        if err != nil {
            // logged already FsDebug("Failed to read info: %s", err)
            return nil
        }
    }
    return o
}

// NewFsObject returns an FsObject from a path
//
// May return nil if an error occurred
func (f *Fs) NewFsObject(remote string) fs.Object {
    return f.newFsObjectWithInfo(remote, nil)
}

// list the objects into the function supplied
//
// If directories is set it only sends directories
func (f *Fs) list(directories bool, fn func(string, *s3.Object)) {
    maxKeys := int64(listChunkSize)
    delimiter := ""
    if directories {
        delimiter = "/"
    }
    var marker *string
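    // marker is the key to resume the listing from - the loop below pages
    // through the results until the response is no longer truncated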
    for {
        // FIXME need to implement ALL loop
        req := s3.ListObjectsInput{
            Bucket:    &f.bucket,
            Delimiter: &delimiter,
            Prefix:    &f.root,
            MaxKeys:   &maxKeys,
            Marker:    marker,
        }
        resp, err := f.c.ListObjects(&req)
        if err != nil {
            fs.Stats.Error()
            fs.ErrorLog(f, "Couldn't read bucket %q: %s", f.bucket, err)
            break
        } else {
            rootLength := len(f.root)
            if directories {
                for _, commonPrefix := range resp.CommonPrefixes {
                    if commonPrefix.Prefix == nil {
                        fs.Log(f, "Nil common prefix received")
                        continue
                    }
                    remote := *commonPrefix.Prefix
                    if !strings.HasPrefix(remote, f.root) {
                        fs.Log(f, "Odd name received %q", remote)
                        continue
                    }
                    remote = remote[rootLength:]
                    if strings.HasSuffix(remote, "/") {
                        remote = remote[:len(remote)-1]
                    }
                    fn(remote, &s3.Object{Key: &remote})
                }
            } else {
                for _, object := range resp.Contents {
                    key := aws.StringValue(object.Key)
                    if !strings.HasPrefix(key, f.root) {
                        fs.Log(f, "Odd name received %q", key)
                        continue
                    }
                    remote := key[rootLength:]
                    fn(remote, object)
                }
            }
            if !aws.BoolValue(resp.IsTruncated) {
                break
            }
            // Use NextMarker if set, otherwise use last Key
            if resp.NextMarker == nil || *resp.NextMarker == "" {
                marker = resp.Contents[len(resp.Contents)-1].Key
            } else {
                marker = resp.NextMarker
            }
        }
    }
}

// List walks the path returning a channel of FsObjects
func (f *Fs) List() fs.ObjectsChan {
    out := make(fs.ObjectsChan, fs.Config.Checkers)
    if f.bucket == "" {
        // Return no objects at top level list
        close(out)
        fs.Stats.Error()
        fs.ErrorLog(f, "Can't list objects at root - choose a bucket using lsd")
    } else {
        go func() {
            defer close(out)
            f.list(false, func(remote string, object *s3.Object) {
                if fs := f.newFsObjectWithInfo(remote, object); fs != nil {
                    out <- fs
                }
            })
        }()
    }
    return out
}

// ListDir lists the buckets
func (f *Fs) ListDir() fs.DirChan {
    out := make(fs.DirChan, fs.Config.Checkers)
    if f.bucket == "" {
        // List the buckets
        go func() {
            defer close(out)
            req := s3.ListBucketsInput{}
            resp, err := f.c.ListBuckets(&req)
            if err != nil {
                fs.Stats.Error()
                fs.ErrorLog(f, "Couldn't list buckets: %s", err)
            } else {
                for _, bucket := range resp.Buckets {
                    out <- &fs.Dir{
                        Name:  aws.StringValue(bucket.Name),
                        When:  aws.TimeValue(bucket.CreationDate),
                        Bytes: -1,
                        Count: -1,
                    }
                }
            }
        }()
    } else {
        // List the directories in the path in the bucket
        go func() {
            defer close(out)
            f.list(true, func(remote string, object *s3.Object) {
                size := int64(0)
                if object.Size != nil {
                    size = *object.Size
                }
                out <- &fs.Dir{
                    Name:  remote,
                    Bytes: size,
                    Count: 0,
                }
            })
        }()
    }
    return out
}

// Put the FsObject into the bucket
func (f *Fs) Put(in io.Reader, remote string, modTime time.Time, size int64) (fs.Object, error) {
    // Temporary Object under construction
    fs := &Object{
        fs:     f,
        remote: remote,
    }
    return fs, fs.Update(in, modTime, size)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir() error {
    req := s3.CreateBucketInput{
        Bucket: &f.bucket,
        ACL:    &f.perm,
    }
    if f.locationConstraint != "" {
        req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
            LocationConstraint: &f.locationConstraint,
        }
    }
    _, err := f.c.CreateBucket(&req)
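    // Treat a bucket we already own as success so Mkdir is idempotent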
    if err, ok := err.(awserr.Error); ok {
        if err.Code() == "BucketAlreadyOwnedByYou" {
            return nil
        }
    }
    return err
}

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir() error {
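    // Nothing to do unless we are at the bucket root - S3 has no real
    // directories below the bucket level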
    if f.root != "" {
        return nil
    }
    req := s3.DeleteBucketInput{
        Bucket: &f.bucket,
    }
    _, err := f.c.DeleteBucket(&req)
    return err
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
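    // The mtime is stored as a floating point seconds string in the object
    // metadata (see metaMtime), so nanosecond precision survives a round trip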
    return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debug(src, "Can't copy - not same remote type")
        return nil, fs.ErrorCantCopy
    }
    srcFs := srcObj.fs
    key := f.root + remote
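    // CopySource is the URL-escaped "bucket/key" of the source object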
    source := url.QueryEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
    req := s3.CopyObjectInput{
        Bucket:            &f.bucket,
        Key:               &key,
        CopySource:        &source,
        MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
    }
    _, err := f.c.CopyObject(&req)
    if err != nil {
        return nil, err
    }
    return f.NewFsObject(remote), err
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
    return fs.HashSet(fs.HashMD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Fs {
    return o.fs
}

// Return a string version
func (o *Object) String() string {
    if o == nil {
        return "<nil>"
    }
    return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
    if t != fs.HashMD5 {
        return "", fs.ErrHashUnsupported
    }
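    // S3 returns the ETag wrapped in double quotes, so strip them before
    // checking it looks like an md5sum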
    etag := strings.Trim(strings.ToLower(o.etag), `"`)
    // Check the etag is a valid md5sum
    if !matchMd5.MatchString(etag) {
        // fs.Debug(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
        return "", nil
    }
    return etag, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
    return o.bytes
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
    if o.meta != nil {
        return nil
    }
    key := o.fs.root + o.remote
    req := s3.HeadObjectInput{
        Bucket: &o.fs.bucket,
        Key:    &key,
    }
    resp, err := o.fs.c.HeadObject(&req)
    if err != nil {
        fs.Debug(o, "Failed to read info: %s", err)
        return err
    }
    var size int64
    // Ignore missing Content-Length assuming it is 0
    // Some versions of ceph do this due to their apache proxies
    if resp.ContentLength != nil {
        size = *resp.ContentLength
    }
    o.etag = aws.StringValue(resp.ETag)
    o.bytes = size
    o.meta = resp.Metadata
    if resp.LastModified == nil {
        fs.Log(o, "Failed to read last modified from HEAD: %s", err)
        o.lastModified = time.Now()
    } else {
        o.lastModified = *resp.LastModified
    }
    return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
    err := o.readMetaData()
    if err != nil {
        fs.Log(o, "Failed to read metadata: %s", err)
        return time.Now()
    }
    // read mtime out of metadata if available
    d, ok := o.meta[metaMtime]
    if !ok || d == nil {
        // fs.Debug(o, "No metadata")
        return o.lastModified
    }
    modTime, err := swift.FloatStringToTime(*d)
    if err != nil {
        fs.Log(o, "Failed to read mtime from object: %s", err)
        return o.lastModified
    }
    return modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) {
    err := o.readMetaData()
    if err != nil {
        fs.Stats.Error()
        fs.ErrorLog(o, "Failed to read metadata: %s", err)
        return
    }
    o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))

    // Guess the content type
    contentType := fs.MimeType(o)

    // Copy the object to itself to update the metadata
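    // (S3 can't change metadata in place - a server side copy onto the same
    // key with MetadataDirective REPLACE is the way to rewrite it)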
    key := o.fs.root + o.remote
    sourceKey := o.fs.bucket + "/" + key
    directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
    req := s3.CopyObjectInput{
        Bucket:            &o.fs.bucket,
        ACL:               &o.fs.perm,
        Key:               &key,
        ContentType:       &contentType,
        CopySource:        aws.String(url.QueryEscape(sourceKey)),
        Metadata:          o.meta,
        MetadataDirective: &directive,
    }
    _, err = o.fs.c.CopyObject(&req)
    if err != nil {
        fs.Stats.Error()
        fs.ErrorLog(o, "Failed to update remote mtime: %s", err)
    }
}

// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
    return true
}

// Open an object for read
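// The returned ReadCloser streams the body from S3 and must be closed by
// the caller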
func (o *Object) Open() (in io.ReadCloser, err error) {
    key := o.fs.root + o.remote
    req := s3.GetObjectInput{
        Bucket: &o.fs.bucket,
        Key:    &key,
    }
    resp, err := o.fs.c.GetObject(&req)
    if err != nil {
        return nil, err
    }
    return resp.Body, nil
}

// Update the Object from in with modTime and size
func (o *Object) Update(in io.Reader, modTime time.Time, size int64) error {
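    // The s3manager uploader buffers the input into parts and switches to a
    // concurrent multipart upload for large bodies - Concurrency controls
    // how many parts are uploaded at once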
    uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
        u.Concurrency = 2
        u.LeavePartsOnError = false
        u.S3 = o.fs.c
    })

    // Set the mtime in the meta data
    metadata := map[string]*string{
        metaMtime: aws.String(swift.TimeToFloatString(modTime)),
    }

    // Guess the content type
    contentType := fs.MimeType(o)

    key := o.fs.root + o.remote
    req := s3manager.UploadInput{
        Bucket:      &o.fs.bucket,
        ACL:         &o.fs.perm,
        Key:         &key,
        Body:        in,
        ContentType: &contentType,
        Metadata:    metadata,
        //ContentLength: &size,
    }
    _, err := uploader.Upload(&req)
    if err != nil {
        return err
    }

    // Read the metadata from the newly created object
    o.meta = nil // wipe old metadata
    err = o.readMetaData()
    return err
}

// Remove an object
func (o *Object) Remove() error {
    key := o.fs.root + o.remote
    req := s3.DeleteObjectInput{
        Bucket: &o.fs.bucket,
        Key:    &key,
    }
    _, err := o.fs.c.DeleteObject(&req)
    return err
}

// Check the interfaces are satisfied
var (
    _ fs.Fs     = &Fs{}
    _ fs.Copier = &Fs{}
    _ fs.Object = &Object{}
)