2016-10-31 16:50:02 +01:00
|
|
|
package s3manager
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"sort"
|
|
|
|
"sync"
|
|
|
|
|
2016-11-19 11:03:41 +01:00
|
|
|
"github.com/aws/aws-sdk-go/aws"
|
2016-10-31 16:50:02 +01:00
|
|
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
|
|
|
"github.com/aws/aws-sdk-go/aws/awsutil"
|
|
|
|
"github.com/aws/aws-sdk-go/aws/client"
|
2019-04-13 14:47:57 +02:00
|
|
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
2016-10-31 16:50:02 +01:00
|
|
|
"github.com/aws/aws-sdk-go/aws/request"
|
|
|
|
"github.com/aws/aws-sdk-go/service/s3"
|
|
|
|
"github.com/aws/aws-sdk-go/service/s3/s3iface"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Tunable limits and defaults used by Upload when buffering and sending an
// object's parts to Amazon S3.
const (
	// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
	// on Amazon S3.
	MaxUploadParts = 10000

	// MinUploadPartSize is the minimum allowed part size when uploading a part to
	// Amazon S3.
	MinUploadPartSize int64 = 1024 * 1024 * 5

	// DefaultUploadPartSize is the default part size to buffer chunks of a
	// payload into.
	DefaultUploadPartSize = MinUploadPartSize

	// DefaultUploadConcurrency is the default number of goroutines to spin up when
	// using Upload().
	DefaultUploadConcurrency = 5
)
|
|
|
|
|
|
|
|
// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
|
|
|
|
// will satisfy this interface when a multi part upload failed to upload all
|
|
|
|
// chucks to S3. In the case of a failure the UploadID is needed to operate on
|
|
|
|
// the chunks, if any, which were uploaded.
|
|
|
|
//
|
|
|
|
// Example:
|
|
|
|
//
|
|
|
|
// u := s3manager.NewUploader(opts)
|
|
|
|
// output, err := u.upload(input)
|
|
|
|
// if err != nil {
|
2016-11-19 11:03:41 +01:00
|
|
|
// if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
|
2016-10-31 16:50:02 +01:00
|
|
|
// // Process error and its associated uploadID
|
|
|
|
// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
|
|
|
|
// } else {
|
|
|
|
// // Process error generically
|
|
|
|
// fmt.Println("Error:", err.Error())
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
type MultiUploadFailure interface {
|
|
|
|
awserr.Error
|
|
|
|
|
|
|
|
// Returns the upload id for the S3 multipart upload that failed.
|
|
|
|
UploadID() string
|
|
|
|
}
|
|
|
|
|
|
|
|
// So that the Error interface type can be included as an anonymous field
|
|
|
|
// in the multiUploadError struct and not conflict with the error.Error() method.
|
|
|
|
type awsError awserr.Error
|
|
|
|
|
|
|
|
// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
|
|
|
|
// Composed of BaseError for code, message, and original error
|
|
|
|
//
|
|
|
|
// Should be used for an error that occurred failing a S3 multipart upload,
|
|
|
|
// and a upload ID is available. If an uploadID is not available a more relevant
|
|
|
|
type multiUploadError struct {
|
|
|
|
awsError
|
|
|
|
|
|
|
|
// ID for multipart upload which failed.
|
|
|
|
uploadID string
|
|
|
|
}
|
|
|
|
|
|
|
|
// Error returns the string representation of the error.
|
|
|
|
//
|
|
|
|
// See apierr.BaseError ErrorWithExtra for output format
|
|
|
|
//
|
|
|
|
// Satisfies the error interface.
|
|
|
|
func (m multiUploadError) Error() string {
|
|
|
|
extra := fmt.Sprintf("upload id: %s", m.uploadID)
|
|
|
|
return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
|
|
|
|
}
|
|
|
|
|
|
|
|
// String returns the string representation of the error.
|
|
|
|
// Alias for Error to satisfy the stringer interface.
|
|
|
|
func (m multiUploadError) String() string {
|
|
|
|
return m.Error()
|
|
|
|
}
|
|
|
|
|
|
|
|
// UploadID returns the id of the S3 upload which failed.
|
|
|
|
func (m multiUploadError) UploadID() string {
|
|
|
|
return m.uploadID
|
|
|
|
}
|
|
|
|
|
|
|
|
// UploadOutput represents a response from the Upload() call.
type UploadOutput struct {
	// Location is the URL where the object was uploaded to.
	Location string

	// VersionID is the version of the object that was uploaded. It is only
	// populated when the S3 Bucket is versioned; otherwise it is left unset.
	VersionID *string

	// UploadID is the ID of the multipart upload to S3. In the case of an
	// error the error can be cast to the MultiUploadFailure interface to
	// extract this same upload ID.
	UploadID string
}
|
|
|
|
|
2017-05-11 16:39:54 +02:00
|
|
|
// WithUploaderRequestOptions appends to the Uploader's API request options.
|
|
|
|
func WithUploaderRequestOptions(opts ...request.Option) func(*Uploader) {
|
|
|
|
return func(u *Uploader) {
|
|
|
|
u.RequestOptions = append(u.RequestOptions, opts...)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-31 16:50:02 +01:00
|
|
|
// The Uploader structure that calls Upload(). It is safe to call Upload()
|
|
|
|
// on this structure for multiple objects and across concurrent goroutines.
|
|
|
|
// Mutating the Uploader's properties is not safe to be done concurrently.
|
|
|
|
type Uploader struct {
|
|
|
|
// The buffer size (in bytes) to use when buffering data into chunks and
|
|
|
|
// sending them as parts to S3. The minimum allowed part size is 5MB, and
|
2017-07-23 09:51:42 +02:00
|
|
|
// if this value is set to zero, the DefaultUploadPartSize value will be used.
|
2016-10-31 16:50:02 +01:00
|
|
|
PartSize int64
|
|
|
|
|
2017-09-30 16:27:27 +02:00
|
|
|
// The number of goroutines to spin up in parallel per call to Upload when
|
|
|
|
// sending parts. If this is set to zero, the DefaultUploadConcurrency value
|
|
|
|
// will be used.
|
|
|
|
//
|
|
|
|
// The concurrency pool is not shared between calls to Upload.
|
2016-10-31 16:50:02 +01:00
|
|
|
Concurrency int
|
|
|
|
|
|
|
|
// Setting this value to true will cause the SDK to avoid calling
|
|
|
|
// AbortMultipartUpload on a failure, leaving all successfully uploaded
|
|
|
|
// parts on S3 for manual recovery.
|
|
|
|
//
|
|
|
|
// Note that storing parts of an incomplete multipart upload counts towards
|
|
|
|
// space usage on S3 and will add additional costs if not cleaned up.
|
|
|
|
LeavePartsOnError bool
|
|
|
|
|
|
|
|
// MaxUploadParts is the max number of parts which will be uploaded to S3.
|
|
|
|
// Will be used to calculate the partsize of the object to be uploaded.
|
|
|
|
// E.g: 5GB file, with MaxUploadParts set to 100, will upload the file
|
|
|
|
// as 100, 50MB parts.
|
|
|
|
// With a limited of s3.MaxUploadParts (10,000 parts).
|
2018-09-21 12:01:55 +02:00
|
|
|
//
|
|
|
|
// Defaults to package const's MaxUploadParts value.
|
2016-10-31 16:50:02 +01:00
|
|
|
MaxUploadParts int
|
|
|
|
|
|
|
|
// The client to use when uploading to S3.
|
|
|
|
S3 s3iface.S3API
|
2017-05-11 16:39:54 +02:00
|
|
|
|
|
|
|
// List of request options that will be passed down to individual API
|
|
|
|
// operation requests made by the uploader.
|
|
|
|
RequestOptions []request.Option
|
2016-10-31 16:50:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// NewUploader creates a new Uploader instance to upload objects to S3. Pass In
|
|
|
|
// additional functional options to customize the uploader's behavior. Requires a
|
|
|
|
// client.ConfigProvider in order to create a S3 service client. The session.Session
|
|
|
|
// satisfies the client.ConfigProvider interface.
|
|
|
|
//
|
|
|
|
// Example:
|
|
|
|
// // The session the S3 Uploader will use
|
2017-05-11 16:39:54 +02:00
|
|
|
// sess := session.Must(session.NewSession())
|
2016-10-31 16:50:02 +01:00
|
|
|
//
|
|
|
|
// // Create an uploader with the session and default options
|
|
|
|
// uploader := s3manager.NewUploader(sess)
|
|
|
|
//
|
|
|
|
// // Create an uploader with the session and custom options
|
|
|
|
// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) {
|
|
|
|
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
|
|
|
|
// })
|
|
|
|
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
|
|
|
|
u := &Uploader{
|
|
|
|
S3: s3.New(c),
|
|
|
|
PartSize: DefaultUploadPartSize,
|
|
|
|
Concurrency: DefaultUploadConcurrency,
|
|
|
|
LeavePartsOnError: false,
|
|
|
|
MaxUploadParts: MaxUploadParts,
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, option := range options {
|
|
|
|
option(u)
|
|
|
|
}
|
|
|
|
|
|
|
|
return u
|
|
|
|
}
|
|
|
|
|
|
|
|
// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
|
|
|
|
// additional functional options to customize the uploader's behavior. Requires
|
|
|
|
// a S3 service client to make S3 API calls.
|
|
|
|
//
|
|
|
|
// Example:
|
2016-11-19 11:03:41 +01:00
|
|
|
// // The session the S3 Uploader will use
|
2017-05-11 16:39:54 +02:00
|
|
|
// sess := session.Must(session.NewSession())
|
2016-11-19 11:03:41 +01:00
|
|
|
//
|
2016-10-31 16:50:02 +01:00
|
|
|
// // S3 service client the Upload manager will use.
|
2016-11-19 11:03:41 +01:00
|
|
|
// s3Svc := s3.New(sess)
|
2016-10-31 16:50:02 +01:00
|
|
|
//
|
|
|
|
// // Create an uploader with S3 client and default options
|
|
|
|
// uploader := s3manager.NewUploaderWithClient(s3Svc)
|
|
|
|
//
|
|
|
|
// // Create an uploader with S3 client and custom options
|
|
|
|
// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
|
|
|
|
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
|
|
|
|
// })
|
|
|
|
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
|
|
|
|
u := &Uploader{
|
|
|
|
S3: svc,
|
|
|
|
PartSize: DefaultUploadPartSize,
|
|
|
|
Concurrency: DefaultUploadConcurrency,
|
|
|
|
LeavePartsOnError: false,
|
|
|
|
MaxUploadParts: MaxUploadParts,
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, option := range options {
|
|
|
|
option(u)
|
|
|
|
}
|
|
|
|
|
|
|
|
return u
|
|
|
|
}
|
|
|
|
|
|
|
|
// Upload uploads an object to S3, intelligently buffering large files into
|
|
|
|
// smaller chunks and sending them in parallel across multiple goroutines. You
|
|
|
|
// can configure the buffer size and concurrency through the Uploader's parameters.
|
|
|
|
//
|
|
|
|
// Additional functional options can be provided to configure the individual
|
|
|
|
// upload. These options are copies of the Uploader instance Upload is called from.
|
|
|
|
// Modifying the options will not impact the original Uploader instance.
|
|
|
|
//
|
2017-05-11 16:39:54 +02:00
|
|
|
// Use the WithUploaderRequestOptions helper function to pass in request
|
|
|
|
// options that will be applied to all API operations made with this uploader.
|
|
|
|
//
|
2016-10-31 16:50:02 +01:00
|
|
|
// It is safe to call this method concurrently across goroutines.
|
|
|
|
//
|
|
|
|
// Example:
|
|
|
|
// // Upload input parameters
|
|
|
|
// upParams := &s3manager.UploadInput{
|
|
|
|
// Bucket: &bucketName,
|
|
|
|
// Key: &keyName,
|
|
|
|
// Body: file,
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// // Perform an upload.
|
|
|
|
// result, err := uploader.Upload(upParams)
|
|
|
|
//
|
|
|
|
// // Perform upload with options different than the those in the Uploader.
|
|
|
|
// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
|
|
|
|
// u.PartSize = 10 * 1024 * 1024 // 10MB part size
|
2016-11-19 11:03:41 +01:00
|
|
|
// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
|
2016-10-31 16:50:02 +01:00
|
|
|
// })
|
|
|
|
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
|
2017-05-11 16:39:54 +02:00
|
|
|
return u.UploadWithContext(aws.BackgroundContext(), input, options...)
|
|
|
|
}
|
2016-10-31 16:50:02 +01:00
|
|
|
|
2017-05-11 16:39:54 +02:00
|
|
|
// UploadWithContext uploads an object to S3, intelligently buffering large
|
|
|
|
// files into smaller chunks and sending them in parallel across multiple
|
|
|
|
// goroutines. You can configure the buffer size and concurrency through the
|
|
|
|
// Uploader's parameters.
|
|
|
|
//
|
|
|
|
// UploadWithContext is the same as Upload with the additional support for
|
|
|
|
// Context input parameters. The Context must not be nil. A nil Context will
|
2018-09-21 12:01:55 +02:00
|
|
|
// cause a panic. Use the context to add deadlining, timeouts, etc. The
|
2017-05-11 16:39:54 +02:00
|
|
|
// UploadWithContext may create sub-contexts for individual underlying requests.
|
|
|
|
//
|
|
|
|
// Additional functional options can be provided to configure the individual
|
|
|
|
// upload. These options are copies of the Uploader instance Upload is called from.
|
|
|
|
// Modifying the options will not impact the original Uploader instance.
|
|
|
|
//
|
|
|
|
// Use the WithUploaderRequestOptions helper function to pass in request
|
|
|
|
// options that will be applied to all API operations made with this uploader.
|
|
|
|
//
|
|
|
|
// It is safe to call this method concurrently across goroutines.
|
|
|
|
func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ...func(*Uploader)) (*UploadOutput, error) {
|
|
|
|
i := uploader{in: input, cfg: u, ctx: ctx}
|
|
|
|
|
|
|
|
for _, opt := range opts {
|
|
|
|
opt(&i.cfg)
|
2016-10-31 16:50:02 +01:00
|
|
|
}
|
2017-05-11 16:39:54 +02:00
|
|
|
i.cfg.RequestOptions = append(i.cfg.RequestOptions, request.WithAppendUserAgent("S3Manager"))
|
2016-10-31 16:50:02 +01:00
|
|
|
|
|
|
|
return i.upload()
|
|
|
|
}
|
|
|
|
|
2017-07-23 09:51:42 +02:00
|
|
|
// UploadWithIterator will upload a batched amount of objects to S3. This operation uses
|
|
|
|
// the iterator pattern to know which object to upload next. Since this is an interface this
|
|
|
|
// allows for custom defined functionality.
|
|
|
|
//
|
|
|
|
// Example:
|
|
|
|
// svc:= s3manager.NewUploader(sess)
|
|
|
|
//
|
|
|
|
// objects := []BatchUploadObject{
|
|
|
|
// {
|
|
|
|
// Object: &s3manager.UploadInput {
|
|
|
|
// Key: aws.String("key"),
|
|
|
|
// Bucket: aws.String("bucket"),
|
|
|
|
// },
|
|
|
|
// },
|
|
|
|
// }
|
|
|
|
//
|
2018-09-21 12:01:55 +02:00
|
|
|
// iter := &s3manager.UploadObjectsIterator{Objects: objects}
|
2017-07-23 09:51:42 +02:00
|
|
|
// if err := svc.UploadWithIterator(aws.BackgroundContext(), iter); err != nil {
|
|
|
|
// return err
|
|
|
|
// }
|
|
|
|
func (u Uploader) UploadWithIterator(ctx aws.Context, iter BatchUploadIterator, opts ...func(*Uploader)) error {
|
|
|
|
var errs []Error
|
|
|
|
for iter.Next() {
|
|
|
|
object := iter.UploadObject()
|
|
|
|
if _, err := u.UploadWithContext(ctx, object.Object, opts...); err != nil {
|
|
|
|
s3Err := Error{
|
|
|
|
OrigErr: err,
|
|
|
|
Bucket: object.Object.Bucket,
|
|
|
|
Key: object.Object.Key,
|
|
|
|
}
|
|
|
|
|
|
|
|
errs = append(errs, s3Err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if object.After == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := object.After(); err != nil {
|
|
|
|
s3Err := Error{
|
|
|
|
OrigErr: err,
|
|
|
|
Bucket: object.Object.Bucket,
|
|
|
|
Key: object.Object.Key,
|
|
|
|
}
|
|
|
|
|
|
|
|
errs = append(errs, s3Err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(errs) > 0 {
|
|
|
|
return NewBatchError("BatchedUploadIncomplete", "some objects have failed to upload.", errs)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-10-31 16:50:02 +01:00
|
|
|
// internal structure to manage an upload to S3.
|
|
|
|
type uploader struct {
|
2017-05-11 16:39:54 +02:00
|
|
|
ctx aws.Context
|
|
|
|
cfg Uploader
|
2016-10-31 16:50:02 +01:00
|
|
|
|
|
|
|
in *UploadInput
|
|
|
|
|
|
|
|
readerPos int64 // current reader position
|
|
|
|
totalSize int64 // set to -1 if the size is not known
|
2018-03-19 16:51:38 +01:00
|
|
|
|
|
|
|
bufferPool sync.Pool
|
2016-10-31 16:50:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// internal logic for deciding whether to upload a single part or use a
|
|
|
|
// multipart upload.
|
|
|
|
func (u *uploader) upload() (*UploadOutput, error) {
|
|
|
|
u.init()
|
|
|
|
|
2017-05-11 16:39:54 +02:00
|
|
|
if u.cfg.PartSize < MinUploadPartSize {
|
2016-10-31 16:50:02 +01:00
|
|
|
msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
|
|
|
|
return nil, awserr.New("ConfigError", msg, nil)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Do one read to determine if we have more than one part
|
2018-03-19 16:51:38 +01:00
|
|
|
reader, _, part, err := u.nextReader()
|
2016-11-19 11:03:41 +01:00
|
|
|
if err == io.EOF { // single part
|
|
|
|
return u.singlePart(reader)
|
2016-10-31 16:50:02 +01:00
|
|
|
} else if err != nil {
|
|
|
|
return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
mu := multiuploader{uploader: u}
|
2018-03-19 16:51:38 +01:00
|
|
|
return mu.upload(reader, part)
|
2016-10-31 16:50:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// init will initialize all default options.
|
|
|
|
func (u *uploader) init() {
|
2017-05-11 16:39:54 +02:00
|
|
|
if u.cfg.Concurrency == 0 {
|
|
|
|
u.cfg.Concurrency = DefaultUploadConcurrency
|
2016-10-31 16:50:02 +01:00
|
|
|
}
|
2017-05-11 16:39:54 +02:00
|
|
|
if u.cfg.PartSize == 0 {
|
|
|
|
u.cfg.PartSize = DefaultUploadPartSize
|
2016-10-31 16:50:02 +01:00
|
|
|
}
|
2018-09-21 12:01:55 +02:00
|
|
|
if u.cfg.MaxUploadParts == 0 {
|
|
|
|
u.cfg.MaxUploadParts = MaxUploadParts
|
|
|
|
}
|
2016-10-31 16:50:02 +01:00
|
|
|
|
2018-03-19 16:51:38 +01:00
|
|
|
u.bufferPool = sync.Pool{
|
|
|
|
New: func() interface{} { return make([]byte, u.cfg.PartSize) },
|
|
|
|
}
|
|
|
|
|
2016-10-31 16:50:02 +01:00
|
|
|
// Try to get the total size for some optimizations
|
|
|
|
u.initSize()
|
|
|
|
}
|
|
|
|
|
|
|
|
// initSize tries to detect the total stream size, setting u.totalSize. If
|
|
|
|
// the size is not known, totalSize is set to -1.
|
|
|
|
func (u *uploader) initSize() {
|
|
|
|
u.totalSize = -1
|
|
|
|
|
|
|
|
switch r := u.in.Body.(type) {
|
|
|
|
case io.Seeker:
|
2018-03-19 16:51:38 +01:00
|
|
|
n, err := aws.SeekerLen(r)
|
2016-10-31 16:50:02 +01:00
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
u.totalSize = n
|
|
|
|
|
|
|
|
// Try to adjust partSize if it is too small and account for
|
|
|
|
// integer division truncation.
|
2017-05-11 16:39:54 +02:00
|
|
|
if u.totalSize/u.cfg.PartSize >= int64(u.cfg.MaxUploadParts) {
|
2016-10-31 16:50:02 +01:00
|
|
|
// Add one to the part size to account for remainders
|
|
|
|
// during the size calculation. e.g odd number of bytes.
|
2017-05-11 16:39:54 +02:00
|
|
|
u.cfg.PartSize = (u.totalSize / int64(u.cfg.MaxUploadParts)) + 1
|
2016-10-31 16:50:02 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// nextReader returns a seekable reader representing the next packet of data.
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
//
// Returns the reader for the chunk, the chunk length in bytes, the pooled
// scratch buffer backing the chunk (nil when the body is sliced in place),
// and io.EOF when this chunk is known to be the last one.
func (u *uploader) nextReader() (io.ReadSeeker, int, []byte, error) {
	type readerAtSeeker interface {
		io.ReaderAt
		io.ReadSeeker
	}
	switch r := u.in.Body.(type) {
	case readerAtSeeker:
		// Random-access body: slice a window of it with a SectionReader
		// instead of copying into a buffer. No pooled buffer is used, so
		// the []byte return is nil.
		var err error

		n := u.cfg.PartSize
		if u.totalSize >= 0 {
			bytesLeft := u.totalSize - u.readerPos

			// Last chunk: shrink to the remaining bytes and flag EOF so
			// the caller stops requesting more parts.
			if bytesLeft <= u.cfg.PartSize {
				err = io.EOF
				n = bytesLeft
			}
		}

		reader := io.NewSectionReader(r, u.readerPos, n)
		u.readerPos += n

		return reader, int(n), nil, err

	default:
		// Non-seekable body: copy one part into a pooled scratch buffer.
		// Ownership of `part` transfers to the caller, which must return it
		// to bufferPool (see multiuploader.send).
		part := u.bufferPool.Get().([]byte)
		n, err := readFillBuf(r, part)
		u.readerPos += int64(n)

		return bytes.NewReader(part[0:n]), n, part, err
	}
}
|
|
|
|
|
2016-11-19 11:03:41 +01:00
|
|
|
func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
|
|
|
|
for offset < len(b) && err == nil {
|
|
|
|
var n int
|
|
|
|
n, err = r.Read(b[offset:])
|
|
|
|
offset += n
|
|
|
|
}
|
|
|
|
|
|
|
|
return offset, err
|
|
|
|
}
|
|
|
|
|
2016-10-31 16:50:02 +01:00
|
|
|
// singlePart contains upload logic for uploading a single chunk via
|
|
|
|
// a regular PutObject request. Multipart requests require at least two
|
|
|
|
// parts, or at least 5MB of data.
|
|
|
|
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
|
|
|
|
params := &s3.PutObjectInput{}
|
|
|
|
awsutil.Copy(params, u.in)
|
|
|
|
params.Body = buf
|
|
|
|
|
2017-05-11 16:39:54 +02:00
|
|
|
// Need to use request form because URL generated in request is
|
|
|
|
// used in return.
|
|
|
|
req, out := u.cfg.S3.PutObjectRequest(params)
|
|
|
|
req.SetContext(u.ctx)
|
|
|
|
req.ApplyOptions(u.cfg.RequestOptions...)
|
2016-10-31 16:50:02 +01:00
|
|
|
if err := req.Send(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
url := req.HTTPRequest.URL.String()
|
|
|
|
return &UploadOutput{
|
|
|
|
Location: url,
|
|
|
|
VersionID: out.VersionId,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// internal structure to manage a specific multipart upload to S3.
|
|
|
|
type multiuploader struct {
|
|
|
|
*uploader
|
|
|
|
wg sync.WaitGroup
|
|
|
|
m sync.Mutex
|
|
|
|
err error
|
|
|
|
uploadID string
|
|
|
|
parts completedParts
|
|
|
|
}
|
|
|
|
|
|
|
|
// chunk is a single piece of the body queued to a worker for upload.
type chunk struct {
	buf  io.ReadSeeker // the chunk's data
	part []byte        // pooled scratch buffer backing buf, if any; returned to the pool after send
	num  int64         // 1-based S3 part number
}
|
|
|
|
|
|
|
|
// completedParts is a wrapper to make parts sortable by their part number,
|
|
|
|
// since S3 required this list to be sent in sorted order.
|
|
|
|
type completedParts []*s3.CompletedPart
|
|
|
|
|
|
|
|
func (a completedParts) Len() int { return len(a) }
|
|
|
|
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
|
|
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
|
|
|
|
|
|
|
|
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data. It creates the multipart upload, fans chunks out
// to Concurrency worker goroutines, waits for them, and completes (or aborts)
// the upload.
func (u *multiuploader) upload(firstBuf io.ReadSeeker, firstPart []byte) (*UploadOutput, error) {
	params := &s3.CreateMultipartUploadInput{}
	awsutil.Copy(params, u.in)

	// Create the multipart
	resp, err := u.cfg.S3.CreateMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
	if err != nil {
		return nil, err
	}
	u.uploadID = *resp.UploadId

	// Create the workers
	ch := make(chan chunk, u.cfg.Concurrency)
	for i := 0; i < u.cfg.Concurrency; i++ {
		u.wg.Add(1)
		go u.readChunk(ch)
	}

	// Send part 1 to the workers
	var num int64 = 1
	ch <- chunk{buf: firstBuf, part: firstPart, num: num}

	// Read and queue the rest of the parts. Reading happens only here, on
	// the main goroutine (nextReader is not safe for concurrent use); the
	// loop stops on the first worker error or once the body is exhausted
	// (err becomes io.EOF after the final chunk is queued).
	for u.geterr() == nil && err == nil {
		num++
		// This upload exceeded maximum number of supported parts, error now.
		if num > int64(u.cfg.MaxUploadParts) || num > int64(MaxUploadParts) {
			var msg string
			if num > int64(u.cfg.MaxUploadParts) {
				msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					u.cfg.MaxUploadParts)
			} else {
				msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
					MaxUploadParts)
			}
			u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
			break
		}

		var reader io.ReadSeeker
		var nextChunkLen int
		var part []byte
		reader, nextChunkLen, part, err = u.nextReader()

		// io.EOF is expected on the last chunk; any other error aborts.
		if err != nil && err != io.EOF {
			u.seterr(awserr.New(
				"ReadRequestBody",
				"read multipart upload data failed",
				err))
			break
		}

		if nextChunkLen == 0 {
			// No need to upload empty part, if file was empty to start
			// with empty single part would of been created and never
			// started multipart upload.
			break
		}

		ch <- chunk{buf: reader, part: part, num: num}
	}

	// Close the channel, wait for workers, and complete upload
	close(ch)
	u.wg.Wait()
	complete := u.complete()

	if err := u.geterr(); err != nil {
		// Surface the upload ID so callers can recover/abort leftover parts.
		return nil, &multiUploadError{
			awsError: awserr.New(
				"MultipartUpload",
				"upload multipart failed",
				err),
			uploadID: u.uploadID,
		}
	}

	// Create a presigned URL of the S3 Get Object in order to have parity with
	// single part upload.
	getReq, _ := u.cfg.S3.GetObjectRequest(&s3.GetObjectInput{
		Bucket: u.in.Bucket,
		Key:    u.in.Key,
	})
	// Anonymous credentials keep signing query parameters out of the
	// reported Location URL.
	getReq.Config.Credentials = credentials.AnonymousCredentials
	uploadLocation, _, _ := getReq.PresignRequest(1)

	return &UploadOutput{
		Location:  uploadLocation,
		VersionID: complete.VersionId,
		UploadID:  u.uploadID,
	}, nil
}
|
|
|
|
|
|
|
|
// readChunk runs in worker goroutines to pull chunks off of the ch channel
|
|
|
|
// and send() them as UploadPart requests.
|
|
|
|
func (u *multiuploader) readChunk(ch chan chunk) {
|
|
|
|
defer u.wg.Done()
|
|
|
|
for {
|
|
|
|
data, ok := <-ch
|
|
|
|
|
|
|
|
if !ok {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if u.geterr() == nil {
|
|
|
|
if err := u.send(data); err != nil {
|
|
|
|
u.seterr(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// send performs an UploadPart request and keeps track of the completed
|
|
|
|
// part information.
|
|
|
|
func (u *multiuploader) send(c chunk) error {
|
2017-05-11 16:39:54 +02:00
|
|
|
params := &s3.UploadPartInput{
|
|
|
|
Bucket: u.in.Bucket,
|
|
|
|
Key: u.in.Key,
|
|
|
|
Body: c.buf,
|
|
|
|
UploadId: &u.uploadID,
|
|
|
|
SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
|
|
|
|
SSECustomerKey: u.in.SSECustomerKey,
|
|
|
|
PartNumber: &c.num,
|
|
|
|
}
|
|
|
|
resp, err := u.cfg.S3.UploadPartWithContext(u.ctx, params, u.cfg.RequestOptions...)
|
2018-03-19 16:51:38 +01:00
|
|
|
// put the byte array back into the pool to conserve memory
|
|
|
|
u.bufferPool.Put(c.part)
|
2017-05-11 16:39:54 +02:00
|
|
|
if err != nil {
|
2016-10-31 16:50:02 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
n := c.num
|
|
|
|
completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
|
|
|
|
|
|
|
|
u.m.Lock()
|
|
|
|
u.parts = append(u.parts, completed)
|
|
|
|
u.m.Unlock()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// geterr is a thread-safe getter for the error object
|
|
|
|
func (u *multiuploader) geterr() error {
|
|
|
|
u.m.Lock()
|
|
|
|
defer u.m.Unlock()
|
|
|
|
|
|
|
|
return u.err
|
|
|
|
}
|
|
|
|
|
|
|
|
// seterr is a thread-safe setter for the error object
|
|
|
|
func (u *multiuploader) seterr(e error) {
|
|
|
|
u.m.Lock()
|
|
|
|
defer u.m.Unlock()
|
|
|
|
|
|
|
|
u.err = e
|
|
|
|
}
|
|
|
|
|
|
|
|
// fail will abort the multipart unless LeavePartsOnError is set to true.
|
|
|
|
func (u *multiuploader) fail() {
|
2017-05-11 16:39:54 +02:00
|
|
|
if u.cfg.LeavePartsOnError {
|
2016-10-31 16:50:02 +01:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-05-11 16:39:54 +02:00
|
|
|
params := &s3.AbortMultipartUploadInput{
|
2016-10-31 16:50:02 +01:00
|
|
|
Bucket: u.in.Bucket,
|
|
|
|
Key: u.in.Key,
|
|
|
|
UploadId: &u.uploadID,
|
2017-05-11 16:39:54 +02:00
|
|
|
}
|
|
|
|
_, err := u.cfg.S3.AbortMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
|
|
|
|
if err != nil {
|
|
|
|
logMessage(u.cfg.S3, aws.LogDebug, fmt.Sprintf("failed to abort multipart upload, %v", err))
|
|
|
|
}
|
2016-10-31 16:50:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// complete successfully completes a multipart upload and returns the response.
|
|
|
|
func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
|
|
|
|
if u.geterr() != nil {
|
|
|
|
u.fail()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parts must be sorted in PartNumber order.
|
|
|
|
sort.Sort(u.parts)
|
|
|
|
|
2017-05-11 16:39:54 +02:00
|
|
|
params := &s3.CompleteMultipartUploadInput{
|
2016-10-31 16:50:02 +01:00
|
|
|
Bucket: u.in.Bucket,
|
|
|
|
Key: u.in.Key,
|
|
|
|
UploadId: &u.uploadID,
|
|
|
|
MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
|
2017-05-11 16:39:54 +02:00
|
|
|
}
|
|
|
|
resp, err := u.cfg.S3.CompleteMultipartUploadWithContext(u.ctx, params, u.cfg.RequestOptions...)
|
|
|
|
if err != nil {
|
2016-10-31 16:50:02 +01:00
|
|
|
u.seterr(err)
|
|
|
|
u.fail()
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp
|
|
|
|
}
|