2015-03-02 10:05:23 +01:00
|
|
|
// Upload for drive
|
|
|
|
//
|
|
|
|
// Docs
|
|
|
|
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
|
|
|
|
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
|
|
|
|
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
|
|
|
|
// Files update: https://developers.google.com/drive/v2/reference/files/update
|
|
|
|
//
|
|
|
|
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS
|
|
|
|
|
|
|
|
package drive
|
|
|
|
|
|
|
|
import (
|
2019-05-11 11:03:51 +02:00
|
|
|
"bytes"
|
2019-09-04 21:21:10 +02:00
|
|
|
"context"
|
2015-03-02 10:05:23 +01:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"net/http"
|
|
|
|
"net/url"
|
|
|
|
"strconv"
|
|
|
|
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
|
|
|
"github.com/rclone/rclone/lib/readers"
|
2018-01-24 00:46:41 +01:00
|
|
|
"google.golang.org/api/drive/v3"
|
2015-03-02 10:05:23 +01:00
|
|
|
"google.golang.org/api/googleapi"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// statusResumeIncomplete is the code returned by the Google uploader
	// when the transfer is not yet complete. It is not a standard HTTP
	// status and is handled specially by transferChunk.
	statusResumeIncomplete = 308
)
|
|
|
|
|
|
|
|
// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
	// f is the parent Fs - used for its pacer, options and HTTP client.
	f *Fs
	// remote is the path of the object being uploaded - used for logging.
	remote string
	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
	URI string
	// Media is the object being uploaded.
	Media io.Reader
	// MediaType defines the media type, e.g. "image/jpeg".
	MediaType string
	// ContentLength is the full size of the object being uploaded,
	// or negative if the size is not known in advance.
	ContentLength int64
	// ret receives the decoded file metadata returned by the server
	// when the upload completes (set by transferChunk).
	ret *drive.File
}
|
|
|
|
|
|
|
|
// Upload the io.Reader in of size bytes with contentType and info
//
// It starts a resumable upload session (POST for a new file, or PATCH
// of an existing file when fileID is non-empty), then transfers the
// content in chunks via resumableUpload.Upload. A negative size means
// the length is not known in advance.
func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
	params := url.Values{
		"alt":        {"json"},
		"uploadType": {"resumable"},
		"fields":     {partialFields},
	}
	params.Set("supportsAllDrives", "true")
	if f.opt.KeepRevisionForever {
		params.Set("keepRevisionForever", "true")
	}
	urls := "https://www.googleapis.com/upload/drive/v3/files"
	method := "POST"
	if fileID != "" {
		// Updating an existing file - PATCH it and preserve the modified date.
		params.Set("setModifiedDate", "true")
		urls += "/{fileId}"
		method = "PATCH"
	}
	urls += "?" + params.Encode()
	var res *http.Response
	var err error
	// Start the upload session, retrying with the pacer.  The request
	// body (the JSON metadata) must be rebuilt on each attempt as it is
	// consumed by the previous one.
	err = f.pacer.Call(func() (bool, error) {
		var body io.Reader
		body, err = googleapi.WithoutDataWrapper.JSONReader(info)
		if err != nil {
			return false, err
		}
		var req *http.Request
		req, err = http.NewRequestWithContext(ctx, method, urls, body)
		if err != nil {
			return false, err
		}
		googleapi.Expand(req.URL, map[string]string{
			"fileId": fileID,
		})
		req.Header.Set("Content-Type", "application/json; charset=UTF-8")
		req.Header.Set("X-Upload-Content-Type", contentType)
		if size >= 0 {
			// Only advertise the total length if it is known.
			req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
		}
		res, err = f.client.Do(req)
		if err == nil {
			defer googleapi.CloseBody(res)
			err = googleapi.CheckResponse(res)
		}
		return f.shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	// The session URL to send the content to comes back in the Location header.
	loc := res.Header.Get("Location")
	rx := &resumableUpload{
		f:             f,
		remote:        remote,
		URI:           loc,
		Media:         in,
		MediaType:     contentType,
		ContentLength: size,
	}
	return rx.Upload(ctx)
}
|
|
|
|
|
|
|
|
// Make an http.Request for the range passed in
|
2019-09-04 21:21:10 +02:00
|
|
|
func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
|
2021-02-03 18:41:27 +01:00
|
|
|
req, _ := http.NewRequestWithContext(ctx, "POST", rx.URI, body)
|
2015-03-02 10:05:23 +01:00
|
|
|
req.ContentLength = reqSize
|
2019-05-11 11:03:51 +02:00
|
|
|
totalSize := "*"
|
|
|
|
if rx.ContentLength >= 0 {
|
|
|
|
totalSize = strconv.FormatInt(rx.ContentLength, 10)
|
|
|
|
}
|
2015-03-02 10:05:23 +01:00
|
|
|
if reqSize != 0 {
|
2019-05-11 11:03:51 +02:00
|
|
|
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize))
|
2015-03-02 10:05:23 +01:00
|
|
|
} else {
|
2019-05-11 11:03:51 +02:00
|
|
|
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", totalSize))
|
2015-03-02 10:05:23 +01:00
|
|
|
}
|
|
|
|
req.Header.Set("Content-Type", rx.MediaType)
|
|
|
|
return req
|
|
|
|
}
|
|
|
|
|
|
|
|
// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
|
2019-09-04 21:21:10 +02:00
|
|
|
func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
|
2018-04-06 20:53:06 +02:00
|
|
|
_, _ = chunk.Seek(0, io.SeekStart)
|
2019-09-04 21:21:10 +02:00
|
|
|
req := rx.makeRequest(ctx, start, chunk, chunkSize)
|
2015-03-02 10:05:23 +01:00
|
|
|
res, err := rx.f.client.Do(req)
|
|
|
|
if err != nil {
|
|
|
|
return 599, err
|
|
|
|
}
|
|
|
|
defer googleapi.CloseBody(res)
|
|
|
|
if res.StatusCode == statusResumeIncomplete {
|
|
|
|
return res.StatusCode, nil
|
|
|
|
}
|
|
|
|
err = googleapi.CheckResponse(res)
|
|
|
|
if err != nil {
|
|
|
|
return res.StatusCode, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// When the entire file upload is complete, the server
|
|
|
|
// responds with an HTTP 201 Created along with any metadata
|
|
|
|
// associated with this resource. If this request had been
|
|
|
|
// updating an existing entity rather than creating a new one,
|
|
|
|
// the HTTP response code for a completed upload would have
|
|
|
|
// been 200 OK.
|
|
|
|
//
|
|
|
|
// So parse the response out of the body. We aren't expecting
|
2019-02-07 18:41:17 +01:00
|
|
|
// any other 2xx codes, so we parse it unconditionally on
|
2015-03-02 10:05:23 +01:00
|
|
|
// StatusCode
|
|
|
|
if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
|
|
|
|
return 598, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return res.StatusCode, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
//
// When ContentLength is negative the size is unknown and chunks are
// read into a buffer until EOF; otherwise a repeatable reader is used
// so retried chunks can be reread without buffering the whole file.
func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
	start := int64(0)
	var StatusCode int
	var err error
	buf := make([]byte, int(rx.f.opt.ChunkSize))
	for finished := false; !finished; {
		var reqSize int64
		var chunk io.ReadSeeker
		if rx.ContentLength >= 0 {
			// If size known use repeatable reader for smoother bwlimit
			if start >= rx.ContentLength {
				break
			}
			// Send at most ChunkSize bytes per request.
			reqSize = rx.ContentLength - start
			if reqSize >= int64(rx.f.opt.ChunkSize) {
				reqSize = int64(rx.f.opt.ChunkSize)
			}
			chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
		} else {
			// If size unknown read into buffer
			var n int
			n, err = readers.ReadFill(rx.Media, buf)
			if err == io.EOF {
				// Send the last chunk with the correct ContentLength
				// otherwise Google doesn't know we've finished
				rx.ContentLength = start + int64(n)
				finished = true
			} else if err != nil {
				return nil, err
			}
			reqSize = int64(n)
			chunk = bytes.NewReader(buf[:reqSize])
		}

		// Transfer the chunk, retrying each one with the pacer on
		// low-level failures.  Note that 308 (resume incomplete), 201
		// and 200 all count as success for a single chunk.
		err = rx.f.pacer.Call(func() (bool, error) {
			fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
			StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
			again, err := rx.f.shouldRetry(err)
			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
				again = false
				err = nil
			}
			return again, err
		})
		if err != nil {
			return nil, err
		}

		start += reqSize
	}
	// Resume or retry uploads that fail due to connection interruptions or
	// any 5xx errors, including:
	//
	// 500 Internal Server Error
	// 502 Bad Gateway
	// 503 Service Unavailable
	// 504 Gateway Timeout
	//
	// Use an exponential backoff strategy if any 5xx server error is
	// returned when resuming or retrying upload requests. These errors can
	// occur if a server is getting overloaded. Exponential backoff can help
	// alleviate these kinds of problems during periods of high volume of
	// requests or heavy network traffic. Other kinds of requests should not
	// be handled by exponential backoff but you can still retry a number of
	// them. When retrying these requests, limit the number of times you
	// retry them. For example your code could limit to ten retries or less
	// before reporting an error.
	//
	// Handle 404 Not Found errors when doing resumable uploads by starting
	// the entire upload over from the beginning.
	if rx.ret == nil {
		// rx.ret is only set when transferChunk decoded a completed
		// upload response, so a nil here means the upload never finished.
		return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
	}
	return rx.ret, nil
}
|