// Package drive interfaces with the Google Drive object storage system
package drive

// FIXME need to deal with some corner cases
// * multiple files with the same name
// * files can be in multiple directories
// * can have directory loops
// * files with / in name

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"path"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/dircache"
	"github.com/ncw/rclone/lib/oauthutil"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/pkg/errors"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/drive/v2"
	"google.golang.org/api/googleapi"
)

// Constants
const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
	driveFolderType             = "application/vnd.google-apps.folder"
	timeFormatIn                = time.RFC3339
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"
	minSleep                    = 10 * time.Millisecond
	defaultExtensions           = "docx,xlsx,pptx,svg"
)
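
// Illustrative sketch (not part of the original file): Drive sends
// modification times in RFC3339 (timeFormatIn) and this backend writes them
// back with nanosecond precision (timeFormatOut). For example:
//
//	t, _ := time.Parse(timeFormatIn, "2018-01-12T17:30:54.123Z")
//	sent := t.Format(timeFormatOut) // "2018-01-12T17:30:54.123000000Z"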

// Globals
var (
	// Flags
	driveAuthOwnerOnly = flags.BoolP("drive-auth-owner-only", "", false, "Only consider files owned by the authenticated user.")
	driveUseTrash      = flags.BoolP("drive-use-trash", "", true, "Send files to the trash instead of deleting permanently.")
	driveSkipGdocs     = flags.BoolP("drive-skip-gdocs", "", false, "Skip google documents in all listings.")
	driveSharedWithMe  = flags.BoolP("drive-shared-with-me", "", false, "Only show files that are shared with me")
	driveTrashedOnly   = flags.BoolP("drive-trashed-only", "", false, "Only show files that are in the trash")
	driveExtensions    = flags.StringP("drive-formats", "", defaultExtensions, "Comma separated list of preferred formats for downloading Google docs.")
	driveListChunk     = flags.Int64P("drive-list-chunk", "", 1000, "Size of listing chunk 100-1000. 0 to disable.")
	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
	chunkSize         = fs.SizeSuffix(8 * 1024 * 1024)
	driveUploadCutoff = chunkSize
	// Description of how to auth for this app
	driveConfig = &oauth2.Config{
		Scopes:       []string{"https://www.googleapis.com/auth/drive"},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: config.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.TitleBarRedirectURL,
	}
	mimeTypeToExtension = map[string]string{
		"application/epub+zip":                            "epub",
		"application/msword":                              "doc",
		"application/pdf":                                 "pdf",
		"application/rtf":                                 "rtf",
		"application/vnd.ms-excel":                        "xls",
		"application/vnd.oasis.opendocument.presentation": "odp",
		"application/vnd.oasis.opendocument.spreadsheet":  "ods",
		"application/vnd.oasis.opendocument.text":         "odt",
		"application/vnd.openxmlformats-officedocument.presentationml.presentation": "pptx",
		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":         "xlsx",
		"application/vnd.openxmlformats-officedocument.wordprocessingml.document":   "docx",
		"application/x-vnd.oasis.opendocument.spreadsheet":                          "ods",
		"application/zip":           "zip",
		"image/jpeg":                "jpg",
		"image/png":                 "png",
		"image/svg+xml":             "svg",
		"text/csv":                  "csv",
		"text/html":                 "html",
		"text/plain":                "txt",
		"text/tab-separated-values": "tsv",
	}
	extensionToMimeType map[string]string
	partialFields       = "id,downloadUrl,exportLinks,fileExtension,fullFileExtension,fileSize,labels,md5Checksum,modifiedDate,mimeType,title"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "drive",
		Description: "Google Drive",
		NewFs:       NewFs,
		Config: func(name string) {
			var err error
			if config.FileGet(name, "service_account_file") == "" {
				err = oauthutil.Config("drive", name, driveConfig)
				if err != nil {
					log.Fatalf("Failed to configure token: %v", err)
				}
			}
			err = configTeamDrive(name)
			if err != nil {
				log.Fatalf("Failed to configure team drive: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client Id - leave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret - leave blank normally.",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path - needed only if you want to use a service account instead of interactive login.",
		}},
	})
	flags.VarP(&driveUploadCutoff, "drive-upload-cutoff", "", "Cutoff for switching to chunked upload")
	flags.VarP(&chunkSize, "drive-chunk-size", "", "Upload chunk size. Must be a power of 2 >= 256k.")

	// Invert mimeTypeToExtension
	extensionToMimeType = make(map[string]string, len(mimeTypeToExtension))
	for mimeType, extension := range mimeTypeToExtension {
		extensionToMimeType[extension] = mimeType
	}
}
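
// Illustrative sketch (not part of the original file): the flags registered
// above surface as rclone command line options, for example:
//
//	rclone copy src drive:dst --drive-formats docx,pdf --drive-use-trash=false --drive-chunk-size 16M
//
// The parsed values land in the package level variables (driveExtensions,
// driveUseTrash, chunkSize) used throughout this file.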

// Fs represents a remote drive server
type Fs struct {
	name        string             // name of this remote
	root        string             // the path we are working on
	features    *fs.Features       // optional features
	svc         *drive.Service     // the connection to the drive server
	client      *http.Client       // authorized client
	about       *drive.About       // information about the drive, including the root
	dirCache    *dircache.DirCache // Map of directory path to directory id
	pacer       *pacer.Pacer       // To pace the API calls
	extensions  []string           // preferred extensions to download docs
	teamDriveID string             // team drive ID, may be ""
	isTeamDrive bool               // true if this is a team drive
}

// Object describes a drive object
type Object struct {
	fs           *Fs    // what this object is part of
	remote       string // The remote path
	id           string // Drive Id of this object
	url          string // Download URL of this object
	md5sum       string // md5sum of the object
	bytes        int64  // size of the object
	modifiedDate string // RFC3339 time it was last modified
	isDocument   bool   // if set this is a Google doc
	mimeType     string
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Google drive root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// shouldRetry determines whether a given err merits being retried
func shouldRetry(err error) (again bool, errOut error) {
	again = false
	if err != nil {
		if fserrors.ShouldRetry(err) {
			again = true
		} else {
			switch gerr := err.(type) {
			case *googleapi.Error:
				if gerr.Code >= 500 && gerr.Code < 600 {
					// All 5xx errors should be retried
					again = true
				} else if len(gerr.Errors) > 0 {
					reason := gerr.Errors[0].Reason
					if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
						again = true
					}
				}
			}
		}
	}
	return again, err
}
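
// Illustrative sketch (not part of the original file): every Drive API call
// in this file is wrapped in the pacer, with shouldRetry deciding whether the
// call should be retried, for example (assuming an *Fs f and a file id):
//
//	var info *drive.File
//	var err error
//	err = f.pacer.Call(func() (bool, error) {
//		info, err = f.svc.Files.Get(id).Fields(googleapi.Field(partialFields)).Do()
//		return shouldRetry(err)
//	})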

// parseDrivePath parses a drive 'path'
func parseDrivePath(path string) (root string, err error) {
	root = strings.Trim(path, "/")
	return
}

// listFn is the user function called to process a File item from list
//
// Should return true to finish processing
type listFn func(*drive.File) bool

// Lists the directory required, calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// Search params: https://developers.google.com/drive/search-parameters
func (f *Fs) list(dirID string, title string, directoriesOnly bool, filesOnly bool, includeAll bool, fn listFn) (found bool, err error) {
	var query []string
	if !includeAll {
		q := "trashed=" + strconv.FormatBool(*driveTrashedOnly)
		if *driveTrashedOnly {
			q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
		}
		query = append(query, q)
	}
	// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
	// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
	// If we need to list file inside those shared folders, we must search it without sharedWithMe
	if *driveSharedWithMe && dirID == f.about.RootFolderId {
		query = append(query, "sharedWithMe=true")
	}
	if dirID != "" && !(*driveSharedWithMe && dirID == f.about.RootFolderId) {
		query = append(query, fmt.Sprintf("'%s' in parents", dirID))
	}
	if title != "" {
		// Escaping the backslash isn't documented but seems to work
		title = strings.Replace(title, `\`, `\\`, -1)
		title = strings.Replace(title, `'`, `\'`, -1)
		// Convert ／ to / for search
		title = strings.Replace(title, "／", "/", -1)
		query = append(query, fmt.Sprintf("title='%s'", title))
	}
	if directoriesOnly {
		query = append(query, fmt.Sprintf("mimeType='%s'", driveFolderType))
	}
	if filesOnly {
		query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
	}
	list := f.svc.Files.List()
	if len(query) > 0 {
		list = list.Q(strings.Join(query, " and "))
		// fmt.Printf("list Query = %q\n", query)
	}
	if *driveListChunk > 0 {
		list = list.MaxResults(*driveListChunk)
	}
	if f.isTeamDrive {
		list.TeamDriveId(f.teamDriveID)
		list.SupportsTeamDrives(true)
		list.IncludeTeamDriveItems(true)
		list.Corpora("teamDrive")
	}

	var fields = partialFields

	if *driveAuthOwnerOnly {
		fields += ",owners"
	}

	fields = fmt.Sprintf("items(%s),nextPageToken", fields)

OUTER:
	for {
		var files *drive.FileList
		err = f.pacer.Call(func() (bool, error) {
			files, err = list.Fields(googleapi.Field(fields)).Do()
			return shouldRetry(err)
		})
		if err != nil {
			return false, errors.Wrap(err, "couldn't list directory")
		}
		for _, item := range files.Items {
			// Convert / to ／ for listing purposes
			item.Title = strings.Replace(item.Title, "/", "／", -1)
			if fn(item) {
				found = true
				break OUTER
			}
		}
		if files.NextPageToken == "" {
			break
		}
		list.PageToken(files.NextPageToken)
	}
	return
}
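
// Illustrative sketch (not part of the original file): for a non-root
// directory with ID "abc123" and leaf name "report" (both hypothetical), and
// with the default flags, list builds a Drive query along the lines of:
//
//	trashed=false and 'abc123' in parents and title='report'
//
// with directoriesOnly/filesOnly adding a mimeType clause on
// application/vnd.google-apps.folder.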

// isPowerOfTwo returns true if x is a power of 2 or zero
func isPowerOfTwo(x int64) bool {
	switch {
	case x == 0:
		return true
	case x < 0:
		return false
	default:
		return (x & (x - 1)) == 0
	}
}
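
// Illustrative sketch (not part of the original file): the x&(x-1) test works
// because a power of two has a single set bit, so subtracting one clears it
// and sets all the lower bits, e.g. for x = 8:
//
//	x       = 0b1000
//	x - 1   = 0b0111
//	x&(x-1) = 0      // power of two
//
// whereas x = 6 (0b110) gives 6 & 5 = 0b100 != 0.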

// parseExtensions parses drive export extensions from a string
func (f *Fs) parseExtensions(extensions string) error {
	for _, extension := range strings.Split(extensions, ",") {
		extension = strings.ToLower(strings.TrimSpace(extension))
		if _, found := extensionToMimeType[extension]; !found {
			return errors.Errorf("couldn't find mime type for extension %q", extension)
		}
		found := false
		for _, existingExtension := range f.extensions {
			if extension == existingExtension {
				found = true
				break
			}
		}
		if !found {
			f.extensions = append(f.extensions, extension)
		}
	}
	return nil
}
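
// Illustrative sketch (not part of the original file): parseExtensions is
// tolerant of spacing and case and skips duplicates, so (assuming an *Fs f):
//
//	_ = f.parseExtensions("DOCX, pdf")   // f.extensions becomes ["docx", "pdf"]
//	_ = f.parseExtensions("pdf,unknown") // returns an error for "unknown"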

// Figure out if the user wants to use a team drive
func configTeamDrive(name string) error {
	teamDrive := config.FileGet(name, "team_drive")
	if teamDrive == "" {
		fmt.Printf("Configure this as a team drive?\n")
	} else {
		fmt.Printf("Change current team drive ID %q?\n", teamDrive)
	}
	if !config.Confirm() {
		return nil
	}
	client, err := authenticate(name)
	if err != nil {
		return errors.Wrap(err, "config team drive failed to authenticate")
	}
	svc, err := drive.New(client)
	if err != nil {
		return errors.Wrap(err, "config team drive failed to make drive client")
	}
	fmt.Printf("Fetching team drive list...\n")
	var driveIDs, driveNames []string
	listTeamDrives := svc.Teamdrives.List().MaxResults(100)
	for {
		var teamDrives *drive.TeamDriveList
		err = newPacer().Call(func() (bool, error) {
			teamDrives, err = listTeamDrives.Do()
			return shouldRetry(err)
		})
		if err != nil {
			return errors.Wrap(err, "list team drives failed")
		}
		for _, drive := range teamDrives.Items {
			driveIDs = append(driveIDs, drive.Id)
			driveNames = append(driveNames, drive.Name)
		}
		if teamDrives.NextPageToken == "" {
			break
		}
		listTeamDrives.PageToken(teamDrives.NextPageToken)
	}
	var driveID string
	if len(driveIDs) == 0 {
		fmt.Printf("No team drives found in your account")
	} else {
		driveID = config.Choose("Enter a Team Drive ID", driveIDs, driveNames, true)
	}
	config.FileSet(name, "team_drive", driveID)
	return nil
}

// newPacer makes a pacer configured for drive
func newPacer() *pacer.Pacer {
	return pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer)
}

// getServiceAccountClient makes an authenticated *http.Client from the
// service account credentials file at keyJsonfilePath
func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
	data, err := ioutil.ReadFile(os.ExpandEnv(keyJsonfilePath))
	if err != nil {
		return nil, errors.Wrap(err, "error opening credentials file")
	}
	conf, err := google.JWTConfigFromJSON(data, driveConfig.Scopes...)
	if err != nil {
		return nil, errors.Wrap(err, "error processing credentials")
	}
	ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

// authenticate returns an authenticated *http.Client, using a service
// account file if one is configured and interactive OAuth otherwise
func authenticate(name string) (*http.Client, error) {
	var oAuthClient *http.Client
	var err error

	serviceAccountPath := config.FileGet(name, "service_account_file")
	if serviceAccountPath != "" {
		oAuthClient, err = getServiceAccountClient(serviceAccountPath)
		if err != nil {
			return nil, errors.Wrap(err, "Failed to configure drive Service Account")
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, driveConfig)
		if err != nil {
			return nil, errors.Wrap(err, "Failed to configure drive")
		}
	}

	return oAuthClient, nil
}
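
// Illustrative sketch (not part of the original file): a remote using a
// service account instead of an interactive token might look roughly like
// this in the rclone config file (the remote name and path are hypothetical):
//
//	[mydrive]
//	type = drive
//	service_account_file = /path/to/credentials.json
//
// When service_account_file is empty, authenticate falls back to the OAuth
// token obtained by oauthutil.NewClient.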

// NewFs constructs an Fs from the path, container:path
func NewFs(name, path string) (fs.Fs, error) {
	if !isPowerOfTwo(int64(chunkSize)) {
		return nil, errors.Errorf("drive: chunk size %v isn't a power of two", chunkSize)
	}
	if chunkSize < 256*1024 {
		return nil, errors.Errorf("drive: chunk size can't be less than 256k - was %v", chunkSize)
	}

	oAuthClient, _ := authenticate(name)

	root, err := parseDrivePath(path)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:  name,
		root:  root,
		pacer: newPacer(),
	}
	f.teamDriveID = config.FileGet(name, "team_drive")
	f.isTeamDrive = f.teamDriveID != ""
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		ReadMimeType:            true,
		WriteMimeType:           true,
		CanHaveEmptyDirectories: true,
	}).Fill(f)

	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = drive.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Drive client")
	}

	// Read About so we know the root path
	err = f.pacer.Call(func() (bool, error) {
		f.about, err = f.svc.About.Get().Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "couldn't read info about Drive")
	}
	// override root folder for a team drive
	if f.isTeamDrive {
		f.about.RootFolderId = f.teamDriveID
	}

	f.dirCache = dircache.New(root, f.about.RootFolderId, f)

	// Parse extensions
	err = f.parseExtensions(*driveExtensions)
	if err != nil {
		return nil, err
	}
	err = f.parseExtensions(defaultExtensions) // make sure there are some sensible ones on there
	if err != nil {
		return nil, err
	}

	// Find the current root
	err = f.dirCache.FindRoot(false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(root)
		newF := *f
		newF.dirCache = dircache.New(newRoot, f.about.RootFolderId, &newF)
		newF.root = newRoot
		// Make new Fs which is the parent
		err = newF.dirCache.FindRoot(false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := newF.newObjectWithInfo(remote, nil)
		if err != nil {
			// File doesn't exist so return old f
			return f, nil
		}
		// return an error with an fs which points to the parent
		return &newF, fs.ErrorIsFile
	}
	// fmt.Printf("Root id %s", f.dirCache.RootID())
	return f, nil
}
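
// Illustrative sketch (not part of the original file): the fallback at the end
// of NewFs means that pointing a remote at a file rather than a directory
// returns the parent Fs plus fs.ErrorIsFile, e.g. (names hypothetical):
//
//	f, err := NewFs("drive", "backups/archive.tar")
//	// if "backups/archive.tar" is a file: f is rooted at "backups"
//	// and err == fs.ErrorIsFile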

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *drive.File) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		o.setMetaData(info)
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
	found, err = f.list(pathID, leaf, true, false, false, func(item *drive.File) bool {
		if item.Title == leaf {
			pathIDOut = item.Id
			return true
		}
		return false
	})
	return pathIDOut, found, err
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
	// fmt.Println("Making", path)
	// Define the metadata for the directory we are going to create.
	createInfo := &drive.File{
		Title:       leaf,
		Description: leaf,
		MimeType:    driveFolderType,
		Parents:     []*drive.ParentReference{{Id: pathID}},
	}
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Insert(createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return "", err
	}
	return info.Id, nil
}

// isAuthOwned checks if any of the item owners is the authenticated owner
func isAuthOwned(item *drive.File) bool {
	for _, owner := range item.Owners {
		if owner.IsAuthenticatedUser {
			return true
		}
	}
	return false
}

// findExportFormat works out the optimum extension and download URL
// for this item.
//
// Look through the extensions and find the first format that can be
// converted. If none found then return "", ""
func (f *Fs) findExportFormat(filepath string, item *drive.File) (extension, link string) {
	// Warn about unknown export formats
	for mimeType := range item.ExportLinks {
		if _, ok := mimeTypeToExtension[mimeType]; !ok {
			fs.Debugf(filepath, "Unknown export type %q - ignoring", mimeType)
		}
	}

	// Find the first export format we can
	for _, extension := range f.extensions {
		mimeType := extensionToMimeType[extension]
		if link, ok := item.ExportLinks[mimeType]; ok {
			return extension, link
		}
	}

	// else return empty
	return "", ""
}
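
// Illustrative sketch (not part of the original file): with the default
// preference list "docx,xlsx,pptx,svg", a Google Doc whose ExportLinks offer
// docx, pdf and odt is exported as docx, because docx is the first preferred
// extension whose mime type appears in ExportLinks; a doc offering only pdf
// comes back as ("", "") unless pdf is added via --drive-formats.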

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	err = f.dirCache.FindRoot(false)
	if err != nil {
		return nil, err
	}
	directoryID, err := f.dirCache.FindDir(dir, false)
	if err != nil {
		return nil, err
	}

	var iErr error
	_, err = f.list(directoryID, "", false, false, false, func(item *drive.File) bool {
		remote := path.Join(dir, item.Title)
		switch {
		case *driveAuthOwnerOnly && !isAuthOwned(item):
			// ignore object or directory
		case item.MimeType == driveFolderType:
			// cache the directory ID for later lookups
			f.dirCache.Put(remote, item.Id)
			when, _ := time.Parse(timeFormatIn, item.ModifiedDate)
			d := fs.NewDir(remote, when).SetID(item.Id)
			entries = append(entries, d)
		case item.Md5Checksum != "" || item.FileSize > 0:
			// If item has MD5 sum or a length it is a file stored on drive
			o, err := f.newObjectWithInfo(remote, item)
			if err != nil {
				iErr = err
				return true
			}
			entries = append(entries, o)
		case len(item.ExportLinks) != 0:
			// If item has export links then it is a google doc
			extension, link := f.findExportFormat(remote, item)
			if extension == "" {
				fs.Debugf(remote, "No export formats found")
			} else {
				o, err := f.newObjectWithInfo(remote+"."+extension, item)
				if err != nil {
					iErr = err
					return true
				}
				if !*driveSkipGdocs {
					obj := o.(*Object)
					obj.isDocument = true
					obj.url = link
					obj.bytes = -1
					entries = append(entries, o)
				} else {
					fs.Debugf(f, "Skip google document: %q", remote)
				}
			}
		default:
			fs.Debugf(remote, "Ignoring unknown object")
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	return entries, nil
}
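
// Illustrative sketch (not part of the original file): a Google Doc named
// "notes" with a docx export link is therefore listed as "notes.docx" with
// bytes set to -1 (the real size is only discovered later, see Size()),
// while ordinary Drive files keep their stored size and MD5 checksum.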

// Creates a drive.File info from the parameters passed in and a half
// finished Object which must have setMetaData called on it
//
// Used to create new objects
func (f *Fs) createFileInfo(remote string, modTime time.Time, size int64) (*Object, *drive.File, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: remote,
		bytes:  size,
	}

	leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
	if err != nil {
		return nil, nil, err
	}

	// Define the metadata for the file we are going to create.
	createInfo := &drive.File{
		Title:        leaf,
		Description:  leaf,
		Parents:      []*drive.ParentReference{{Id: directoryID}},
		MimeType:     fs.MimeTypeFromName(remote),
		ModifiedDate: modTime.Format(timeFormatOut),
	}
	return o, createInfo, nil
}

// Put the object
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
	switch err {
	case nil:
		return existingObj, existingObj.Update(in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(in, src, options...)
	default:
		return nil, err
	}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(in, src, options...)
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime()

	o, createInfo, err := f.createFileInfo(remote, modTime, size)
	if err != nil {
		return nil, err
	}

	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Insert(createInfo).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
			return shouldRetry(err)
		})
		if err != nil {
			return o, err
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(in, size, createInfo.MimeType, createInfo, remote)
		if err != nil {
			return o, err
		}
	}
	o.setMetaData(info)
	return o, nil
}
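
// Illustrative sketch (not part of the original file): with the defaults
// above (driveUploadCutoff == chunkSize == 8M), a 5 MB file goes up in a
// single Files.Insert call with inline media, while a 100 MB file takes the
// f.Upload path and is sent as a chunked (resumable) upload in 8 MB pieces;
// --drive-upload-cutoff and --drive-chunk-size move that boundary.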

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(dirs []fs.Directory) error {
	if len(dirs) < 2 {
		return nil
	}
	dstDir := dirs[0]
	for _, srcDir := range dirs[1:] {
		// list the objects
		infos := []*drive.File{}
		_, err := f.list(srcDir.ID(), "", false, false, true, func(info *drive.File) bool {
			infos = append(infos, info)
			return false
		})
		if err != nil {
			return errors.Wrapf(err, "MergeDirs list failed on %v", srcDir)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", info.Title)
			// Move the file into the destination
			info.Parents = []*drive.ParentReference{{Id: dstDir.ID()}}
			err = f.pacer.Call(func() (bool, error) {
				_, err = f.svc.Files.Patch(info.Id, info).SetModifiedDate(true).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
				return shouldRetry(err)
			})
			if err != nil {
				return errors.Wrapf(err, "MergeDirs move failed on %q in %v", info.Title, srcDir)
			}
		}
		// rmdir (into trash) the now empty source directory
		err = f.rmdir(srcDir.ID(), true)
		if err != nil {
			fs.Infof(srcDir, "removing empty directory")
			return errors.Wrapf(err, "MergeDirs move failed to rmdir %q", srcDir)
		}
	}
	return nil
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
	err := f.dirCache.FindRoot(true)
	if err != nil {
		return err
	}
	if dir != "" {
		_, err = f.dirCache.FindDir(dir, true)
	}
	return err
}

// rmdir deletes a directory unconditionally by ID
func (f *Fs) rmdir(directoryID string, useTrash bool) error {
	return f.pacer.Call(func() (bool, error) {
		var err error
		if useTrash {
			_, err = f.svc.Files.Trash(directoryID).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
		} else {
			err = f.svc.Files.Delete(directoryID).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
		}
		return shouldRetry(err)
	})
}

// Rmdir deletes a directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
	root := path.Join(f.root, dir)
	dc := f.dirCache
	directoryID, err := dc.FindDir(dir, false)
	if err != nil {
		return err
	}
	var trashedFiles = false
	found, err := f.list(directoryID, "", false, false, true, func(item *drive.File) bool {
		if item.Labels == nil || !item.Labels.Trashed {
			fs.Debugf(dir, "Rmdir: contains file: %q", item.Title)
			return true
		}
		fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Title)
		trashedFiles = true
		return false
	})
	if err != nil {
		return err
	}
	if found {
		return errors.Errorf("directory not empty")
	}
	if root != "" {
		// trash the directory if it had trashed files
		// in or the user wants to trash, otherwise
		// delete it.
		err = f.rmdir(directoryID, trashedFiles || *driveUseTrash)
		if err != nil {
			return err
		}
	}
	f.dirCache.FlushDir(dir)
	if err != nil {
		return err
	}
	return nil
}

// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
	return time.Millisecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	if srcObj.isDocument {
		return nil, errors.New("can't copy a Google document")
	}

	o, createInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
	if err != nil {
		return nil, err
	}

	var info *drive.File
	err = o.fs.pacer.Call(func() (bool, error) {
		info, err = o.fs.svc.Files.Copy(srcObj.id, createInfo).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

	o.setMetaData(info)
	return o, nil
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
	if f.root == "" {
		return errors.New("can't purge root directory")
	}
	err := f.dirCache.FindRoot(false)
	if err != nil {
		return err
	}
	err = f.pacer.Call(func() (bool, error) {
		if *driveUseTrash {
			_, err = f.svc.Files.Trash(f.dirCache.RootID()).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
		} else {
			err = f.svc.Files.Delete(f.dirCache.RootID()).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
		}
		return shouldRetry(err)
	})
	f.dirCache.ResetRoot()
	if err != nil {
		return err
	}
	return nil
}

// CleanUp empties the trash
func (f *Fs) CleanUp() error {
	err := f.pacer.Call(func() (bool, error) {
		err := f.svc.Files.EmptyTrash().Do()
		return shouldRetry(err)
	})

	if err != nil {
		return err
	}
	return nil
}

// Move src to this remote using server side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	if srcObj.isDocument {
		return nil, errors.New("can't move a Google document")
	}

	// Temporary Object under construction
	dstObj, dstInfo, err := f.createFileInfo(remote, srcObj.ModTime(), srcObj.bytes)
	if err != nil {
		return nil, err
	}

	// Do the move
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Patch(srcObj.id, dstInfo).SetModifiedDate(true).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

	dstObj.setMetaData(info)
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcPath := path.Join(srcFs.root, srcRemote)
	dstPath := path.Join(f.root, dstRemote)

	// Refuse to move to or from the root
	if srcPath == "" || dstPath == "" {
		fs.Debugf(src, "DirMove error: Can't move root")
		return errors.New("can't move root directory")
	}

	// find the root src directory
	err := srcFs.dirCache.FindRoot(false)
	if err != nil {
		return err
	}

	// find the root dst directory
	if dstRemote != "" {
		err = f.dirCache.FindRoot(true)
		if err != nil {
			return err
		}
	} else {
		if f.dirCache.FoundRoot() {
			return fs.ErrorDirExists
		}
	}

	// Find ID of dst parent, creating subdirs if necessary
	var leaf, directoryID string
	findPath := dstRemote
	if dstRemote == "" {
		findPath = f.root
	}
	leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
	if err != nil {
		return err
	}

	// Check destination does not exist
	if dstRemote != "" {
		_, err = f.dirCache.FindDir(dstRemote, false)
		if err == fs.ErrorDirNotFound {
			// OK
		} else if err != nil {
			return err
		} else {
			return fs.ErrorDirExists
		}
	}

	// Find ID of src
	srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
	if err != nil {
		return err
	}

	// Do the move
	patch := drive.File{
		Title:   leaf,
		Parents: []*drive.ParentReference{{Id: directoryID}},
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Files.Patch(srcID, &patch).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(f.isTeamDrive).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}

// DirChangeNotify polls for changes from the remote and hands the path to the
// given function. Only changes that can be resolved to a path through the
// DirCache will be handled.
//
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) DirChangeNotify(notifyFunc func(string), pollInterval time.Duration) chan bool {
	quit := make(chan bool)
	go func() {
		select {
		case <-quit:
			return
		default:
			for {
				f.dirchangeNotifyRunner(notifyFunc, pollInterval)
				fs.Debugf(f, "Notify listener service ran into issues, restarting shortly.")
				time.Sleep(pollInterval)
			}
		}
	}()
	return quit
}
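
// Illustrative sketch (not part of the original file): a caller polls for
// directory changes like this (the interval is hypothetical):
//
//	quit := f.DirChangeNotify(func(dir string) {
//		fs.Debugf(nil, "changed: %q", dir)
//	}, time.Minute)
//	// later, to stop being notified:
//	close(quit)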

// dirchangeNotifyRunner fetches changes from the Drive changes API and calls
// notifyFunc for each affected path found in the DirCache, returning on error
func (f *Fs) dirchangeNotifyRunner(notifyFunc func(string), pollInterval time.Duration) {
	var err error
	var changeList *drive.ChangeList
	var pageToken string
	var largestChangeID int64

	var startPageToken *drive.StartPageToken
	err = f.pacer.Call(func() (bool, error) {
		startPageToken, err = f.svc.Changes.GetStartPageToken().SupportsTeamDrives(f.isTeamDrive).Do()
		return shouldRetry(err)
	})
	if err != nil {
		fs.Debugf(f, "Failed to get StartPageToken: %v", err)
		return
	}
	pageToken = startPageToken.StartPageToken

	for {
		fs.Debugf(f, "Checking for changes on remote")
		err = f.pacer.Call(func() (bool, error) {
			changesCall := f.svc.Changes.List().PageToken(pageToken).Fields(googleapi.Field("nextPageToken,largestChangeId,newStartPageToken,items(fileId,file/parents(id))"))
			if largestChangeID != 0 {
				changesCall = changesCall.StartChangeId(largestChangeID)
			}
			if *driveListChunk > 0 {
				changesCall = changesCall.MaxResults(*driveListChunk)
			}
			changeList, err = changesCall.SupportsTeamDrives(f.isTeamDrive).Do()
			return shouldRetry(err)
		})
		if err != nil {
			fs.Debugf(f, "Failed to get Changes: %v", err)
			return
		}

		pathsToClear := make([]string, 0)
		for _, change := range changeList.Items {
			if path, ok := f.dirCache.GetInv(change.FileId); ok {
				pathsToClear = append(pathsToClear, path)
			}

			if change.File != nil {
				for _, parent := range change.File.Parents {
					if path, ok := f.dirCache.GetInv(parent.Id); ok {
						pathsToClear = append(pathsToClear, path)
					}
				}
			}
		}
		lastNotifiedPath := ""
		sort.Strings(pathsToClear)
		for _, path := range pathsToClear {
			if lastNotifiedPath != "" && (path == lastNotifiedPath || strings.HasPrefix(path+"/", lastNotifiedPath)) {
				continue
			}
			lastNotifiedPath = path
			notifyFunc(path)
		}

		if changeList.LargestChangeId != 0 {
			largestChangeID = changeList.LargestChangeId
		}
		if changeList.NewStartPageToken != "" {
			pageToken = changeList.NewStartPageToken
			fs.Debugf(f, "All changes were processed. Waiting for more.")
			time.Sleep(pollInterval)
		} else if changeList.NextPageToken != "" {
			pageToken = changeList.NextPageToken
			fs.Debugf(f, "There are more changes pending, checking now.")
		} else {
			fs.Debugf(f, "Did not get any page token, something went wrong! %+v", changeList)
			return
		}
	}
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.HashMD5)
}
|
|
|
|
|
|
// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the MD5 checksum of an object as a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
	if t != hash.HashMD5 {
		return "", hash.ErrHashUnsupported
	}
	return o.md5sum, nil
}

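// Illustrative usage (not part of the original source): reading the MD5
// checksum of an object from this backend, assuming `obj` is an fs.Object
// returned by a listing. Google documents carry no MD5, so the result may be
// the empty string.
//
//	md5sum, err := obj.Hash(hash.HashMD5)
//	if err == nil && md5sum != "" {
//		fs.Infof(obj, "md5 = %s", md5sum)
//	}
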
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	if o.isDocument && o.bytes < 0 {
		// If it is a google doc then we must HEAD it to see
		// how big it is
		_, res, err := o.httpResponse("HEAD", nil)
		if err != nil {
			fs.Errorf(o, "Error reading size: %v", err)
			return 0
		}
		_ = res.Body.Close()
		o.bytes = res.ContentLength
		// fs.Debugf(o, "Read size of document: %v", o.bytes)
	}
	return o.bytes
}

// setMetaData sets the fs data from a drive.File
func (o *Object) setMetaData(info *drive.File) {
	o.id = info.Id
	o.url = info.DownloadUrl
	o.md5sum = strings.ToLower(info.Md5Checksum)
	o.bytes = info.FileSize
	o.modifiedDate = info.ModifiedDate
	o.mimeType = info.MimeType
}

// readMetaData gets the info if it hasn't already been fetched
func (o *Object) readMetaData() (err error) {
	if o.id != "" {
		return nil
	}

	leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}

	found, err := o.fs.list(directoryID, leaf, false, true, false, func(item *drive.File) bool {
		if item.Title == leaf {
			o.setMetaData(item)
			return true
		}
		return false
	})
	if err != nil {
		return err
	}
	if !found {
		return fs.ErrorObjectNotFound
	}
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present
// it uses the LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Debugf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
	if err != nil {
		fs.Debugf(o, "Failed to read mtime from object: %v", err)
		return time.Now()
	}
	return modTime
}

// SetModTime sets the modification time of the drive fs object
func (o *Object) SetModTime(modTime time.Time) error {
	err := o.readMetaData()
	if err != nil {
		return err
	}
	// New metadata
	updateInfo := &drive.File{
		ModifiedDate: modTime.Format(timeFormatOut),
	}
	// Set modified date
	var info *drive.File
	err = o.fs.pacer.Call(func() (bool, error) {
		info, err = o.fs.svc.Files.Update(o.id, updateInfo).SetModifiedDate(true).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// Update info from read data
	o.setMetaData(info)
	return nil
}

// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// httpResponse gets an http.Response object for the object o.url
// using the method passed in
func (o *Object) httpResponse(method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
	if o.url == "" {
		return nil, nil, errors.New("forbidden to download - check sharing permission")
	}
	req, err = http.NewRequest(method, o.url, nil)
	if err != nil {
		return req, nil, err
	}
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		return shouldRetry(err)
	})
	if err != nil {
		return req, nil, err
	}
	return req, res, nil
}

// openFile represents an Object open for reading
type openFile struct {
	o     *Object       // Object we are reading for
	in    io.ReadCloser // reading from here
	bytes int64         // number of bytes read on this connection
	eof   bool          // whether we have read end of file
}

// Read bytes from the object - see io.Reader
func (file *openFile) Read(p []byte) (n int, err error) {
	n, err = file.in.Read(p)
	file.bytes += int64(n)
	if err == io.EOF {
		file.eof = true
	}
	return
}

// Close the object and update bytes read
func (file *openFile) Close() (err error) {
	// If end of file, update bytes read
	if file.eof {
		// fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
		file.o.bytes = file.bytes
	}
	return file.in.Close()
}

// Check it satisfies the interfaces
var _ io.ReadCloser = &openFile{}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	req, res, err := o.httpResponse("GET", options)
	if err != nil {
		return nil, err
	}
	_, isRanging := req.Header["Range"]
	if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
		_ = res.Body.Close() // ignore error
		return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
	}
	// If it is a document, update the size with what we are
	// reading as it can change from the HEAD in the listing to
	// this GET. This stops rclone marking the transfer as
	// corrupted.
	if o.isDocument {
		return &openFile{o: o, in: res.Body}, nil
	}
	return res.Body, nil
}

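// Illustrative usage (not part of the original source): Open accepts
// fs.OpenOption values such as fs.SeekOption, which httpResponse turns into a
// Range header on the GET request.
//
//	in, err := o.Open(&fs.SeekOption{Offset: 1024}) // start reading at byte 1024
//	if err == nil {
//		n, _ := io.Copy(ioutil.Discard, in) // drain the rest of the body
//		_ = in.Close()
//		fs.Debugf(o, "read %d bytes", n)
//	}
//
// For google documents the returned reader is an openFile, so reading the
// whole body and then closing it records the real exported size in o.bytes.
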
// Update the already existing object
//
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	size := src.Size()
	modTime := src.ModTime()
	if o.isDocument {
		return errors.New("can't update a google document")
	}
	updateInfo := &drive.File{
		Id:           o.id,
		MimeType:     fs.MimeType(src),
		ModifiedDate: modTime.Format(timeFormatOut),
	}

	// Make the API request to upload metadata and file data.
	var err error
	var info *drive.File
	if size == 0 || size < int64(driveUploadCutoff) {
		// Don't retry, return a retry error instead
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			info, err = o.fs.svc.Files.Update(updateInfo.Id, updateInfo).SetModifiedDate(true).Media(in, googleapi.ContentType("")).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).Do()
			return shouldRetry(err)
		})
		if err != nil {
			return err
		}
	} else {
		// Upload the file in chunks
		info, err = o.fs.Upload(in, size, updateInfo.MimeType, updateInfo, o.remote)
		if err != nil {
			return err
		}
	}
	o.setMetaData(info)
	return nil
}

// Remove an object
func (o *Object) Remove() error {
	if o.isDocument {
		return errors.New("can't delete a google document")
	}
	var err error
	err = o.fs.pacer.Call(func() (bool, error) {
		if *driveUseTrash {
			_, err = o.fs.svc.Files.Trash(o.id).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).Do()
		} else {
			err = o.fs.svc.Files.Delete(o.id).Fields(googleapi.Field(partialFields)).SupportsTeamDrives(o.fs.isTeamDrive).Do()
		}
		return shouldRetry(err)
	})
	return err
}

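// Illustrative note (not part of the original source): Remove honours the
// --drive-use-trash flag, so by default files are moved to the Drive trash
// rather than deleted. A permanent delete can be requested on the command
// line, e.g.
//
//	rclone --drive-use-trash=false delete remote:old-backups
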
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
	err := o.readMetaData()
	if err != nil {
		fs.Debugf(o, "Failed to read metadata: %v", err)
		return ""
	}
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs                = (*Fs)(nil)
	_ fs.Purger            = (*Fs)(nil)
	_ fs.CleanUpper        = (*Fs)(nil)
	_ fs.PutStreamer       = (*Fs)(nil)
	_ fs.Copier            = (*Fs)(nil)
	_ fs.Mover             = (*Fs)(nil)
	_ fs.DirMover          = (*Fs)(nil)
	_ fs.DirCacheFlusher   = (*Fs)(nil)
	_ fs.DirChangeNotifier = (*Fs)(nil)
	_ fs.PutUncheckeder    = (*Fs)(nil)
	_ fs.MergeDirser       = (*Fs)(nil)
	_ fs.Object            = (*Object)(nil)
	_ fs.MimeTyper         = &Object{}
)