// Package drive interfaces with the Google Drive object storage system
package drive

// FIXME need to deal with some corner cases
// * multiple files with the same name
// * files can be in multiple directories
// * can have directory loops
// * files with / in name

import (
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"io"
	"mime"
	"net/http"
	"os"
	"path"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"text/template"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/config/obscure"
	"github.com/rclone/rclone/fs/filter"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/fspath"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/dircache"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/env"
	"github.com/rclone/rclone/lib/oauthutil"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/readers"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	drive_v2 "google.golang.org/api/drive/v2"
	drive "google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/option"
)

// Constants
const (
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "eX8GpZTVx3vxMWVkuuBdDWmAUE6rGhTwVrvG9GhllYccSdj2-mvHVg"
	driveFolderType             = "application/vnd.google-apps.folder"
	shortcutMimeType            = "application/vnd.google-apps.shortcut"
	shortcutMimeTypeDangling    = "application/vnd.google-apps.shortcut.dangling" // synthetic mime type for internal use
	timeFormatIn                = time.RFC3339
	timeFormatOut               = "2006-01-02T15:04:05.000000000Z07:00"
	defaultMinSleep             = fs.Duration(100 * time.Millisecond)
	defaultBurst                = 100
	defaultExportExtensions     = "docx,xlsx,pptx,svg"
	scopePrefix                 = "https://www.googleapis.com/auth/"
	defaultScope                = "drive"
	// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
	// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
	minChunkSize     = fs.SizeSuffix(googleapi.MinUploadChunkSize)
	defaultChunkSize = 8 * fs.Mebi
	partialFields    = "id,name,size,md5Checksum,trashed,explicitlyTrashed,modifiedTime,createdTime,mimeType,parents,webViewLink,shortcutDetails,exportLinks,resourceKey"
	listRGrouping    = 50   // number of IDs to search at once when using ListR
	listRInputBuffer = 1000 // size of input buffer when using ListR
	defaultXDGIcon   = "text-html"
)

// Globals
var (
	// Description of how to auth for this app
	driveConfig = &oauth2.Config{
		Scopes:       []string{scopePrefix + "drive"},
		Endpoint:     google.Endpoint,
		ClientID:     rcloneClientID,
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectURL,
	}
	_mimeTypeToExtensionDuplicates = map[string]string{
		"application/x-vnd.oasis.opendocument.presentation": ".odp",
		"application/x-vnd.oasis.opendocument.spreadsheet":  ".ods",
		"application/x-vnd.oasis.opendocument.text":         ".odt",
		"image/jpg":   ".jpg",
		"image/x-bmp": ".bmp",
		"image/x-png": ".png",
		"text/rtf":    ".rtf",
	}
	_mimeTypeToExtension = map[string]string{
		"application/epub+zip":     ".epub",
		"application/json":         ".json",
		"application/msword":       ".doc",
		"application/pdf":          ".pdf",
		"application/rtf":          ".rtf",
		"application/vnd.ms-excel": ".xls",
		"application/vnd.oasis.opendocument.presentation":                           ".odp",
		"application/vnd.oasis.opendocument.spreadsheet":                            ".ods",
		"application/vnd.oasis.opendocument.text":                                   ".odt",
		"application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet":         ".xlsx",
		"application/vnd.openxmlformats-officedocument.wordprocessingml.document":   ".docx",
		"application/x-msmetafile":  ".wmf",
		"application/zip":           ".zip",
		"image/bmp":                 ".bmp",
		"image/jpeg":                ".jpg",
		"image/pjpeg":               ".pjpeg",
		"image/png":                 ".png",
		"image/svg+xml":             ".svg",
		"text/csv":                  ".csv",
		"text/html":                 ".html",
		"text/plain":                ".txt",
		"text/tab-separated-values": ".tsv",
	}
	_mimeTypeToExtensionLinks = map[string]string{
		"application/x-link-desktop": ".desktop",
		"application/x-link-html":    ".link.html",
		"application/x-link-url":     ".url",
		"application/x-link-webloc":  ".webloc",
	}
	_mimeTypeCustomTransform = map[string]string{
		"application/vnd.google-apps.script+json": "application/json",
	}
	_mimeTypeToXDGLinkIcons = map[string]string{
		"application/vnd.google-apps.document":     "x-office-document",
		"application/vnd.google-apps.drawing":      "x-office-drawing",
		"application/vnd.google-apps.presentation": "x-office-presentation",
		"application/vnd.google-apps.spreadsheet":  "x-office-spreadsheet",
	}
	fetchFormatsOnce sync.Once                     // make sure we fetch the export/import formats only once
	_exportFormats   map[string][]string           // allowed export MIME type conversions
	_importFormats   map[string][]string           // allowed import MIME type conversions
	templatesOnce    sync.Once                     // parse link templates only once
	_linkTemplates   map[string]*template.Template // available link types
)

// Parse the scopes option returning a slice of scopes
func driveScopes(scopesString string) (scopes []string) {
	if scopesString == "" {
		scopesString = defaultScope
	}
	for _, scope := range strings.Split(scopesString, ",") {
		scope = strings.TrimSpace(scope)
		scopes = append(scopes, scopePrefix+scope)
	}
	return scopes
}
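
// As an illustration, driveScopes("drive.readonly, drive.file") expands each
// comma separated entry with scopePrefix, returning
//
//	[]string{
//		"https://www.googleapis.com/auth/drive.readonly",
//		"https://www.googleapis.com/auth/drive.file",
//	}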

// Returns true if one of the scopes was "drive.appfolder"
func driveScopesContainsAppFolder(scopes []string) bool {
	for _, scope := range scopes {
		if scope == scopePrefix+"drive.appfolder" {
			return true
		}
	}
	return false
}

func driveOAuthOptions() []fs.Option {
	opts := []fs.Option{}
	for _, opt := range oauthutil.SharedOptions {
		if opt.Name == config.ConfigClientID {
			opt.Help = "Google Application Client Id\nSetting your own is recommended.\nSee https://rclone.org/drive/#making-your-own-client-id for how to create your own.\nIf you leave this blank, it will use an internal key which is low performance."
		}
		opts = append(opts, opt)
	}
	return opts
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "drive",
		Description: "Google Drive",
		NewFs:       NewFs,
		CommandHelp: commandHelp,
		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
			// Parse config into Options struct
			opt := new(Options)
			err := configstruct.Set(m, opt)
			if err != nil {
				return nil, fmt.Errorf("couldn't parse config into struct: %w", err)
			}

			switch config.State {
			case "":
				// Fill in the scopes
				driveConfig.Scopes = driveScopes(opt.Scope)
				// Set the root_folder_id if using drive.appfolder
				if driveScopesContainsAppFolder(driveConfig.Scopes) {
					m.Set("root_folder_id", "appDataFolder")
				}
				if opt.ServiceAccountFile == "" && opt.ServiceAccountCredentials == "" {
					return oauthutil.ConfigOut("teamdrive", &oauthutil.Options{
						OAuth2Config: driveConfig,
					})
				}
				return fs.ConfigGoto("teamdrive")
			case "teamdrive":
				if opt.TeamDriveID == "" {
					return fs.ConfigConfirm("teamdrive_ok", false, "config_change_team_drive", "Configure this as a Shared Drive (Team Drive)?\n")
				}
				return fs.ConfigConfirm("teamdrive_change", false, "config_change_team_drive", fmt.Sprintf("Change current Shared Drive (Team Drive) ID %q?\n", opt.TeamDriveID))
			case "teamdrive_ok":
				if config.Result == "false" {
					m.Set("team_drive", "")
					return nil, nil
				}
				return fs.ConfigGoto("teamdrive_config")
			case "teamdrive_change":
				if config.Result == "false" {
					return nil, nil
				}
				return fs.ConfigGoto("teamdrive_config")
			case "teamdrive_config":
				f, err := newFs(ctx, name, "", m)
				if err != nil {
					return nil, fmt.Errorf("failed to make Fs to list Shared Drives: %w", err)
				}
				teamDrives, err := f.listTeamDrives(ctx)
				if err != nil {
					return nil, err
				}
				if len(teamDrives) == 0 {
					return fs.ConfigError("", "No Shared Drives found in your account")
				}
				return fs.ConfigChoose("teamdrive_final", "config_team_drive", "Shared Drive", len(teamDrives), func(i int) (string, string) {
					teamDrive := teamDrives[i]
					return teamDrive.Id, teamDrive.Name
				})
			case "teamdrive_final":
				driveID := config.Result
				m.Set("team_drive", driveID)
				m.Set("root_folder_id", "")
				opt.TeamDriveID = driveID
				opt.RootFolderID = ""
				return nil, nil
			}
			return nil, fmt.Errorf("unknown state %q", config.State)
		},
		Options: append(driveOAuthOptions(), []fs.Option{{
			Name: "scope",
			Help: "Scope that rclone should use when requesting access from drive.",
			Examples: []fs.OptionExample{{
				Value: "drive",
				Help:  "Full access all files, excluding Application Data Folder.",
			}, {
				Value: "drive.readonly",
				Help:  "Read-only access to file metadata and file contents.",
			}, {
				Value: "drive.file",
				Help:  "Access to files created by rclone only.\nThese are visible in the drive website.\nFile authorization is revoked when the user deauthorizes the app.",
			}, {
				Value: "drive.appfolder",
				Help:  "Allows read and write access to the Application Data folder.\nThis is not visible in the drive website.",
			}, {
				Value: "drive.metadata.readonly",
				Help:  "Allows read-only access to file metadata but\ndoes not allow any access to read or download file content.",
			}},
		}, {
			Name: "root_folder_id",
			Help: `ID of the root folder.
Leave blank normally.

Fill in to access "Computers" folders (see docs), or for rclone to use
a non root folder as its starting point.
`,
			Advanced: true,
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path.\n\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login." + env.ShellExpandHelp,
		}, {
			Name:     "service_account_credentials",
			Help:     "Service Account Credentials JSON blob.\n\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:     "team_drive",
			Help:     "ID of the Shared Drive (Team Drive).",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:     "auth_owner_only",
			Default:  false,
			Help:     "Only consider files owned by the authenticated user.",
			Advanced: true,
		}, {
			Name:     "use_trash",
			Default:  true,
			Help:     "Send files to the trash instead of deleting permanently.\n\nDefaults to true, namely sending files to the trash.\nUse `--drive-use-trash=false` to delete files permanently instead.",
			Advanced: true,
		}, {
			Name:    "copy_shortcut_content",
			Default: false,
			Help: `Server side copy contents of shortcuts instead of the shortcut.

When doing server side copies, normally rclone will copy shortcuts as
shortcuts.

If this flag is used then rclone will copy the contents of shortcuts
rather than shortcuts themselves when doing server side copies.`,
			Advanced: true,
		}, {
			Name:     "skip_gdocs",
			Default:  false,
			Help:     "Skip google documents in all listings.\n\nIf given, gdocs practically become invisible to rclone.",
			Advanced: true,
		}, {
			Name:    "skip_checksum_gphotos",
			Default: false,
			Help: `Skip MD5 checksum on Google photos and videos only.

Use this if you get checksum errors when transferring Google photos or
videos.

Setting this flag will cause Google photos and videos to return a
blank MD5 checksum.

Google photos are identified by being in the "photos" space.

Corrupted checksums are caused by Google modifying the image/video but
not updating the checksum.`,
			Advanced: true,
		}, {
			Name:    "shared_with_me",
			Default: false,
			Help: `Only show files that are shared with me.

Instructs rclone to operate on your "Shared with me" folder (where
Google Drive lets you access the files and folders others have shared
with you).

This works both with the "list" (lsd, lsl, etc.) and the "copy"
commands (copy, sync, etc.), and with all other commands too.`,
			Advanced: true,
		}, {
			Name:     "trashed_only",
			Default:  false,
			Help:     "Only show files that are in the trash.\n\nThis will show trashed files in their original directory structure.",
			Advanced: true,
		}, {
			Name:     "starred_only",
			Default:  false,
			Help:     "Only show files that are starred.",
			Advanced: true,
		}, {
			Name:     "formats",
			Default:  "",
			Help:     "Deprecated: See export_formats.",
			Advanced: true,
			Hide:     fs.OptionHideConfigurator,
		}, {
			Name:     "export_formats",
			Default:  defaultExportExtensions,
			Help:     "Comma separated list of preferred formats for downloading Google docs.",
			Advanced: true,
		}, {
			Name:     "import_formats",
			Default:  "",
			Help:     "Comma separated list of preferred formats for uploading Google docs.",
			Advanced: true,
		}, {
			Name:     "allow_import_name_change",
			Default:  false,
			Help:     "Allow the filetype to change when uploading Google docs.\n\nE.g. file.doc to file.docx. This will confuse sync and reupload every time.",
			Advanced: true,
		}, {
			Name:    "use_created_date",
			Default: false,
			Help: `Use file created date instead of modified date.

Useful when downloading data and you want the creation date used in
place of the last modified date.

**WARNING**: This flag may have some unexpected consequences.

When uploading to your drive all files will be overwritten unless they
haven't been modified since their creation. And the inverse will occur
while downloading. This side effect can be avoided by using the
"--checksum" flag.

This feature was implemented to retain photos capture date as recorded
by google photos. You will first need to check the "Create a Google
Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
			Advanced: true,
			Hide:     fs.OptionHideConfigurator,
		}, {
			Name:    "use_shared_date",
			Default: false,
			Help: `Use date file was shared instead of modified date.

Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.

If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
			Advanced: true,
			Hide:     fs.OptionHideConfigurator,
		}, {
			Name:     "list_chunk",
			Default:  1000,
			Help:     "Size of listing chunk 100-1000, 0 to disable.",
			Advanced: true,
		}, {
			Name:     "impersonate",
			Default:  "",
			Help:     `Impersonate this user when using a service account.`,
			Advanced: true,
		}, {
			Name:    "alternate_export",
			Default: false,
			Help:    "Deprecated: No longer needed.",
			Hide:    fs.OptionHideBoth,
		}, {
			Name:     "upload_cutoff",
			Default:  defaultChunkSize,
			Help:     "Cutoff for switching to chunked upload.",
			Advanced: true,
		}, {
			Name:    "chunk_size",
			Default: defaultChunkSize,
			Help: `Upload chunk size.

Must be a power of 2 >= 256k.

Making this larger will improve performance, but note that each chunk
is buffered in memory one per transfer.

Reducing this will reduce memory usage but decrease performance.`,
			Advanced: true,
		}, {
			Name:    "acknowledge_abuse",
			Default: false,
			Help: `Set to allow files which return cannotDownloadAbusiveFile to be downloaded.

If downloading a file returns the error "This file has been identified
as malware or spam and cannot be downloaded" with the error code
"cannotDownloadAbusiveFile" then supply this flag to rclone to
indicate you acknowledge the risks of downloading the file and rclone
will download it anyway.

Note that if you are using service account it will need Manager
permission (not Content Manager) for this flag to work. If the SA
does not have the right permission, Google will just ignore the flag.`,
			Advanced: true,
		}, {
			Name:     "keep_revision_forever",
			Default:  false,
			Help:     "Keep new head revision of each file forever.",
			Advanced: true,
		}, {
			Name:    "size_as_quota",
			Default: false,
			Help: `Show sizes as storage quota usage, not actual size.

Show the size of a file as the storage quota used. This is the
current version plus any older versions that have been set to keep
forever.

**WARNING**: This flag may have some unexpected consequences.

It is not recommended to set this flag in your config - the
recommended usage is using the flag form --drive-size-as-quota when
doing rclone ls/lsl/lsf/lsjson/etc only.

If you do use this flag for syncing (not recommended) then you will
need to use --ignore-size also.`,
			Advanced: true,
			Hide:     fs.OptionHideConfigurator,
		}, {
			Name:     "v2_download_min_size",
			Default:  fs.SizeSuffix(-1),
			Help:     "If Objects are greater, use drive v2 API to download.",
			Advanced: true,
		}, {
			Name:     "pacer_min_sleep",
			Default:  defaultMinSleep,
			Help:     "Minimum time to sleep between API calls.",
			Advanced: true,
		}, {
			Name:     "pacer_burst",
			Default:  defaultBurst,
			Help:     "Number of API calls to allow without sleeping.",
			Advanced: true,
		}, {
			Name:    "server_side_across_configs",
			Default: false,
			Help: `Allow server-side operations (e.g. copy) to work across different drive configs.

This can be useful if you wish to do a server-side copy between two
different Google drives. Note that this isn't enabled by default
because it isn't easy to tell if it will work between any two
configurations.`,
			Advanced: true,
		}, {
			Name:    "disable_http2",
			Default: true,
			Help: `Disable drive using http2.

There is currently an unsolved issue with the google drive backend and
HTTP/2. HTTP/2 is therefore disabled by default for the drive backend
but can be re-enabled here. When the issue is solved this flag will
be removed.

See: https://github.com/rclone/rclone/issues/3631
`,
			Advanced: true,
		}, {
			Name:    "stop_on_upload_limit",
			Default: false,
			Help: `Make upload limit errors be fatal.

At the time of writing it is only possible to upload 750 GiB of data to
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
the in-progress sync.

Note that this detection is relying on error message strings which
Google don't document so it may break in the future.

See: https://github.com/rclone/rclone/issues/3857
`,
			Advanced: true,
		}, {
			Name:    "stop_on_download_limit",
			Default: false,
			Help: `Make download limit errors be fatal.

At the time of writing it is only possible to download 10 TiB of data from
Google Drive a day (this is an undocumented limit). When this limit is
reached Google Drive produces a slightly different error message. When
this flag is set it causes these errors to be fatal. These will stop
the in-progress sync.

Note that this detection is relying on error message strings which
Google don't document so it may break in the future.
`,
			Advanced: true,
		}, {
			Name: "skip_shortcuts",
			Help: `If set skip shortcut files.

Normally rclone dereferences shortcut files making them appear as if
they are the original file (see [the shortcuts section](#shortcuts)).
If this flag is set then rclone will ignore shortcut files completely.
`,
			Advanced: true,
			Default:  false,
		}, {
			Name: "skip_dangling_shortcuts",
			Help: `If set skip dangling shortcut files.

If this is set then rclone will not show any dangling shortcuts in listings.
`,
			Advanced: true,
			Default:  false,
		}, {
			Name: "resource_key",
			Help: `Resource key for accessing a link-shared file.

If you need to access files shared with a link like this

https://drive.google.com/drive/folders/XXX?resourcekey=YYY&usp=sharing

Then you will need to use the first part "XXX" as the "root_folder_id"
and the second part "YYY" as the "resource_key" otherwise you will get
404 not found errors when trying to access the directory.

See: https://developers.google.com/drive/api/guides/resource-keys

This resource key requirement only applies to a subset of old files.

Note also that opening the folder once in the web interface (with the
user you've authenticated rclone with) seems to be enough so that the
resource key is not needed.
`,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
			// Don't encode / as it's a valid name character in drive.
			Default: encoder.EncodeInvalidUtf8,
		}}...),
	})

	// register duplicate MIME types first
	// this allows them to be used with mime.ExtensionsByType() but
	// mime.TypeByExtension() will return the later registered MIME type
	for _, m := range []map[string]string{
		_mimeTypeToExtensionDuplicates, _mimeTypeToExtension, _mimeTypeToExtensionLinks,
	} {
		for mimeType, extension := range m {
			if err := mime.AddExtensionType(extension, mimeType); err != nil {
				fs.Errorf("Failed to register MIME type %q: %v", mimeType, err)
			}
		}
	}
}

// Options defines the configuration for this backend
type Options struct {
	Scope                     string               `config:"scope"`
	RootFolderID              string               `config:"root_folder_id"`
	ServiceAccountFile        string               `config:"service_account_file"`
	ServiceAccountCredentials string               `config:"service_account_credentials"`
	TeamDriveID               string               `config:"team_drive"`
	AuthOwnerOnly             bool                 `config:"auth_owner_only"`
	UseTrash                  bool                 `config:"use_trash"`
	CopyShortcutContent       bool                 `config:"copy_shortcut_content"`
	SkipGdocs                 bool                 `config:"skip_gdocs"`
	SkipChecksumGphotos       bool                 `config:"skip_checksum_gphotos"`
	SharedWithMe              bool                 `config:"shared_with_me"`
	TrashedOnly               bool                 `config:"trashed_only"`
	StarredOnly               bool                 `config:"starred_only"`
	Extensions                string               `config:"formats"`
	ExportExtensions          string               `config:"export_formats"`
	ImportExtensions          string               `config:"import_formats"`
	AllowImportNameChange     bool                 `config:"allow_import_name_change"`
	UseCreatedDate            bool                 `config:"use_created_date"`
	UseSharedDate             bool                 `config:"use_shared_date"`
	ListChunk                 int64                `config:"list_chunk"`
	Impersonate               string               `config:"impersonate"`
	UploadCutoff              fs.SizeSuffix        `config:"upload_cutoff"`
	ChunkSize                 fs.SizeSuffix        `config:"chunk_size"`
	AcknowledgeAbuse          bool                 `config:"acknowledge_abuse"`
	KeepRevisionForever       bool                 `config:"keep_revision_forever"`
	SizeAsQuota               bool                 `config:"size_as_quota"`
	V2DownloadMinSize         fs.SizeSuffix        `config:"v2_download_min_size"`
	PacerMinSleep             fs.Duration          `config:"pacer_min_sleep"`
	PacerBurst                int                  `config:"pacer_burst"`
	ServerSideAcrossConfigs   bool                 `config:"server_side_across_configs"`
	DisableHTTP2              bool                 `config:"disable_http2"`
	StopOnUploadLimit         bool                 `config:"stop_on_upload_limit"`
	StopOnDownloadLimit       bool                 `config:"stop_on_download_limit"`
	SkipShortcuts             bool                 `config:"skip_shortcuts"`
	SkipDanglingShortcuts     bool                 `config:"skip_dangling_shortcuts"`
	ResourceKey               string               `config:"resource_key"`
	Enc                       encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote drive server
type Fs struct {
	name             string             // name of this remote
	root             string             // the path we are working on
	opt              Options            // parsed options
	ci               *fs.ConfigInfo     // global config
	features         *fs.Features       // optional features
	svc              *drive.Service     // the connection to the drive server
	v2Svc            *drive_v2.Service  // used to create download links for the v2 api
	client           *http.Client       // authorized client
	rootFolderID     string             // the id of the root folder
	dirCache         *dircache.DirCache // Map of directory path to directory id
	lastQuery        string             // Last query string to check in unit tests
	pacer            *fs.Pacer          // To pace the API calls
	exportExtensions []string           // preferred extensions to download docs
	importMimeTypes  []string           // MIME types to convert to docs
	isTeamDrive      bool               // true if this is a team drive
	fileFields       googleapi.Field    // fields to fetch file info with
	m                configmap.Mapper
	grouping         int32               // number of IDs to search at once in ListR - read with atomic
	listRmu          *sync.Mutex         // protects listRempties
	listRempties     map[string]struct{} // IDs of supposedly empty directories which triggered grouping disable
	dirResourceKeys  *sync.Map           // map directory ID to resource key
}

type baseObject struct {
	fs           *Fs      // what this object is part of
	remote       string   // The remote path
	id           string   // Drive Id of this object
	modifiedDate string   // RFC3339 time it was last modified
	mimeType     string   // The object MIME type
	bytes        int64    // size of the object
	parents      []string // IDs of the parent directories
	resourceKey  *string  // resourceKey is needed for link shared objects
}
type documentObject struct {
	baseObject
	url              string // Download URL of this object
	documentMimeType string // the original document MIME type
	extLen           int    // The length of the added export extension
}
type linkObject struct {
	baseObject
	content []byte // The file content generated by a link template
	extLen  int    // The length of the added export extension
}

// Object describes a drive object
type Object struct {
	baseObject
	url        string // Download URL of this object
	md5sum     string // md5sum of the object
	v2Download bool   // generate v2 download link ondemand
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("Google drive root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// shouldRetry determines whether a given err rates being retried
func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	if err == nil {
		return false, nil
	}
	if fserrors.ShouldRetry(err) {
		return true, err
	}
	switch gerr := err.(type) {
	case *googleapi.Error:
		if gerr.Code >= 500 && gerr.Code < 600 {
			// All 5xx errors should be retried
			return true, err
		}
		if len(gerr.Errors) > 0 {
			reason := gerr.Errors[0].Reason
			if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
				if f.opt.StopOnUploadLimit && gerr.Errors[0].Message == "User rate limit exceeded." {
					fs.Errorf(f, "Received upload limit error: %v", err)
					return false, fserrors.FatalError(err)
				}
				return true, err
			} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
				fs.Errorf(f, "Received download limit error: %v", err)
				return false, fserrors.FatalError(err)
			} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
				fs.Errorf(f, "Received upload limit error: %v", err)
				return false, fserrors.FatalError(err)
			} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
				fs.Errorf(f, "Received Shared Drive file limit error: %v", err)
				return false, fserrors.FatalError(err)
			}
		}
	}
	return false, err
}
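
// shouldRetry is designed to be called from inside f.pacer.Call, as the rest
// of this file does. A sketch of the pattern (see getFile below for a real use):
//
//	err = f.pacer.Call(func() (bool, error) {
//		info, err = f.svc.Files.Get(ID).Fields(fields).Context(ctx).Do()
//		return f.shouldRetry(ctx, err)
//	})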

// parseDrivePath parses a drive 'url'
func parseDrivePath(path string) (root string, err error) {
	root = strings.Trim(path, "/")
	return
}

// User function to process a File item from list
//
// Should return true to finish processing
type listFn func(*drive.File) bool

func containsString(slice []string, s string) bool {
	for _, e := range slice {
		if e == s {
			return true
		}
	}
	return false
}

// getFile returns drive.File for the ID passed and fields passed in
func (f *Fs) getFile(ctx context.Context, ID string, fields googleapi.Field) (info *drive.File, err error) {
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Get(ID).
			Fields(fields).
			SupportsAllDrives(true).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	return info, err
}

// getRootID returns the canonical ID for the "root" ID
func (f *Fs) getRootID(ctx context.Context) (string, error) {
	info, err := f.getFile(ctx, "root", "id")
	if err != nil {
		return "", fmt.Errorf("couldn't find root directory ID: %w", err)
	}
	return info.Id, nil
}

// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
//
// Search params: https://developers.google.com/drive/search-parameters
func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directoriesOnly, filesOnly, trashedOnly, includeAll bool, fn listFn) (found bool, err error) {
	var query []string
	if !includeAll {
		q := "trashed=" + strconv.FormatBool(trashedOnly)
		if f.opt.TrashedOnly {
			q = fmt.Sprintf("(mimeType='%s' or %s)", driveFolderType, q)
		}
		query = append(query, q)
	}

	// Search with sharedWithMe will always return things listed in "Shared With Me" (without any parents)
	// We must not filter with parent when we try list "ROOT" with drive-shared-with-me
	// If we need to list file inside those shared folders, we must search it without sharedWithMe
	parentsQuery := bytes.NewBufferString("(")
	var resourceKeys []string
	for _, dirID := range dirIDs {
		if dirID == "" {
			continue
		}
		if parentsQuery.Len() > 1 {
			_, _ = parentsQuery.WriteString(" or ")
		}
		if (f.opt.SharedWithMe || f.opt.StarredOnly) && dirID == f.rootFolderID {
			if f.opt.SharedWithMe {
				_, _ = parentsQuery.WriteString("sharedWithMe=true")
			}
			if f.opt.StarredOnly {
				if f.opt.SharedWithMe {
					_, _ = parentsQuery.WriteString(" and ")
				}
				_, _ = parentsQuery.WriteString("starred=true")
			}
		} else {
			_, _ = fmt.Fprintf(parentsQuery, "'%s' in parents", dirID)
		}
		resourceKey, hasResourceKey := f.dirResourceKeys.Load(dirID)
		if hasResourceKey {
			resourceKeys = append(resourceKeys, fmt.Sprintf("%s/%s", dirID, resourceKey))
		}
	}
	resourceKeysHeader := strings.Join(resourceKeys, ",")
	if parentsQuery.Len() > 1 {
		_ = parentsQuery.WriteByte(')')
		query = append(query, parentsQuery.String())
	}
	var stems []string
	if title != "" {
		searchTitle := f.opt.Enc.FromStandardName(title)
		// Escaping the backslash isn't documented but seems to work
		searchTitle = strings.ReplaceAll(searchTitle, `\`, `\\`)
		searchTitle = strings.ReplaceAll(searchTitle, `'`, `\'`)

		var titleQuery bytes.Buffer
		_, _ = fmt.Fprintf(&titleQuery, "(name='%s'", searchTitle)
		if !directoriesOnly && !f.opt.SkipGdocs {
			// If the search title has an extension that is in the export extensions add a search
			// for the filename without the extension.
			// Assume that export extensions don't contain escape sequences.
			for _, ext := range f.exportExtensions {
				if strings.HasSuffix(searchTitle, ext) {
					stems = append(stems, title[:len(title)-len(ext)])
					_, _ = fmt.Fprintf(&titleQuery, " or name='%s'", searchTitle[:len(searchTitle)-len(ext)])
				}
			}
		}
		_ = titleQuery.WriteByte(')')
		query = append(query, titleQuery.String())
	}
	if directoriesOnly {
		query = append(query, fmt.Sprintf("(mimeType='%s' or mimeType='%s')", driveFolderType, shortcutMimeType))
	}
	if filesOnly {
		query = append(query, fmt.Sprintf("mimeType!='%s'", driveFolderType))
	}

	// Constrain query using filter if this remote is a sync/copy/walk source.
	if fi, use := filter.GetConfig(ctx), filter.GetUseFilter(ctx); fi != nil && use {
		queryByTime := func(op string, tm time.Time) {
			if tm.IsZero() {
				return
			}
			// https://developers.google.com/drive/api/v3/ref-search-terms#operators
			// Query times use RFC 3339 format, default timezone is UTC
			timeStr := tm.UTC().Format("2006-01-02T15:04:05")
			term := fmt.Sprintf("(modifiedTime %s '%s' or mimeType = '%s')", op, timeStr, driveFolderType)
			query = append(query, term)
		}
		queryByTime(">=", fi.ModTimeFrom)
		queryByTime("<=", fi.ModTimeTo)
	}

	list := f.svc.Files.List()
	queryString := strings.Join(query, " and ")
	if queryString != "" {
		list.Q(queryString)
		// fs.Debugf(f, "list query: %q", queryString)
	}
	f.lastQuery = queryString // for unit tests
	if f.opt.ListChunk > 0 {
		list.PageSize(f.opt.ListChunk)
	}
	list.SupportsAllDrives(true)
	list.IncludeItemsFromAllDrives(true)
	if f.isTeamDrive && !f.opt.SharedWithMe {
		list.DriveId(f.opt.TeamDriveID)
		list.Corpora("drive")
	}
	// If using appDataFolder then need to add Spaces
	if f.rootFolderID == "appDataFolder" {
		list.Spaces("appDataFolder")
	}
	// Add resource Keys if necessary
	if resourceKeysHeader != "" {
		list.Header().Add("X-Goog-Drive-Resource-Keys", resourceKeysHeader)
	}

	fields := fmt.Sprintf("files(%s),nextPageToken,incompleteSearch", f.fileFields)

OUTER:
	for {
		var files *drive.FileList
		err = f.pacer.Call(func() (bool, error) {
			files, err = list.Fields(googleapi.Field(fields)).Context(ctx).Do()
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			return false, fmt.Errorf("couldn't list directory: %w", err)
		}
		if files.IncompleteSearch {
			fs.Errorf(f, "search result INCOMPLETE")
		}
		for _, item := range files.Files {
			item.Name = f.opt.Enc.ToStandardName(item.Name)
			if isShortcut(item) {
				// ignore shortcuts if directed
				if f.opt.SkipShortcuts {
					continue
				}
				// skip file shortcuts if directory only
				if directoriesOnly && item.ShortcutDetails.TargetMimeType != driveFolderType {
					continue
				}
				// skip directory shortcuts if file only
				if filesOnly && item.ShortcutDetails.TargetMimeType == driveFolderType {
					continue
				}
				item, err = f.resolveShortcut(ctx, item)
				if err != nil {
					return false, fmt.Errorf("list: %w", err)
				}
				// leave the dangling shortcut out of the listings
				// we've already logged about the dangling shortcut in resolveShortcut
				if f.opt.SkipDanglingShortcuts && item.MimeType == shortcutMimeTypeDangling {
					continue
				}
			}
			// Check the case of items is correct since
			// the `=` operator is case insensitive.
			if title != "" && title != item.Name {
				found := false
				for _, stem := range stems {
					if stem == item.Name {
						found = true
						break
					}
				}
				if !found {
					continue
				}
				_, exportName, _, _ := f.findExportFormat(ctx, item)
				if exportName == "" || exportName != title {
					continue
				}
			}
			if fn(item) {
				found = true
				break OUTER
			}
		}
		if files.NextPageToken == "" {
			break
		}
		list.PageToken(files.NextPageToken)
	}
	return
}
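
// As an illustration (assuming none of the trash/shared/starred options are
// set), a call like f.list(ctx, []string{"root"}, "foo.txt", false, false,
// false, false, fn) builds a query string along the lines of:
//
//	trashed=false and ('root' in parents) and (name='foo.txt')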

// Returns true if x is a power of 2 or zero
func isPowerOfTwo(x int64) bool {
	switch {
	case x == 0:
		return true
	case x < 0:
		return false
	default:
		return (x & (x - 1)) == 0
	}
}
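
// For example, isPowerOfTwo(0), isPowerOfTwo(1) and isPowerOfTwo(262144)
// return true, while isPowerOfTwo(-4) and isPowerOfTwo(3) return false.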

// add a charset parameter to all text/* MIME types
func fixMimeType(mimeTypeIn string) string {
	if mimeTypeIn == "" {
		return ""
	}
	mediaType, param, err := mime.ParseMediaType(mimeTypeIn)
	if err != nil {
		return mimeTypeIn
	}
	mimeTypeOut := mimeTypeIn
	if strings.HasPrefix(mediaType, "text/") && param["charset"] == "" {
		param["charset"] = "utf-8"
		mimeTypeOut = mime.FormatMediaType(mediaType, param)
	}
	if mimeTypeOut == "" {
		panic(fmt.Errorf("unable to fix MIME type %q", mimeTypeIn))
	}
	return mimeTypeOut
}
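
// For example, fixMimeType("text/plain") returns "text/plain; charset=utf-8",
// while non-text types such as "image/png" are passed through unchanged.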

func fixMimeTypeMap(in map[string][]string) (out map[string][]string) {
	out = make(map[string][]string, len(in))
	for k, v := range in {
		for i, mt := range v {
			v[i] = fixMimeType(mt)
		}
		out[fixMimeType(k)] = v
	}
	return out
}
func isInternalMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/vnd.google-apps.")
}
func isLinkMimeType(mimeType string) bool {
	return strings.HasPrefix(mimeType, "application/x-link-")
}

// parseExtensions parses a list of comma separated extensions
// into a list of unique extensions with leading "." and a list of associated MIME types
func parseExtensions(extensionsIn ...string) (extensions, mimeTypes []string, err error) {
	for _, extensionText := range extensionsIn {
		for _, extension := range strings.Split(extensionText, ",") {
			extension = strings.ToLower(strings.TrimSpace(extension))
			if extension == "" {
				continue
			}
			if len(extension) > 0 && extension[0] != '.' {
				extension = "." + extension
			}
			mt := mime.TypeByExtension(extension)
			if mt == "" {
				return extensions, mimeTypes, fmt.Errorf("couldn't find MIME type for extension %q", extension)
			}
			if !containsString(extensions, extension) {
				extensions = append(extensions, extension)
				mimeTypes = append(mimeTypes, mt)
			}
		}
	}
	return
}
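
// For example, parseExtensions("docx, xlsx") should return extensions
// [".docx", ".xlsx"] together with the MIME types registered for them in
// init(), e.g. ".xlsx" resolving to
// "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet".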

// getClient makes an http client according to the options
func getClient(ctx context.Context, opt *Options) *http.Client {
	t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
		if opt.DisableHTTP2 {
			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
		}
	})
	return &http.Client{
		Transport: t,
	}
}
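
// Note that setting Transport.TLSNextProto to a non-nil, empty map is the
// mechanism net/http documents for disabling HTTP/2 on a transport.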

func getServiceAccountClient(ctx context.Context, opt *Options, credentialsData []byte) (*http.Client, error) {
	scopes := driveScopes(opt.Scope)
	conf, err := google.JWTConfigFromJSON(credentialsData, scopes...)
	if err != nil {
		return nil, fmt.Errorf("error processing credentials: %w", err)
	}
	if opt.Impersonate != "" {
		conf.Subject = opt.Impersonate
	}
	ctxWithSpecialClient := oauthutil.Context(ctx, getClient(ctx, opt))
	return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

func createOAuthClient(ctx context.Context, opt *Options, name string, m configmap.Mapper) (*http.Client, error) {
	var oAuthClient *http.Client
	var err error

	// try loading service account credentials from env variable, then from a file
	if len(opt.ServiceAccountCredentials) == 0 && opt.ServiceAccountFile != "" {
		loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServiceAccountFile))
		if err != nil {
			return nil, fmt.Errorf("error opening service account credentials file: %w", err)
		}
		opt.ServiceAccountCredentials = string(loadedCreds)
	}
	if opt.ServiceAccountCredentials != "" {
		oAuthClient, err = getServiceAccountClient(ctx, opt, []byte(opt.ServiceAccountCredentials))
		if err != nil {
			return nil, fmt.Errorf("failed to create oauth client from service account: %w", err)
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClientWithBaseClient(ctx, name, m, driveConfig, getClient(ctx, opt))
		if err != nil {
			return nil, fmt.Errorf("failed to create oauth client: %w", err)
		}
	}
	return oAuthClient, nil
}

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if !isPowerOfTwo(int64(cs)) {
		return fmt.Errorf("%v isn't a power of two", cs)
	}
	if cs < minChunkSize {
		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}
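
// For example, with minChunkSize = googleapi.MinUploadChunkSize (256 KiB at
// the time of writing), checkUploadChunkSize(512*fs.Kibi) returns nil while
// checkUploadChunkSize(100*fs.Kibi) returns an error.
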
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// newFs partially constructs Fs from the path
//
// It constructs a valid Fs but doesn't attempt to figure out whether
// it is a file or a directory.
func newFs(ctx context.Context, name, path string, m configmap.Mapper) (*Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, fmt.Errorf("drive: upload cutoff: %w", err)
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("drive: chunk size: %w", err)
	}

	oAuthClient, err := createOAuthClient(ctx, opt, name, m)
	if err != nil {
		return nil, fmt.Errorf("drive: failed when making oauth client: %w", err)
	}

	root, err := parseDrivePath(path)
	if err != nil {
		return nil, err
	}

	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:            name,
		root:            root,
		opt:             *opt,
		ci:              ci,
		pacer:           fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(opt.PacerMinSleep), pacer.Burst(opt.PacerBurst))),
		m:               m,
		grouping:        listRGrouping,
		listRmu:         new(sync.Mutex),
		listRempties:    make(map[string]struct{}),
		dirResourceKeys: new(sync.Map),
	}
	f.isTeamDrive = opt.TeamDriveID != ""
	f.fileFields = f.getFileFields()
	f.features = (&fs.Features{
		DuplicateFiles:          true,
		ReadMimeType:            true,
		WriteMimeType:           true,
		CanHaveEmptyDirectories: true,
		ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs,
		FilterAware:             true,
	}).Fill(ctx, f)

	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
	if err != nil {
		return nil, fmt.Errorf("couldn't create Drive client: %w", err)
	}
	if f.opt.V2DownloadMinSize >= 0 {
		f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
		if err != nil {
			return nil, fmt.Errorf("couldn't create Drive v2 client: %w", err)
		}
	}

	return f, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, path string, m configmap.Mapper) (fs.Fs, error) {
	f, err := newFs(ctx, name, path, m)
	if err != nil {
		return nil, err
	}

	// Set the root folder ID
	if f.opt.RootFolderID != "" {
		// use root_folder ID if set
		f.rootFolderID = f.opt.RootFolderID
	} else if f.isTeamDrive {
		// otherwise use team_drive if set
		f.rootFolderID = f.opt.TeamDriveID
	} else {
		// otherwise look up the actual root ID
		rootID, err := f.getRootID(ctx)
		if err != nil {
			var gerr *googleapi.Error
			if errors.As(err, &gerr) && gerr.Code == 404 {
				// 404 means that this scope does not have permission to get the
				// root so just use "root"
				rootID = "root"
			} else {
				return nil, err
			}
		}
		f.rootFolderID = rootID
		fs.Debugf(f, "'root_folder_id = %s' - save this in the config to speed up startup", rootID)
	}

	f.dirCache = dircache.New(f.root, f.rootFolderID, f)

	// If resource key is set then cache it for the root folder id
	if f.opt.ResourceKey != "" {
		f.dirResourceKeys.Store(f.rootFolderID, f.opt.ResourceKey)
	}

	// Parse extensions
	if f.opt.Extensions != "" {
		if f.opt.ExportExtensions != defaultExportExtensions {
			return nil, errors.New("only one of 'formats' and 'export_formats' can be specified")
		}
		f.opt.Extensions, f.opt.ExportExtensions = "", f.opt.Extensions
	}
	f.exportExtensions, _, err = parseExtensions(f.opt.ExportExtensions, defaultExportExtensions)
	if err != nil {
		return nil, err
	}
	_, f.importMimeTypes, err = parseExtensions(f.opt.ImportExtensions)
	if err != nil {
		return nil, err
	}

	// Find the current root
	err = f.dirCache.FindRoot(ctx, false)
	if err != nil {
		// Assume it is a file
		newRoot, remote := dircache.SplitPath(f.root)
		tempF := *f
		tempF.dirCache = dircache.New(newRoot, f.rootFolderID, &tempF)
		tempF.root = newRoot
		// Make new Fs which is the parent
		err = tempF.dirCache.FindRoot(ctx, false)
		if err != nil {
			// No root so return old f
			return f, nil
		}
		_, err := tempF.NewObject(ctx, remote)
		if err != nil {
			// unable to list folder so return old f
			return f, nil
		}
		// XXX: update the old f here instead of returning tempF, since
		// `features` were already filled with functions having *f as a receiver.
		// See https://github.com/rclone/rclone/issues/2182
		f.dirCache = tempF.dirCache
		f.root = tempF.root
		return f, fs.ErrorIsFile
	}
	// fmt.Printf("Root id %s", f.dirCache.RootID())
	return f, nil
}

func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
	modifiedDate := info.ModifiedTime
	if f.opt.UseCreatedDate {
		modifiedDate = info.CreatedTime
	} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
		modifiedDate = info.SharedWithMeTime
	}
	size := info.Size
	if f.opt.SizeAsQuota {
		size = info.QuotaBytesUsed
	}
	return baseObject{
		fs:           f,
		remote:       remote,
		id:           info.Id,
		modifiedDate: modifiedDate,
		mimeType:     info.MimeType,
		bytes:        size,
		parents:      info.Parents,
	}
}
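
// Note on precedence: if both --drive-use-created-date and
// --drive-use-shared-date are set, the created date wins - the else-if
// above only consults the shared date when the created date option is off.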

// getFileFields gets the fields for a normal file Get or List
func (f *Fs) getFileFields() (fields googleapi.Field) {
	fields = partialFields
	if f.opt.AuthOwnerOnly {
		fields += ",owners"
	}
	if f.opt.UseSharedDate {
		fields += ",sharedWithMeTime"
	}
	if f.opt.SkipChecksumGphotos {
		fields += ",spaces"
	}
	if f.opt.SizeAsQuota {
		fields += ",quotaBytesUsed"
	}
	return fields
}
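
// For example, with --drive-auth-owner-only and --drive-size-as-quota both
// set this returns partialFields + ",owners,quotaBytesUsed", so the Files
// queries only request the extra fields the enabled options actually need.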

// newRegularObject creates an fs.Object for a normal drive.File
func (f *Fs) newRegularObject(remote string, info *drive.File) fs.Object {
	// wipe checksum if SkipChecksumGphotos and file is type Photo or Video
	if f.opt.SkipChecksumGphotos {
		for _, space := range info.Spaces {
			if space == "photos" {
				info.Md5Checksum = ""
				break
			}
		}
	}
	o := &Object{
		baseObject: f.newBaseObject(remote, info),
		url:        fmt.Sprintf("%sfiles/%s?alt=media", f.svc.BasePath, actualID(info.Id)),
		md5sum:     strings.ToLower(info.Md5Checksum),
		v2Download: f.opt.V2DownloadMinSize != -1 && info.Size >= int64(f.opt.V2DownloadMinSize),
	}
	if info.ResourceKey != "" {
		o.resourceKey = &info.ResourceKey
	}
	return o
}

// newDocumentObject creates an fs.Object for a google docs drive.File
func (f *Fs) newDocumentObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
	mediaType, _, err := mime.ParseMediaType(exportMimeType)
	if err != nil {
		return nil, err
	}
	url := info.ExportLinks[mediaType]
	baseObject := f.newBaseObject(remote+extension, info)
	baseObject.bytes = -1
	baseObject.mimeType = exportMimeType
	return &documentObject{
		baseObject:       baseObject,
		url:              url,
		documentMimeType: info.MimeType,
		extLen:           len(extension),
	}, nil
}
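
// Note that exported documents report a size of -1 (unknown): Drive only
// materializes the exported bytes at download time, so the size cannot be
// known when the object is created.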

// newLinkObject creates an fs.Object that represents a link to a google docs drive.File
func (f *Fs) newLinkObject(remote string, info *drive.File, extension, exportMimeType string) (fs.Object, error) {
	t := linkTemplate(exportMimeType)
	if t == nil {
		return nil, fmt.Errorf("unsupported link type %s", exportMimeType)
	}
	xdgIcon := _mimeTypeToXDGLinkIcons[info.MimeType]
	if xdgIcon == "" {
		xdgIcon = defaultXDGIcon
	}
	var buf bytes.Buffer
	err := t.Execute(&buf, struct {
		URL, Title, XDGIcon string
	}{
		info.WebViewLink, info.Name, xdgIcon,
	})
	if err != nil {
		return nil, fmt.Errorf("executing template failed: %w", err)
	}
	baseObject := f.newBaseObject(remote+extension, info)
	baseObject.bytes = int64(buf.Len())
	baseObject.mimeType = exportMimeType
	return &linkObject{
		baseObject: baseObject,
		content:    buf.Bytes(),
		extLen:     len(extension),
	}, nil
}

// newObjectWithInfo creates an fs.Object for any drive.File
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *drive.File) (fs.Object, error) {
	// If item has MD5 sum it is a file stored on drive
	if info.Md5Checksum != "" {
		return f.newRegularObject(remote, info), nil
	}
	extension, exportName, exportMimeType, isDocument := f.findExportFormat(ctx, info)
	return f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
}

// newObjectWithExportInfo creates an fs.Object for any drive.File and the result of findExportFormat
//
// When the drive.File cannot be represented as an fs.Object it will return (nil, nil).
func (f *Fs) newObjectWithExportInfo(
	ctx context.Context, remote string, info *drive.File,
	extension, exportName, exportMimeType string, isDocument bool) (o fs.Object, err error) {
	// Note that resolveShortcut will have been called already if
	// we are being called from a listing, in which case the
	// drive.File will already be resolved and this will do nothing.
	info, err = f.resolveShortcut(ctx, info)
	if err != nil {
		return nil, fmt.Errorf("new object: %w", err)
	}
	switch {
	case info.MimeType == driveFolderType:
		return nil, fs.ErrorIsDir
	case info.MimeType == shortcutMimeType:
		// We can only get here if f.opt.SkipShortcuts is set
		// and not from a listing. This is unlikely.
		fs.Debugf(remote, "Ignoring shortcut as skip shortcuts is set")
		return nil, fs.ErrorObjectNotFound
	case info.MimeType == shortcutMimeTypeDangling:
		// Pretend a dangling shortcut is a regular object
		// It will error if used, but appear in listings so it can be deleted
		return f.newRegularObject(remote, info), nil
	case info.Md5Checksum != "":
		// If item has MD5 sum it is a file stored on drive
		return f.newRegularObject(remote, info), nil
	case f.opt.SkipGdocs:
		fs.Debugf(remote, "Skipping google document type %q", info.MimeType)
		return nil, fs.ErrorObjectNotFound
	default:
		// If item MimeType is in the ExportFormats then it is a google doc
		if !isDocument {
			fs.Debugf(remote, "Ignoring unknown document type %q", info.MimeType)
			return nil, fs.ErrorObjectNotFound
		}
		if extension == "" {
			fs.Debugf(remote, "No export formats found for %q", info.MimeType)
			return nil, fs.ErrorObjectNotFound
		}
		if isLinkMimeType(exportMimeType) {
			return f.newLinkObject(remote, info, extension, exportMimeType)
		}
		return f.newDocumentObject(remote, info, extension, exportMimeType)
	}
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	info, extension, exportName, exportMimeType, isDocument, err := f.getRemoteInfoWithExport(ctx, remote)
	if err != nil {
		return nil, err
	}

	remote = remote[:len(remote)-len(extension)]
	obj, err := f.newObjectWithExportInfo(ctx, remote, info, extension, exportName, exportMimeType, isDocument)
	switch {
	case err != nil:
		return nil, err
	case obj == nil:
		return nil, fs.ErrorObjectNotFound
	default:
		return obj, nil
	}
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
	// Find the leaf in pathID
	pathID = actualID(pathID)
	found, err = f.list(ctx, []string{pathID}, leaf, true, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
		if !f.opt.SkipGdocs {
			_, exportName, _, isDocument := f.findExportFormat(ctx, item)
			if exportName == leaf {
				pathIDOut = item.Id
				return true
			}
			if isDocument {
				return false
			}
		}
		if item.Name == leaf {
			pathIDOut = item.Id
			return true
		}
		return false
	})
	return pathIDOut, found, err
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
	leaf = f.opt.Enc.FromStandardName(leaf)
	// fmt.Println("Making", path)
	// Define the metadata for the directory we are going to create.
	pathID = actualID(pathID)
	createInfo := &drive.File{
		Name:        leaf,
		Description: leaf,
		MimeType:    driveFolderType,
		Parents:     []string{pathID},
	}
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Create(createInfo).
			Fields("id").
			SupportsAllDrives(true).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return "", err
	}
	return info.Id, nil
}

// isAuthOwned checks if any of the item owners is the authenticated owner
func isAuthOwned(item *drive.File) bool {
	for _, owner := range item.Owners {
		if owner.Me {
			return true
		}
	}
	return false
}

// linkTemplate returns the Template for a MIME type or nil if the
// MIME type does not represent a link
func linkTemplate(mt string) *template.Template {
	templatesOnce.Do(func() {
		_linkTemplates = map[string]*template.Template{
			"application/x-link-desktop": template.Must(
				template.New("application/x-link-desktop").Parse(desktopTemplate)),
			"application/x-link-html": template.Must(
				template.New("application/x-link-html").Parse(htmlTemplate)),
			"application/x-link-url": template.Must(
				template.New("application/x-link-url").Parse(urlTemplate)),
			"application/x-link-webloc": template.Must(
				template.New("application/x-link-webloc").Parse(weblocTemplate)),
		}
	})
	return _linkTemplates[mt]
}
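
// The templates are parsed lazily under templatesOnce, so the parsing work
// happens at most once per process and only if link exports are used.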

func (f *Fs) fetchFormats(ctx context.Context) {
	fetchFormatsOnce.Do(func() {
		var about *drive.About
		var err error
		err = f.pacer.Call(func() (bool, error) {
			about, err = f.svc.About.Get().
				Fields("exportFormats,importFormats").
				Context(ctx).Do()
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			fs.Errorf(f, "Failed to get Drive exportFormats and importFormats: %v", err)
			_exportFormats = map[string][]string{}
			_importFormats = map[string][]string{}
			return
		}
		_exportFormats = fixMimeTypeMap(about.ExportFormats)
		_importFormats = fixMimeTypeMap(about.ImportFormats)
	})
}

// exportFormats returns the export formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not export any drive formats
func (f *Fs) exportFormats(ctx context.Context) map[string][]string {
	f.fetchFormats(ctx)
	return _exportFormats
}

// importFormats returns the import formats from drive, fetching them
// if necessary.
//
// if the fetch fails then it will not import any drive formats
func (f *Fs) importFormats(ctx context.Context) map[string][]string {
	f.fetchFormats(ctx)
	return _importFormats
}
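
// Both of the above read the package-level _exportFormats/_importFormats
// maps, which fetchFormats populates at most once per process under
// fetchFormatsOnce.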

// findExportFormatByMimeType works out the optimum export settings
// for the given MIME type.
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", false)
func (f *Fs) findExportFormatByMimeType(ctx context.Context, itemMimeType string) (
	extension, mimeType string, isDocument bool) {
	exportMimeTypes, isDocument := f.exportFormats(ctx)[itemMimeType]
	if isDocument {
		for _, _extension := range f.exportExtensions {
			_mimeType := mime.TypeByExtension(_extension)
			if isLinkMimeType(_mimeType) {
				return _extension, _mimeType, true
			}
			for _, emt := range exportMimeTypes {
				if emt == _mimeType {
					return _extension, emt, true
				}
				if _mimeType == _mimeTypeCustomTransform[emt] {
					return _extension, emt, true
				}
			}
		}
	}

	// If a link type export is configured and no more specific export
	// was found above, then all docs can still be exported as links
	for _, _extension := range f.exportExtensions {
		_mimeType := mime.TypeByExtension(_extension)
		if isLinkMimeType(_mimeType) {
			return _extension, _mimeType, true
		}
	}

	// else return empty
	return "", "", isDocument
}
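
// For example, with the default "docx,xlsx,pptx,svg" export extensions a
// Google Spreadsheet would typically match ".xlsx" here, as Drive usually
// offers the spreadsheetml MIME type among its export formats. The exact
// mappings come from the exportFormats call, so they may vary.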

// findExportFormat works out the optimum export settings
// for the given drive.File.
//
// Look through the exportExtensions and find the first format that can be
// converted. If none found then return ("", "", "", false)
func (f *Fs) findExportFormat(ctx context.Context, item *drive.File) (extension, filename, mimeType string, isDocument bool) {
	// If item has MD5 sum it is a file stored on drive
	if item.Md5Checksum != "" {
		return
	}
	// Folders can't be documents
	if item.MimeType == driveFolderType {
		return
	}
	extension, mimeType, isDocument = f.findExportFormatByMimeType(ctx, item.MimeType)
	if extension != "" {
		filename = item.Name + extension
	}
	return
}

// findImportFormat finds the matching upload MIME type for a file.
// If the given MIME type is in importMimeTypes, the matching upload
// MIME type is returned.
//
// When no match is found "" is returned.
func (f *Fs) findImportFormat(ctx context.Context, mimeType string) string {
	mimeType = fixMimeType(mimeType)
	ifs := f.importFormats(ctx)
	for _, mt := range f.importMimeTypes {
		if mt == mimeType {
			importMimeTypes := ifs[mimeType]
			if l := len(importMimeTypes); l > 0 {
				if l > 1 {
					fs.Infof(f, "found %d import formats for %q: %q", l, mimeType, importMimeTypes)
				}
				return importMimeTypes[0]
			}
		}
	}
	return ""
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return nil, err
	}
	directoryID = actualID(directoryID)

	var iErr error
	_, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
		entry, err := f.itemToDirEntry(ctx, path.Join(dir, item.Name), item)
		if err != nil {
			iErr = err
			return true
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return false
	})
	if err != nil {
		return nil, err
	}
	if iErr != nil {
		return nil, iErr
	}
	// If listing the root of a teamdrive and got no entries,
	// double check we have access
	if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
		err = f.teamDriveOK(ctx)
		if err != nil {
			return nil, err
		}
	}
	return entries, nil
}

// listREntry is a task to be executed by a listRRunner
type listREntry struct {
	id, path string
}

// listRSlices is a helper struct to sort two slices at once
type listRSlices struct {
	dirs  []string
	paths []string
}

func (s listRSlices) Sort() {
	sort.Sort(s)
}

func (s listRSlices) Len() int {
	return len(s.dirs)
}

func (s listRSlices) Swap(i, j int) {
	s.dirs[i], s.dirs[j] = s.dirs[j], s.dirs[i]
	s.paths[i], s.paths[j] = s.paths[j], s.paths[i]
}

func (s listRSlices) Less(i, j int) bool {
	return s.dirs[i] < s.dirs[j]
}
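
// Sorting dirs and paths together keeps the two slices aligned, so
// listRRunner can binary-search dirs with sort.SearchStrings and read the
// matching path from the same index.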

// listRRunner will read dirIDs from the in channel, perform the file listing and call cb with each DirEntry.
//
// In each cycle it will read up to grouping entries from the in channel without blocking.
// If an error occurs it will be sent to the out channel and then return. Once the in channel is closed,
// nil is sent to the out channel and the function returns.
func (f *Fs) listRRunner(ctx context.Context, wg *sync.WaitGroup, in chan listREntry, out chan<- error, cb func(fs.DirEntry) error, sendJob func(listREntry)) {
	var dirs []string
	var paths []string
	var grouping int32

	for dir := range in {
		dirs = append(dirs[:0], dir.id)
		paths = append(paths[:0], dir.path)
		grouping = atomic.LoadInt32(&f.grouping)
	waitloop:
		for i := int32(1); i < grouping; i++ {
			select {
			case d, ok := <-in:
				if !ok {
					break waitloop
				}
				dirs = append(dirs, d.id)
				paths = append(paths, d.path)
			default:
			}
		}
		listRSlices{dirs, paths}.Sort()
		var iErr error
		foundItems := false
		_, err := f.list(ctx, dirs, "", false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
			// shared with me items have no parents when at the root
			if f.opt.SharedWithMe && len(item.Parents) == 0 && len(paths) == 1 && paths[0] == "" {
				item.Parents = dirs
			}
			for _, parent := range item.Parents {
				var i int
				foundItems = true
				earlyExit := false
				// If only one item in paths then no need to search for the ID
				// assuming google drive is doing its job properly.
				//
				// Note that we are at the root when len(paths) == 1 && paths[0] == ""
				if len(paths) == 1 {
					// don't check parents at root because
					// - shared with me items have no parents at the root
					// - if using a root alias, e.g. "root" or "appDataFolder" the ID won't match
					i = 0
					// items at root can have more than one parent so we need to put
					// the item in just once.
					earlyExit = true
				} else {
					// only handle parents that are in the requested dirs list if not at root
					i = sort.SearchStrings(dirs, parent)
					if i == len(dirs) || dirs[i] != parent {
						continue
					}
				}
				remote := path.Join(paths[i], item.Name)
				entry, err := f.itemToDirEntry(ctx, remote, item)
				if err != nil {
					iErr = err
					return true
				}

				err = cb(entry)
				if err != nil {
					iErr = err
					return true
				}

				// If didn't check parents then insert only once
				if earlyExit {
					break
				}
			}
			return false
		})

		// Found no items in more than one directory. Retry these as
		// individual directories. This is to work around a bug in google
		// drive where (A in parents) or (B in parents) returns nothing
		// sometimes. See #3114, #4289 and
		// https://issuetracker.google.com/issues/149522397
		if len(dirs) > 1 && !foundItems {
			if atomic.SwapInt32(&f.grouping, 1) != 1 {
				fs.Debugf(f, "Disabling ListR to work around bug in drive as multi listing (%d) returned no entries", len(dirs))
			}
			f.listRmu.Lock()
			for i := range dirs {
				// Requeue the jobs
				job := listREntry{id: dirs[i], path: paths[i]}
				sendJob(job)
				// Make a note of these dirs - if they all turn
				// out to be empty then we can re-enable grouping
				f.listRempties[dirs[i]] = struct{}{}
			}
			f.listRmu.Unlock()
			fs.Debugf(f, "Recycled %d entries", len(dirs))
		}
		// If using a grouping of 1 and dir was empty then check to see if it
		// is part of the group that caused grouping to be disabled.
		if grouping == 1 && len(dirs) == 1 && !foundItems {
			f.listRmu.Lock()
			if _, found := f.listRempties[dirs[0]]; found {
				// Remove the ID
				delete(f.listRempties, dirs[0])
				// If no empties left => all the directories that
				// triggered the grouping being set to 1 were actually
				// empty so must have made a mistake
				if len(f.listRempties) == 0 {
					if atomic.SwapInt32(&f.grouping, listRGrouping) != listRGrouping {
						fs.Debugf(f, "Re-enabling ListR as previous detection was in error")
					}
				}
			}
			f.listRmu.Unlock()
		}

		for range dirs {
			wg.Done()
		}
		if iErr != nil {
			out <- iErr
			return
		}
		if err != nil {
			out <- err
			return
		}
	}
	out <- nil
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}
	directoryID = actualID(directoryID)

	mu := sync.Mutex{} // protects in and overflow
	wg := sync.WaitGroup{}
	in := make(chan listREntry, listRInputBuffer)
	out := make(chan error, f.ci.Checkers)
	list := walk.NewListRHelper(callback)
	overflow := []listREntry{}
	listed := 0

	// Send a job to the input channel if not closed. If the job
	// won't fit then queue it in the overflow slice.
	//
	// This will not block if the channel is full.
	sendJob := func(job listREntry) {
		mu.Lock()
		defer mu.Unlock()
		if in == nil {
			return
		}
		wg.Add(1)
		select {
		case in <- job:
		default:
			overflow = append(overflow, job)
			wg.Add(-1)
		}
	}

	// Send the entry to the caller, queueing any directories as new jobs
	cb := func(entry fs.DirEntry) error {
		if d, isDir := entry.(*fs.Dir); isDir {
			job := listREntry{actualID(d.ID()), d.Remote()}
			sendJob(job)
		}
		mu.Lock()
		defer mu.Unlock()
		listed++
		return list.Add(entry)
	}

	wg.Add(1)
	in <- listREntry{directoryID, dir}

	for i := 0; i < f.ci.Checkers; i++ {
		go f.listRRunner(ctx, &wg, in, out, cb, sendJob)
	}
	go func() {
		// wait until all directories are processed
		wg.Wait()
		// if the input channel overflowed add the collected entries to the channel now
		for len(overflow) > 0 {
			mu.Lock()
			l := len(overflow)
			// only fill half of the channel to prevent entries being put into overflow again
			if l > listRInputBuffer/2 {
				l = listRInputBuffer / 2
			}
			wg.Add(l)
			for _, d := range overflow[:l] {
				in <- d
			}
			overflow = overflow[l:]
			mu.Unlock()
			// wait again for the completion of all directories
			wg.Wait()
		}
		mu.Lock()
		if in != nil {
			// notify all workers to exit
			close(in)
			in = nil
		}
		mu.Unlock()
	}()
	// wait for all workers to finish
	for i := 0; i < f.ci.Checkers; i++ {
		e := <-out
		mu.Lock()
		// if one worker returns an error early, close the input so all other workers exit
		if e != nil && in != nil {
			err = e
			close(in)
			in = nil
		}
		mu.Unlock()
	}
	close(out)
	if err != nil {
		return err
	}
	err = list.Flush()
	if err != nil {
		return err
	}
	// If listing the root of a teamdrive and got no entries,
	// double check we have access
	if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
		err = f.teamDriveOK(ctx)
		if err != nil {
			return err
		}
	}
	return nil
}
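
// Design note: a bounded in channel feeds f.ci.Checkers listRRunner
// goroutines, sendJob spills to the overflow slice instead of blocking when
// the channel is full, and the drainer goroutine above requeues the overflow
// between WaitGroup cycles. This avoids the deadlock that would occur if
// workers blocked sending new jobs into the same channel they consume from.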

const shortcutSeparator = '\t'

// joinID adds an actual drive ID to the shortcut ID it came from
//
// directoryIDs in the dircache are these composite directory IDs so
// we must always unpack them before use.
func joinID(actual, shortcut string) string {
	return actual + string(shortcutSeparator) + shortcut
}

// splitID separates an actual ID and a shortcut ID from a composite
// ID. If there was no shortcut ID then it will return "" for it.
func splitID(compositeID string) (actualID, shortcutID string) {
	i := strings.IndexRune(compositeID, shortcutSeparator)
	if i < 0 {
		return compositeID, ""
	}
	return compositeID[:i], compositeID[i+1:]
}
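
// For example, for a shortcut "shortcut123" pointing at a target
// "target456" (illustrative IDs, not real Drive IDs):
//
//	joinID("target456", "shortcut123")   == "target456\tshortcut123"
//	splitID("target456\tshortcut123")    == ("target456", "shortcut123")
//	actualID("target456\tshortcut123")   == "target456"
//	shortcutID("target456\tshortcut123") == "shortcut123"
//
// Content operations use the actual (target) ID, while removing or renaming
// the shortcut itself uses the shortcut ID.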

// isShortcutID returns true if compositeID refers to a shortcut
func isShortcutID(compositeID string) bool {
	return strings.ContainsRune(compositeID, shortcutSeparator)
}

// actualID returns an actual ID from a composite ID
func actualID(compositeID string) (actualID string) {
	actualID, _ = splitID(compositeID)
	return actualID
}

// shortcutID returns a shortcut ID from a composite ID if available,
// or the actual ID if not.
func shortcutID(compositeID string) (shortcutID string) {
	actualID, shortcutID := splitID(compositeID)
	if shortcutID != "" {
		return shortcutID
	}
	return actualID
}

// isShortcut returns true if the item is a shortcut
func isShortcut(item *drive.File) bool {
	return item.MimeType == shortcutMimeType && item.ShortcutDetails != nil
}

// Dereference shortcut if required. It returns the newItem (which may
// be just item).
//
// If we return a new item then the ID will be adjusted to be a
// composite of the actual ID and the shortcut ID. This is to make
// sure that we have decided in all use places what we are doing with
// the ID.
//
// Note that we assume shortcuts can't point to shortcuts. Google
// drive web interface doesn't offer the option to create a shortcut
// to a shortcut. The documentation is silent on the issue.
func (f *Fs) resolveShortcut(ctx context.Context, item *drive.File) (newItem *drive.File, err error) {
	if f.opt.SkipShortcuts || item.MimeType != shortcutMimeType {
		return item, nil
	}
	if item.ShortcutDetails == nil {
		fs.Errorf(nil, "Expecting shortcutDetails in %v", item)
		return item, nil
	}
	newItem, err = f.getFile(ctx, item.ShortcutDetails.TargetId, f.fileFields)
	if err != nil {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) && gerr.Code == 404 {
			// 404 means dangling shortcut, so just return the shortcut with the mime type mangled
			fs.Logf(nil, "Dangling shortcut %q detected", item.Name)
			item.MimeType = shortcutMimeTypeDangling
			return item, nil
		}
		return nil, fmt.Errorf("failed to resolve shortcut: %w", err)
	}
	// make sure we use the Name, Parents and Trashed from the original item
	newItem.Name = item.Name
	newItem.Parents = item.Parents
	newItem.Trashed = item.Trashed
	// the new ID is a composite ID
	newItem.Id = joinID(newItem.Id, item.Id)
	return newItem, nil
}

// itemToDirEntry converts a drive.File to an fs.DirEntry.
// When the drive.File cannot be represented as an fs.DirEntry
// (nil, nil) is returned.
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, item *drive.File) (entry fs.DirEntry, err error) {
	switch {
	case item.MimeType == driveFolderType:
		// cache the directory ID for later lookups
		f.dirCache.Put(remote, item.Id)
		// cache the resource key for later lookups
		if item.ResourceKey != "" {
			f.dirResourceKeys.Store(item.Id, item.ResourceKey)
		}
		when, _ := time.Parse(timeFormatIn, item.ModifiedTime)
		d := fs.NewDir(remote, when).SetID(item.Id)
		if len(item.Parents) > 0 {
			d.SetParentID(item.Parents[0])
		}
		return d, nil
	case f.opt.AuthOwnerOnly && !isAuthOwned(item):
		// ignore object
	default:
		entry, err = f.newObjectWithInfo(ctx, remote, item)
		if err == fs.ErrorObjectNotFound {
			return nil, nil
		}
		return entry, err
	}
	return nil, nil
}

// Creates a drive.File info from the parameters passed in.
//
// Used to create new objects
func (f *Fs) createFileInfo(ctx context.Context, remote string, modTime time.Time) (*drive.File, error) {
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, true)
	if err != nil {
		return nil, err
	}
	directoryID = actualID(directoryID)

	leaf = f.opt.Enc.FromStandardName(leaf)
	// Define the metadata for the file we are going to create.
	createInfo := &drive.File{
		Name:         leaf,
		Description:  leaf,
		Parents:      []string{directoryID},
		ModifiedTime: modTime.Format(timeFormatOut),
	}
	return createInfo, nil
}

// Put the object
//
// Copy the reader into the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	existingObj, err := f.NewObject(ctx, src.Remote())
	switch err {
	case nil:
		return existingObj, existingObj.Update(ctx, in, src, options...)
	case fs.ErrorObjectNotFound:
		// Not found so create it
		return f.PutUnchecked(ctx, in, src, options...)
	default:
		return nil, err
	}
}
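
// Note that Put looks up any existing object first so repeated uploads of
// the same name update the file in place - blindly uploading with
// PutUnchecked would create duplicates, which Drive permits.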

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	remote := src.Remote()
	size := src.Size()
	modTime := src.ModTime(ctx)
	srcMimeType := fs.MimeTypeFromName(remote)
	srcExt := path.Ext(remote)
	exportExt := ""
	importMimeType := ""

	if f.importMimeTypes != nil && !f.opt.SkipGdocs {
		importMimeType = f.findImportFormat(ctx, srcMimeType)

		if isInternalMimeType(importMimeType) {
			remote = remote[:len(remote)-len(srcExt)]

			exportExt, _, _ = f.findExportFormatByMimeType(ctx, importMimeType)
			if exportExt == "" {
				return nil, fmt.Errorf("no export format found for %q", importMimeType)
			}
			if exportExt != srcExt && !f.opt.AllowImportNameChange {
				return nil, fmt.Errorf("can't convert %q to a document with a different export filetype (%q)", srcExt, exportExt)
			}
		}
	}

	createInfo, err := f.createFileInfo(ctx, remote, modTime)
	if err != nil {
		return nil, err
	}
	if importMimeType != "" {
		createInfo.MimeType = importMimeType
	} else {
		createInfo.MimeType = fs.MimeTypeFromName(remote)
	}

	var info *drive.File
	if size >= 0 && size < int64(f.opt.UploadCutoff) {
		// Make the API request to upload metadata and file data.
		// Don't retry, return a retry error instead
		err = f.pacer.CallNoRetry(func() (bool, error) {
			info, err = f.svc.Files.Create(createInfo).
				Media(in, googleapi.ContentType(srcMimeType), googleapi.ChunkSize(0)).
				Fields(partialFields).
				SupportsAllDrives(true).
				KeepRevisionForever(f.opt.KeepRevisionForever).
				Context(ctx).Do()
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, err
		}
	} else {
		// Upload the file in chunks
		info, err = f.Upload(ctx, in, size, srcMimeType, "", remote, createInfo)
		if err != nil {
			return nil, err
		}
	}
	return f.newObjectWithInfo(ctx, remote, info)
}
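
// Uploads below the cutoff go up in a single request - googleapi.ChunkSize(0)
// above asks the client library not to chunk the transfer. Larger uploads,
// and streams of unknown size, go through f.Upload which uses Drive's
// resumable upload protocol in chunks.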

// MergeDirs merges the contents of all the directories passed
// in into the first one and rmdirs the other directories.
func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
	if len(dirs) < 2 {
		return nil
	}
	newDirs := dirs[:0]
	for _, dir := range dirs {
		if isShortcutID(dir.ID()) {
			fs.Infof(dir, "skipping shortcut directory")
			continue
		}
		newDirs = append(newDirs, dir)
	}
	dirs = newDirs
	if len(dirs) < 2 {
		return nil
	}
	dstDir := dirs[0]
	for _, srcDir := range dirs[1:] {
		// list the objects
		infos := []*drive.File{}
		_, err := f.list(ctx, []string{srcDir.ID()}, "", false, false, f.opt.TrashedOnly, true, func(info *drive.File) bool {
			infos = append(infos, info)
			return false
		})
		if err != nil {
			return fmt.Errorf("MergeDirs list failed on %v: %w", srcDir, err)
		}
		// move them into place
		for _, info := range infos {
			fs.Infof(srcDir, "merging %q", info.Name)
			// Move the file into the destination
			err = f.pacer.Call(func() (bool, error) {
				_, err = f.svc.Files.Update(info.Id, nil).
					RemoveParents(srcDir.ID()).
					AddParents(dstDir.ID()).
					Fields("").
					SupportsAllDrives(true).
					Context(ctx).Do()
				return f.shouldRetry(ctx, err)
			})
			if err != nil {
				return fmt.Errorf("MergeDirs move failed on %q in %v: %w", info.Name, srcDir, err)
			}
		}
		// rmdir (into trash) the now empty source directory
		fs.Infof(srcDir, "removing empty directory")
		err = f.delete(ctx, srcDir.ID(), true)
		if err != nil {
			return fmt.Errorf("MergeDirs move failed to rmdir %q: %w", srcDir, err)
		}
	}
	return nil
}
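
// Moving each file here is purely a metadata update: Files.Update with
// RemoveParents/AddParents re-parents the file server-side, so no file
// content is copied when directories are merged.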

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	_, err := f.dirCache.FindDir(ctx, dir, true)
	return err
}

// delete a file or directory unconditionally by ID
func (f *Fs) delete(ctx context.Context, id string, useTrash bool) error {
	return f.pacer.Call(func() (bool, error) {
		var err error
		if useTrash {
			info := drive.File{
				Trashed: true,
			}
			_, err = f.svc.Files.Update(id, &info).
				Fields("").
				SupportsAllDrives(true).
				Context(ctx).Do()
		} else {
			err = f.svc.Files.Delete(id).
				Fields("").
				SupportsAllDrives(true).
				Context(ctx).Do()
		}
		return f.shouldRetry(ctx, err)
	})
}

// purgeCheck removes the dir directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
	root := path.Join(f.root, dir)
	dc := f.dirCache
	directoryID, err := dc.FindDir(ctx, dir, false)
	if err != nil {
		return err
	}
	directoryID, shortcutID := splitID(directoryID)
	// if directory is a shortcut remove it regardless
	if shortcutID != "" {
		return f.delete(ctx, shortcutID, f.opt.UseTrash)
	}
	var trashedFiles = false
	if check {
		found, err := f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
			if !item.Trashed {
				fs.Debugf(dir, "Rmdir: contains file: %q", item.Name)
				return true
			}
			fs.Debugf(dir, "Rmdir: contains trashed file: %q", item.Name)
			trashedFiles = true
			return false
		})
		if err != nil {
			return err
		}
		if found {
			return errors.New("directory not empty")
		}
	}
	if root != "" {
		// trash the directory if it had trashed files
		// in or the user wants to trash, otherwise
		// delete it.
		err = f.delete(ctx, directoryID, trashedFiles || f.opt.UseTrash)
		if err != nil {
			return err
		}
	} else if check {
		return errors.New("can't purge root directory")
	}
	f.dirCache.FlushDir(dir)
	if err != nil {
		return err
	}
	return nil
}

// Rmdir deletes a directory
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	return f.purgeCheck(ctx, dir, true)
}

// Precision of the object storage system
func (f *Fs) Precision() time.Duration {
	return time.Millisecond
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	var srcObj *baseObject
	ext := ""
	isDoc := false
	switch src := src.(type) {
	case *Object:
		srcObj = &src.baseObject
	case *documentObject:
		srcObj, ext = &src.baseObject, src.ext()
		isDoc = true
	case *linkObject:
		srcObj, ext = &src.baseObject, src.ext()
	default:
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Look to see if there is an existing object before we remove
	// the extension from the remote
	existingObject, _ := f.NewObject(ctx, remote)

	// Adjust the remote name to be without the extension if we
	// are about to create a doc.
	if ext != "" {
		if !strings.HasSuffix(remote, ext) {
			fs.Debugf(src, "Can't copy - not same document type")
			return nil, fs.ErrorCantCopy
		}
		remote = remote[:len(remote)-len(ext)]
	}

	createInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
	if err != nil {
		return nil, err
	}

	if isDoc {
		// preserve the description on copy for docs
		info, err := f.getFile(ctx, actualID(srcObj.id), "description")
		if err != nil {
			fs.Errorf(srcObj, "Failed to read description for Google Doc: %v", err)
		} else {
			createInfo.Description = info.Description
		}
	} else {
		// don't overwrite the description on copy for files
		// this should work for docs but it doesn't - it is probably a bug in Google Drive
		createInfo.Description = ""
	}

	// get the ID of the thing to copy
	// copy the contents if CopyShortcutContent
	// else copy the shortcut only
	id := shortcutID(srcObj.id)
	if f.opt.CopyShortcutContent {
		id = actualID(srcObj.id)
	}
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		copy := f.svc.Files.Copy(id, createInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			KeepRevisionForever(f.opt.KeepRevisionForever)
		srcObj.addResourceKey(copy.Header())
		info, err = copy.Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}
	newObject, err := f.newObjectWithInfo(ctx, remote, info)
	if err != nil {
		return nil, err
	}
	// Google docs aren't preserving their mod time after copy, so set them explicitly
	// See: https://github.com/rclone/rclone/issues/4517
	//
	// FIXME remove this when google fixes the problem!
	if isDoc {
		// A short sleep is needed here in order to make the
		// change effective, without it the change is ignored. This is
		// probably some eventual consistency nastiness.
		sleepTime := 2 * time.Second
		fs.Debugf(f, "Sleeping for %v before setting the modtime to work around drive bug - see #4517", sleepTime)
		time.Sleep(sleepTime)
		err = newObject.SetModTime(ctx, src.ModTime(ctx))
		if err != nil {
			return nil, err
		}
	}
	if existingObject != nil {
		err = existingObject.Remove(ctx)
		if err != nil {
			fs.Errorf(existingObject, "Failed to remove existing object after copy: %v", err)
		}
	}
	return newObject, nil
}

// Purge deletes all the files and the container
//
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge(ctx context.Context, dir string) error {
	if f.opt.TrashedOnly {
		return errors.New("can't purge with --drive-trashed-only, use delete if you want to selectively delete files")
	}
	return f.purgeCheck(ctx, dir, false)
}

type cleanupResult struct {
	Errors int
}

func (r cleanupResult) Error() string {
	return fmt.Sprintf("%d errors during cleanup - see log", r.Errors)
}
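
// cleanupResult doubles as an error: it implements the error interface so
// cleanupTeamDrive can return it both as the result summary and, when any
// deletions failed, as the error itself.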

func (f *Fs) cleanupTeamDrive(ctx context.Context, dir string, directoryID string) (r cleanupResult, err error) {
	_, err = f.list(ctx, []string{directoryID}, "", false, false, true, false, func(item *drive.File) bool {
		remote := path.Join(dir, item.Name)
		if item.ExplicitlyTrashed { // description is wrong - can also be set for folders - no need to recurse them
			err := f.delete(ctx, item.Id, false)
			if err != nil {
				r.Errors++
				fs.Errorf(remote, "%v", err)
			}
			return false
		}
		if item.MimeType == driveFolderType {
			if !isShortcutID(item.Id) {
				rNew, _ := f.cleanupTeamDrive(ctx, remote, item.Id)
				r.Errors += rNew.Errors
			}
			return false
		}
		return false
	})
	if err != nil {
		err = fmt.Errorf("failed to list directory: %w", err)
		r.Errors++
		fs.Errorf(dir, "%v", err)
	}
	if r.Errors != 0 {
		return r, r
	}
	return r, nil
}

// CleanUp empties the trash
func (f *Fs) CleanUp(ctx context.Context) error {
	if f.isTeamDrive {
		directoryID, err := f.dirCache.FindDir(ctx, "", false)
		if err != nil {
			return err
		}
		directoryID = actualID(directoryID)
		_, err = f.cleanupTeamDrive(ctx, "", directoryID)
		return err
	}
	err := f.pacer.Call(func() (bool, error) {
		err := f.svc.Files.EmptyTrash().Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	fs.Logf(f, "Note that emptying the trash happens in the background and can take some time.")
	return nil
}
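
// For illustration: CleanUp is what backs the "rclone cleanup" command,
// so a minimal invocation is
//
//	rclone cleanup drive:
//
// On a Shared (Team) Drive this walks the tree deleting trashed items
// one by one via cleanupTeamDrive; otherwise it issues a single
// Files.EmptyTrash call which completes asynchronously on Google's side.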

// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
	if !f.isTeamDrive {
		return nil
	}
	var td *drive.Drive
	err = f.pacer.Call(func() (bool, error) {
		td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return fmt.Errorf("failed to get Shared Drive info: %w", err)
	}
	fs.Debugf(f, "read info from Shared Drive %q", td.Name)
	return err
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	if f.isTeamDrive {
		err := f.teamDriveOK(ctx)
		if err != nil {
			return nil, err
		}
		// Teamdrives don't appear to have a usage API so just return empty
		return &fs.Usage{}, nil
	}
	var about *drive.About
	var err error
	err = f.pacer.Call(func() (bool, error) {
		about, err = f.svc.About.Get().Fields("storageQuota").Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("failed to get Drive storageQuota: %w", err)
	}
	q := about.StorageQuota
	usage := &fs.Usage{
		Used:    fs.NewUsageValue(q.UsageInDrive),           // bytes in use
		Trashed: fs.NewUsageValue(q.UsageInDriveTrash),      // bytes in trash
		Other:   fs.NewUsageValue(q.Usage - q.UsageInDrive), // other usage e.g. gmail in drive
	}
	if q.Limit > 0 {
		usage.Total = fs.NewUsageValue(q.Limit)          // quota of bytes that can be used
		usage.Free = fs.NewUsageValue(q.Limit - q.Usage) // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}
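
// Worked example with assumed figures (not values from the API): if the
// storageQuota reports Limit=15 GiB, Usage=10 GiB, UsageInDrive=8 GiB and
// UsageInDriveTrash=1 GiB, the mapping above yields
//
//	Used    = 8 GiB            (UsageInDrive)
//	Trashed = 1 GiB            (UsageInDriveTrash)
//	Other   = 10 - 8 = 2 GiB   (e.g. Gmail/Photos counted against the quota)
//	Total   = 15 GiB           (Limit)
//	Free    = 15 - 10 = 5 GiB  (Limit - Usage)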

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	var srcObj *baseObject
	ext := ""
	switch src := src.(type) {
	case *Object:
		srcObj = &src.baseObject
	case *documentObject:
		srcObj, ext = &src.baseObject, src.ext()
	case *linkObject:
		srcObj, ext = &src.baseObject, src.ext()
	default:
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}

	if ext != "" {
		if !strings.HasSuffix(remote, ext) {
			fs.Debugf(src, "Can't move - not same document type")
			return nil, fs.ErrorCantMove
		}
		remote = remote[:len(remote)-len(ext)]
	}

	_, srcParentID, err := srcObj.fs.dirCache.FindPath(ctx, src.Remote(), false)
	if err != nil {
		return nil, err
	}
	srcParentID = actualID(srcParentID)

	// Temporary Object under construction
	dstInfo, err := f.createFileInfo(ctx, remote, src.ModTime(ctx))
	if err != nil {
		return nil, err
	}
	dstParents := strings.Join(dstInfo.Parents, ",")
	dstInfo.Parents = nil

	// Do the move
	var info *drive.File
	err = f.pacer.Call(func() (bool, error) {
		info, err = f.svc.Files.Update(shortcutID(srcObj.id), dstInfo).
			RemoveParents(srcParentID).
			AddParents(dstParents).
			Fields(partialFields).
			SupportsAllDrives(true).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, err
	}
	return f.newObjectWithInfo(ctx, remote, info)
}
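
// Example of the extension handling above (hypothetical names): moving an
// exported Google Doc listed as "report.docx" to "notes/report.docx" first
// strips the export extension, so the Files.Update call renames the
// underlying document to "report" inside the "notes" folder:
//
//	ext    = ".docx"
//	remote = "notes/report.docx" -> "notes/report"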

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
	id, err := f.dirCache.FindDir(ctx, remote, false)
	if err == nil {
		fs.Debugf(f, "attempting to share directory '%s'", remote)
		id = shortcutID(id)
	} else {
		fs.Debugf(f, "attempting to share single file '%s'", remote)
		o, err := f.NewObject(ctx, remote)
		if err != nil {
			return "", err
		}
		id = shortcutID(o.(fs.IDer).ID())
	}
	permission := &drive.Permission{
		AllowFileDiscovery: false,
		Role:               "reader",
		Type:               "anyone",
	}
	err = f.pacer.Call(func() (bool, error) {
		// TODO: On TeamDrives this might fail if lacking permissions to change ACLs.
		// Need to either check `canShare` attribute on the object or see if a sufficient permission is already present.
		_, err = f.svc.Permissions.Create(id, permission).
			Fields("").
			SupportsAllDrives(true).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("https://drive.google.com/open?id=%s", id), nil
}
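
// For illustration: this implements the fs.PublicLinker interface behind
// "rclone link", e.g.
//
//	rclone link drive:path/to/file
//
// which grants the anonymous "reader" permission shown above and prints a
// URL of the form https://drive.google.com/open?id=<id>.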

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}
	srcID, srcDirectoryID, srcLeaf, dstDirectoryID, dstLeaf, err := f.dirCache.DirMove(ctx, srcFs.dirCache, srcFs.root, srcRemote, f.root, dstRemote)
	if err != nil {
		return err
	}
	_ = srcLeaf

	dstDirectoryID = actualID(dstDirectoryID)
	srcDirectoryID = actualID(srcDirectoryID)

	// Do the move
	patch := drive.File{
		Name: dstLeaf,
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Files.Update(shortcutID(srcID), &patch).
			RemoveParents(srcDirectoryID).
			AddParents(dstDirectoryID).
			Fields("").
			SupportsAllDrives(true).
			Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	srcFs.dirCache.FlushDir(srcRemote)
	return nil
}

// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behavior of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
	go func() {
		// get the StartPageToken early so all changes from now on get processed
		startPageToken, err := f.changeNotifyStartPageToken(ctx)
		if err != nil {
			fs.Infof(f, "Failed to get StartPageToken: %s", err)
		}
		var ticker *time.Ticker
		var tickerC <-chan time.Time
		for {
			select {
			case pollInterval, ok := <-pollIntervalChan:
				if !ok {
					if ticker != nil {
						ticker.Stop()
					}
					return
				}
				if ticker != nil {
					ticker.Stop()
					ticker, tickerC = nil, nil
				}
				if pollInterval != 0 {
					ticker = time.NewTicker(pollInterval)
					tickerC = ticker.C
				}
			case <-tickerC:
				if startPageToken == "" {
					startPageToken, err = f.changeNotifyStartPageToken(ctx)
					if err != nil {
						fs.Infof(f, "Failed to get StartPageToken: %s", err)
						continue
					}
				}
				fs.Debugf(f, "Checking for changes on remote")
				startPageToken, err = f.changeNotifyRunner(ctx, notifyFunc, startPageToken)
				if err != nil {
					fs.Infof(f, "Change notify listener failure: %s", err)
				}
			}
		}
	}()
}
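
// Minimal consumer sketch (assumed setup, for illustration only): polling
// starts once a non-zero interval is sent on the channel and stops when
// the channel is closed.
//
//	ch := make(chan time.Duration)
//	f.ChangeNotify(ctx, func(path string, entryType fs.EntryType) {
//		fmt.Println("changed:", path, entryType)
//	}, ch)
//	ch <- 30 * time.Second // poll every 30s
//	// ... later ...
//	close(ch) // stop the poller goroutine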

func (f *Fs) changeNotifyStartPageToken(ctx context.Context) (pageToken string, err error) {
	var startPageToken *drive.StartPageToken
	err = f.pacer.Call(func() (bool, error) {
		changes := f.svc.Changes.GetStartPageToken().SupportsAllDrives(true)
		if f.isTeamDrive {
			changes.DriveId(f.opt.TeamDriveID)
		}
		startPageToken, err = changes.Context(ctx).Do()
		return f.shouldRetry(ctx, err)
	})
	if err != nil {
		return
	}
	return startPageToken.StartPageToken, nil
}

func (f *Fs) changeNotifyRunner(ctx context.Context, notifyFunc func(string, fs.EntryType), startPageToken string) (newStartPageToken string, err error) {
	pageToken := startPageToken
	for {
		var changeList *drive.ChangeList
		err = f.pacer.Call(func() (bool, error) {
			changesCall := f.svc.Changes.List(pageToken).
				Fields("nextPageToken,newStartPageToken,changes(fileId,file(name,parents,mimeType))")
			if f.opt.ListChunk > 0 {
				changesCall.PageSize(f.opt.ListChunk)
			}
			changesCall.SupportsAllDrives(true)
			changesCall.IncludeItemsFromAllDrives(true)
			if f.isTeamDrive {
				changesCall.DriveId(f.opt.TeamDriveID)
			}
			// If using appDataFolder then need to add Spaces
			if f.rootFolderID == "appDataFolder" {
				changesCall.Spaces("appDataFolder")
			}
			changeList, err = changesCall.Context(ctx).Do()
			return f.shouldRetry(ctx, err)
		})
		if err != nil {
			return
		}

		type entryType struct {
			path      string
			entryType fs.EntryType
		}
		var pathsToClear []entryType
		for _, change := range changeList.Changes {
			// find the previous path
			if path, ok := f.dirCache.GetInv(change.FileId); ok {
				if change.File != nil && change.File.MimeType != driveFolderType {
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
				} else {
					pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
				}
			}

			// find the new path
			if change.File != nil {
				change.File.Name = f.opt.Enc.ToStandardName(change.File.Name)
				changeType := fs.EntryDirectory
				if change.File.MimeType != driveFolderType {
					changeType = fs.EntryObject
				}
				// translate the parent dir of this object
				if len(change.File.Parents) > 0 {
					for _, parent := range change.File.Parents {
						if parentPath, ok := f.dirCache.GetInv(parent); ok {
							// and append the drive file name to compute the full file name
							newPath := path.Join(parentPath, change.File.Name)
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: newPath, entryType: changeType})
						}
					}
				} else { // a true root object that is changed
					pathsToClear = append(pathsToClear, entryType{path: change.File.Name, entryType: changeType})
				}
			}
		}

		visitedPaths := make(map[string]struct{})
		for _, entry := range pathsToClear {
			if _, ok := visitedPaths[entry.path]; ok {
				continue
			}
			visitedPaths[entry.path] = struct{}{}
			notifyFunc(entry.path, entry.entryType)
		}

		switch {
		case changeList.NewStartPageToken != "":
			return changeList.NewStartPageToken, nil
		case changeList.NextPageToken != "":
			pageToken = changeList.NextPageToken
		default:
			return
		}
	}
}

// DirCacheFlush resets the directory cache - used in testing as an
// optional interface
func (f *Fs) DirCacheFlush() {
	f.dirCache.ResetRoot()
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

func (f *Fs) changeChunkSize(chunkSizeString string) (err error) {
	chunkSizeInt, err := strconv.ParseInt(chunkSizeString, 10, 64)
	if err != nil {
		return fmt.Errorf("couldn't convert chunk size to int: %w", err)
	}
	chunkSize := fs.SizeSuffix(chunkSizeInt)
	if chunkSize == f.opt.ChunkSize {
		return nil
	}
	err = checkUploadChunkSize(chunkSize)
	if err == nil {
		f.opt.ChunkSize = chunkSize
	}
	return err
}
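
// Worked example: "rclone backend set drive: -o chunk_size=67108864"
// arrives here as the string "67108864", which parses to 64 MiB (1<<26).
// That is a power of two at or above the 256 KiB (1<<18) resumable-upload
// minimum, so checkUploadChunkSize accepts it and f.opt.ChunkSize is
// updated in place.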

func (f *Fs) changeServiceAccountFile(ctx context.Context, file string) (err error) {
	fs.Debugf(nil, "Changing Service Account File from %s to %s", f.opt.ServiceAccountFile, file)
	if file == f.opt.ServiceAccountFile {
		return nil
	}
	oldSvc := f.svc
	oldv2Svc := f.v2Svc
	oldOAuthClient := f.client
	oldFile := f.opt.ServiceAccountFile
	oldCredentials := f.opt.ServiceAccountCredentials
	defer func() {
		// Undo all the changes instead of doing selective undos
		if err != nil {
			f.svc = oldSvc
			f.v2Svc = oldv2Svc
			f.client = oldOAuthClient
			f.opt.ServiceAccountFile = oldFile
			f.opt.ServiceAccountCredentials = oldCredentials
		}
	}()
	f.opt.ServiceAccountFile = file
	f.opt.ServiceAccountCredentials = ""
	oAuthClient, err := createOAuthClient(ctx, &f.opt, f.name, f.m)
	if err != nil {
		return fmt.Errorf("drive: failed when making oauth client: %w", err)
	}
	f.client = oAuthClient
	f.svc, err = drive.NewService(context.Background(), option.WithHTTPClient(f.client))
	if err != nil {
		return fmt.Errorf("couldn't create Drive client: %w", err)
	}
	if f.opt.V2DownloadMinSize >= 0 {
		f.v2Svc, err = drive_v2.NewService(context.Background(), option.WithHTTPClient(f.client))
		if err != nil {
			return fmt.Errorf("couldn't create Drive v2 client: %w", err)
		}
	}
	return nil
}
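
// The save-then-defer-rollback idiom above generalizes to any multi-field
// mutation. A minimal sketch with hypothetical names (not part of this
// backend):
//
//	func (c *config) swap(v string) (err error) {
//		old := c.value
//		defer func() {
//			if err != nil {
//				c.value = old // restore on any failure below
//			}
//		}()
//		c.value = v
//		return c.revalidate() // named return err drives the rollback
//	}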

// Create a shortcut from (f, srcPath) to (dstFs, dstPath)
//
// Will not overwrite existing files
func (f *Fs) makeShortcut(ctx context.Context, srcPath string, dstFs *Fs, dstPath string) (o fs.Object, err error) {
	srcFs := f
	srcPath = strings.Trim(srcPath, "/")
	dstPath = strings.Trim(dstPath, "/")
	if dstPath == "" {
		return nil, errors.New("shortcut destination can't be root directory")
	}
	// Find source
	var srcID string
	isDir := false
	if srcPath == "" {
		// source is root directory
		srcID, err = f.dirCache.RootID(ctx, false)
		if err != nil {
			return nil, err
		}
		isDir = true
	} else if srcObj, err := srcFs.NewObject(ctx, srcPath); err != nil {
		if err != fs.ErrorIsDir {
			return nil, fmt.Errorf("can't find source: %w", err)
		}
		// source was a directory
		srcID, err = srcFs.dirCache.FindDir(ctx, srcPath, false)
		if err != nil {
			return nil, fmt.Errorf("failed to find source dir: %w", err)
		}
		isDir = true
	} else {
		// source was a file
		srcID = srcObj.(*Object).id
	}
	srcID = actualID(srcID) // link to underlying object not to shortcut

	// Find destination
	_, err = dstFs.NewObject(ctx, dstPath)
	if err != fs.ErrorObjectNotFound {
		if err == nil {
			err = errors.New("existing file")
		} else if err == fs.ErrorIsDir {
			err = errors.New("existing directory")
		}
		return nil, fmt.Errorf("not overwriting shortcut target: %w", err)
	}
	// Create destination shortcut
	createInfo, err := dstFs.createFileInfo(ctx, dstPath, time.Now())
	if err != nil {
		return nil, fmt.Errorf("shortcut destination failed: %w", err)
	}
	createInfo.MimeType = shortcutMimeType
	createInfo.ShortcutDetails = &drive.FileShortcutDetails{
		TargetId: srcID,
	}
	var info *drive.File
	err = dstFs.pacer.Call(func() (bool, error) {
		info, err = dstFs.svc.Files.Create(createInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			KeepRevisionForever(dstFs.opt.KeepRevisionForever).
			Context(ctx).Do()
		return dstFs.shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("shortcut creation failed: %w", err)
	}
	if isDir {
		return nil, nil
	}
	return dstFs.newObjectWithInfo(ctx, dstPath, info)
}

// List all team drives
func (f *Fs) listTeamDrives(ctx context.Context) (drives []*drive.Drive, err error) {
	drives = []*drive.Drive{}
	listTeamDrives := f.svc.Drives.List().PageSize(100)
	var defaultFs Fs // default Fs with default Options
	for {
		var teamDrives *drive.DriveList
		err = f.pacer.Call(func() (bool, error) {
			teamDrives, err = listTeamDrives.Context(ctx).Do()
			return defaultFs.shouldRetry(ctx, err)
		})
		if err != nil {
			return drives, fmt.Errorf("listing Team Drives failed: %w", err)
		}
		drives = append(drives, teamDrives.Drives...)
		if teamDrives.NextPageToken == "" {
			break
		}
		listTeamDrives.PageToken(teamDrives.NextPageToken)
	}
	return drives, nil
}

type unTrashResult struct {
	Untrashed int
	Errors    int
}

func (r unTrashResult) Error() string {
	return fmt.Sprintf("%d errors while untrashing - see log", r.Errors)
}

// Restore the trashed files from dir, directoryID recursing if needed
func (f *Fs) unTrash(ctx context.Context, dir string, directoryID string, recurse bool) (r unTrashResult, err error) {
	directoryID = actualID(directoryID)
	fs.Debugf(dir, "finding trash to restore in directory %q", directoryID)
	_, err = f.list(ctx, []string{directoryID}, "", false, false, f.opt.TrashedOnly, true, func(item *drive.File) bool {
		remote := path.Join(dir, item.Name)
		if item.ExplicitlyTrashed {
			fs.Infof(remote, "restoring %q", item.Id)
			if operations.SkipDestructive(ctx, remote, "restore") {
				return false
			}
			update := drive.File{
				ForceSendFields: []string{"Trashed"}, // necessary to set false value
				Trashed:         false,
			}
			err := f.pacer.Call(func() (bool, error) {
				_, err := f.svc.Files.Update(item.Id, &update).
					SupportsAllDrives(true).
					Fields("trashed").
					Context(ctx).Do()
				return f.shouldRetry(ctx, err)
			})
			if err != nil {
				err = fmt.Errorf("failed to restore: %w", err)
				r.Errors++
				fs.Errorf(remote, "%v", err)
			} else {
				r.Untrashed++
			}
		}
		if recurse && item.MimeType == "application/vnd.google-apps.folder" {
			if !isShortcutID(item.Id) {
				rNew, _ := f.unTrash(ctx, remote, item.Id, recurse)
				r.Untrashed += rNew.Untrashed
				r.Errors += rNew.Errors
			}
		}
		return false
	})
	if err != nil {
		err = fmt.Errorf("failed to list directory: %w", err)
		r.Errors++
		fs.Errorf(dir, "%v", err)
	}
	if r.Errors != 0 {
		return r, r
	}
	return r, nil
}

// Untrash dir
func (f *Fs) unTrashDir(ctx context.Context, dir string, recurse bool) (r unTrashResult, err error) {
	directoryID, err := f.dirCache.FindDir(ctx, dir, false)
	if err != nil {
		r.Errors++
		return r, err
	}
	return f.unTrash(ctx, dir, directoryID, true)
}

// copy file with id to dest
func (f *Fs) copyID(ctx context.Context, id, dest string) (err error) {
	info, err := f.getFile(ctx, id, f.fileFields)
	if err != nil {
		return fmt.Errorf("couldn't find id: %w", err)
	}
	if info.MimeType == driveFolderType {
		return fmt.Errorf("can't copy directory use: rclone copy --drive-root-folder-id %s %s %s", id, fs.ConfigString(f), dest)
	}
	info.Name = f.opt.Enc.ToStandardName(info.Name)
	o, err := f.newObjectWithInfo(ctx, info.Name, info)
	if err != nil {
		return err
	}
	destDir, destLeaf, err := fspath.Split(dest)
	if err != nil {
		return err
	}
	if destLeaf == "" {
		destLeaf = path.Base(o.Remote())
	}
	if destDir == "" {
		destDir = "."
	}
	dstFs, err := cache.Get(ctx, destDir)
	if err != nil {
		return err
	}
	_, err = operations.Copy(ctx, dstFs, nil, destLeaf, o)
	if err != nil {
		return fmt.Errorf("copy failed: %w", err)
	}
	return nil
}

var commandHelp = []fs.CommandHelp{{
	Name:  "get",
	Short: "Get command for fetching the drive config parameters",
	Long: `This is a get command which will be used to fetch the various drive config parameters

Usage Examples:

    rclone backend get drive: [-o service_account_file] [-o chunk_size]
    rclone rc backend/command command=get fs=drive: [-o service_account_file] [-o chunk_size]
`,
	Opts: map[string]string{
		"chunk_size":           "show the current upload chunk size",
		"service_account_file": "show the current service account file",
	},
}, {
	Name:  "set",
	Short: "Set command for updating the drive config parameters",
	Long: `This is a set command which will be used to update the various drive config parameters

Usage Examples:

    rclone backend set drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
    rclone rc backend/command command=set fs=drive: [-o service_account_file=sa.json] [-o chunk_size=67108864]
`,
	Opts: map[string]string{
		"chunk_size":           "update the current upload chunk size",
		"service_account_file": "update the current service account file",
	},
}, {
	Name:  "shortcut",
	Short: "Create shortcuts from files or directories",
	Long: `This command creates shortcuts from files or directories.

Usage:

    rclone backend shortcut drive: source_item destination_shortcut
    rclone backend shortcut drive: source_item -o target=drive2: destination_shortcut

In the first example this creates a shortcut from the "source_item"
which can be a file or a directory to the "destination_shortcut". The
"source_item" and the "destination_shortcut" should be relative paths
from "drive:"

In the second example this creates a shortcut from the "source_item"
relative to "drive:" to the "destination_shortcut" relative to
"drive2:". This may fail with a permission error if the user
authenticated with "drive2:" can't read files from "drive:".
`,
	Opts: map[string]string{
		"target": "optional target remote for the shortcut destination",
	},
}, {
	Name:  "drives",
	Short: "List the Shared Drives available to this account",
	Long: `This command lists the Shared Drives (Team Drives) available to this
account.

Usage:

    rclone backend [-o config] drives drive:

This will return a JSON list of objects like this

    [
        {
            "id": "0ABCDEF-01234567890",
            "kind": "drive#teamDrive",
            "name": "My Drive"
        },
        {
            "id": "0ABCDEFabcdefghijkl",
            "kind": "drive#teamDrive",
            "name": "Test Drive"
        }
    ]

With the -o config parameter it will output the list in a format
suitable for adding to a config file to make aliases for all the
drives found and a combined drive.

    [My Drive]
    type = alias
    remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:

    [Test Drive]
    type = alias
    remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:

    [AllDrives]
    type = combine
    upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"

Adding this to the rclone config file will cause those team drives to
be accessible with the aliases shown. Any illegal characters will be
substituted with "_" and duplicate names will have numbers suffixed.
It will also add a remote called AllDrives which shows all the shared
drives combined into one directory tree.
`,
}, {
	Name:  "untrash",
	Short: "Untrash files and directories",
	Long: `This command untrashes all the files and directories in the directory
passed in recursively.

Usage:

This takes an optional directory to untrash which makes this easier to
use via the API.

    rclone backend untrash drive:directory
    rclone backend --interactive untrash drive:directory subdir

Use the --interactive/-i or --dry-run flag to see what would be restored before restoring it.

Result:

    {
        "Untrashed": 17,
        "Errors": 0
    }
`,
}, {
	Name:  "copyid",
	Short: "Copy files by ID",
	Long: `This command copies files by ID

Usage:

    rclone backend copyid drive: ID path
    rclone backend copyid drive: ID1 path1 ID2 path2

It copies the drive file with ID given to the path (an rclone path which
will be passed internally to rclone copyto). The ID and path pairs can be
repeated.

The path should end with a / to indicate copy the file as named to
this directory. If it doesn't end with a / then the last path
component will be used as the file name.

If the destination is a drive backend then server-side copying will be
attempted if possible.

Use the --interactive/-i or --dry-run flag to see what would be copied before copying.
`,
}, {
	Name:  "exportformats",
	Short: "Dump the export formats for debug purposes",
}, {
	Name:  "importformats",
	Short: "Dump the import formats for debug purposes",
}}

// Command the backend to run a named command
//
// The command run is name
// args may be used to read arguments from
// opts may be used to read optional arguments from
//
// The result should be capable of being JSON encoded
// If it is a string or a []string it will be shown to the user
// otherwise it will be JSON encoded and shown to the user like that
func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
	switch name {
	case "get":
		out := make(map[string]string)
		if _, ok := opt["service_account_file"]; ok {
			out["service_account_file"] = f.opt.ServiceAccountFile
		}
		if _, ok := opt["chunk_size"]; ok {
			out["chunk_size"] = f.opt.ChunkSize.String()
		}
		return out, nil
	case "set":
		out := make(map[string]map[string]string)
		if serviceAccountFile, ok := opt["service_account_file"]; ok {
			serviceAccountMap := make(map[string]string)
			serviceAccountMap["previous"] = f.opt.ServiceAccountFile
			if err = f.changeServiceAccountFile(ctx, serviceAccountFile); err != nil {
				return out, err
			}
			f.m.Set("service_account_file", serviceAccountFile)
			serviceAccountMap["current"] = f.opt.ServiceAccountFile
			out["service_account_file"] = serviceAccountMap
		}
		if chunkSize, ok := opt["chunk_size"]; ok {
			chunkSizeMap := make(map[string]string)
			chunkSizeMap["previous"] = f.opt.ChunkSize.String()
			if err = f.changeChunkSize(chunkSize); err != nil {
				return out, err
			}
			chunkSizeString := f.opt.ChunkSize.String()
			f.m.Set("chunk_size", chunkSizeString)
			chunkSizeMap["current"] = chunkSizeString
			out["chunk_size"] = chunkSizeMap
		}
		return out, nil
	case "shortcut":
		if len(arg) != 2 {
			return nil, errors.New("need exactly 2 arguments")
		}
		dstFs := f
		target, ok := opt["target"]
		if ok {
			targetFs, err := cache.Get(ctx, target)
			if err != nil {
				return nil, fmt.Errorf("couldn't find target: %w", err)
			}
			dstFs, ok = targetFs.(*Fs)
			if !ok {
				return nil, errors.New("target is not a drive backend")
			}
		}
		return f.makeShortcut(ctx, arg[0], dstFs, arg[1])
	case "drives":
		drives, err := f.listTeamDrives(ctx)
		if err != nil {
			return nil, err
		}
		if _, ok := opt["config"]; ok {
			lines := []string{}
			upstreams := []string{}
			names := make(map[string]struct{}, len(drives))
			for i, drive := range drives {
				name := fspath.MakeConfigName(drive.Name)
				for {
					if _, found := names[name]; !found {
						break
					}
					name += fmt.Sprintf("-%d", i)
				}
				names[name] = struct{}{}
				lines = append(lines, "")
				lines = append(lines, fmt.Sprintf("[%s]", name))
				lines = append(lines, "type = alias")
				lines = append(lines, fmt.Sprintf("remote = %s,team_drive=%s,root_folder_id=:", f.name, drive.Id))
				upstreams = append(upstreams, fmt.Sprintf(`"%s=%s:"`, name, name))
			}
			lines = append(lines, "")
			lines = append(lines, "[AllDrives]")
			lines = append(lines, "type = combine")
			lines = append(lines, fmt.Sprintf("upstreams = %s", strings.Join(upstreams, " ")))
			return lines, nil
		}
		return drives, nil
	case "untrash":
		dir := ""
		if len(arg) > 0 {
			dir = arg[0]
		}
		return f.unTrashDir(ctx, dir, true)
	case "copyid":
		if len(arg)%2 != 0 {
			return nil, errors.New("need an even number of arguments")
		}
		for len(arg) > 0 {
			id, dest := arg[0], arg[1]
			arg = arg[2:]
			err = f.copyID(ctx, id, dest)
			if err != nil {
				return nil, fmt.Errorf("failed copying %q to %q: %w", id, dest, err)
			}
		}
		return nil, nil
	case "exportformats":
		return f.exportFormats(ctx), nil
	case "importformats":
		return f.importFormats(ctx), nil
	default:
		return nil, fs.ErrorCommandNotFound
	}
}
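
// Illustrative example with an assumed previous value: running
//
//	rclone backend set drive: -o chunk_size=67108864
//
// goes through the "set" case above and returns a JSON map recording the
// transition, e.g.
//
//	{"chunk_size": {"previous": "8Mi", "current": "64Mi"}}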

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *baseObject) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *baseObject) String() string {
	return o.remote
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *baseObject) Remote() string {
	return o.remote
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

func (o *baseObject) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return "", nil
}

// Size returns the size of an object in bytes
func (o *baseObject) Size() int64 {
	return o.bytes
}

// getRemoteInfoWithExport returns a drive.File and the export settings for the remote
func (f *Fs) getRemoteInfoWithExport(ctx context.Context, remote string) (
	info *drive.File, extension, exportName, exportMimeType string, isDocument bool, err error) {
	leaf, directoryID, err := f.dirCache.FindPath(ctx, remote, false)
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, "", "", "", false, fs.ErrorObjectNotFound
		}
		return nil, "", "", "", false, err
	}
	directoryID = actualID(directoryID)

	found, err := f.list(ctx, []string{directoryID}, leaf, false, false, f.opt.TrashedOnly, false, func(item *drive.File) bool {
		if !f.opt.SkipGdocs {
			extension, exportName, exportMimeType, isDocument = f.findExportFormat(ctx, item)
			if exportName == leaf {
				info = item
				return true
			}
			if isDocument {
				return false
			}
		}
		if item.Name == leaf {
			info = item
			return true
		}
		return false
	})
	if err != nil {
		return nil, "", "", "", false, err
	}
	if !found {
		return nil, "", "", "", false, fs.ErrorObjectNotFound
	}
	return
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *baseObject) ModTime(ctx context.Context) time.Time {
	modTime, err := time.Parse(timeFormatIn, o.modifiedDate)
	if err != nil {
		fs.Debugf(o, "Failed to read mtime from object: %v", err)
		return time.Now()
	}
	return modTime
}

// SetModTime sets the modification time of the drive fs object
func (o *baseObject) SetModTime(ctx context.Context, modTime time.Time) error {
	// New metadata
	updateInfo := &drive.File{
		ModifiedTime: modTime.Format(timeFormatOut),
	}
	// Set modified date
	var info *drive.File
	err := o.fs.pacer.Call(func() (bool, error) {
		var err error
		info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
			Fields(partialFields).
			SupportsAllDrives(true).
			Context(ctx).Do()
		return o.fs.shouldRetry(ctx, err)
	})
	if err != nil {
		return err
	}
	// Update info from read data
	o.modifiedDate = info.ModifiedTime
	return nil
}

// Storable returns a boolean as to whether this object is storable
func (o *baseObject) Storable() bool {
	return true
}

// addResourceKey adds an X-Goog-Drive-Resource-Keys header for this
// object if required.
func (o *baseObject) addResourceKey(header http.Header) {
	if o.resourceKey != nil {
		header.Add("X-Goog-Drive-Resource-Keys", fmt.Sprintf("%s/%s", o.id, *o.resourceKey))
	}
}

// httpResponse gets an http.Response object for the object
// using the url and method passed in
func (o *baseObject) httpResponse(ctx context.Context, url, method string, options []fs.OpenOption) (req *http.Request, res *http.Response, err error) {
	if url == "" {
		return nil, nil, errors.New("forbidden to download - check sharing permission")
	}
	req, err = http.NewRequestWithContext(ctx, method, url, nil)
	if err != nil {
		return req, nil, err
	}
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	if o.bytes == 0 {
		// Don't supply range requests for 0 length objects as they always fail
		delete(req.Header, "Range")
	}
	o.addResourceKey(req.Header)
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return o.fs.shouldRetry(ctx, err)
	})
	if err != nil {
		return req, nil, err
	}
	return req, res, nil
}
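
// For illustration: open options are translated straight into HTTP
// headers by fs.OpenOptionAddHTTPHeaders, so a ranged open such as
//
//	in, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
//
// ends up sending "Range: bytes=0-1023" on the GET request built above
// (unless the object is zero length, in which case the header is dropped).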

// openDocumentFile represents a documentObject open for reading.
// It updates the object size after a successful read.
type openDocumentFile struct {
	o       *documentObject // Object we are reading for
	in      io.ReadCloser   // reading from here
	bytes   int64           // number of bytes read on this connection
	eof     bool            // whether we have read end of file
	errored bool            // whether we have encountered an error during reading
}

// Read bytes from the object - see io.Reader
func (file *openDocumentFile) Read(p []byte) (n int, err error) {
	n, err = file.in.Read(p)
	file.bytes += int64(n)
	if err != nil && err != io.EOF {
		file.errored = true
	}
	if err == io.EOF {
		file.eof = true
	}
	return
}

// Close the object and update bytes read
func (file *openDocumentFile) Close() (err error) {
	// If end of file, update bytes read
	if file.eof && !file.errored {
		fs.Debugf(file.o, "Updating size of doc after download to %v", file.bytes)
		file.o.bytes = file.bytes
	}
	return file.in.Close()
}

// Check it satisfies the interfaces
var _ io.ReadCloser = (*openDocumentFile)(nil)

// Checks to see if err is a googleapi.Error with reason what
func isGoogleError(err error, what string) bool {
	if gerr, ok := err.(*googleapi.Error); ok {
		for _, error := range gerr.Errors {
			if error.Reason == what {
				return true
			}
		}
	}
	return false
}

// open a url for reading
func (o *baseObject) open(ctx context.Context, url string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	_, res, err := o.httpResponse(ctx, url, "GET", options)
	if err != nil {
		if isGoogleError(err, "cannotDownloadAbusiveFile") {
			if o.fs.opt.AcknowledgeAbuse {
				// Retry acknowledging abuse
				if strings.ContainsRune(url, '?') {
					url += "&"
				} else {
					url += "?"
				}
				url += "acknowledgeAbuse=true"
				_, res, err = o.httpResponse(ctx, url, "GET", options)
			} else {
				err = fmt.Errorf("use the --drive-acknowledge-abuse flag to download this file: %w", err)
			}
		}
		if err != nil {
			return nil, fmt.Errorf("open file failed: %w", err)
		}
	}
	return res.Body, nil
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if o.mimeType == shortcutMimeTypeDangling {
		return nil, errors.New("can't read dangling shortcut")
	}
	if o.v2Download {
		var v2File *drive_v2.File
		err = o.fs.pacer.Call(func() (bool, error) {
			v2File, err = o.fs.v2Svc.Files.Get(actualID(o.id)).
				Fields("downloadUrl").
				SupportsAllDrives(true).
				Context(ctx).Do()
			return o.fs.shouldRetry(ctx, err)
		})
		if err == nil {
			fs.Debugf(o, "Using v2 download: %v", v2File.DownloadUrl)
			o.url = v2File.DownloadUrl
			o.v2Download = false
		}
	}
	return o.baseObject.open(ctx, o.url, options...)
}

func (o *documentObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	// Update the size with what we are reading as it can change from
	// the HEAD in the listing to this GET. This stops rclone marking
	// the transfer as corrupted.
	var offset, end int64 = 0, -1
	var newOptions = options[:0]
	for _, o := range options {
		// Note that Range requests don't work on Google docs:
		// https://developers.google.com/drive/v3/web/manage-downloads#partial_download
		// So do a subset of them manually
		switch x := o.(type) {
		case *fs.RangeOption:
			offset, end = x.Start, x.End
		case *fs.SeekOption:
			offset, end = x.Offset, -1
		default:
			newOptions = append(newOptions, o)
		}
	}
	options = newOptions
	if offset != 0 {
		return nil, errors.New("partial downloads are not supported while exporting Google Documents")
	}
	in, err = o.baseObject.open(ctx, o.url, options...)
	if in != nil {
		in = &openDocumentFile{o: o, in: in}
	}
	if end >= 0 {
		in = readers.NewLimitedReadCloser(in, end-offset+1)
	}
	return
}
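
// Worked example: since exported Google Docs ignore HTTP Range headers, a
// request like &fs.RangeOption{Start: 0, End: 99} is emulated locally -
// the full export stream is opened and then truncated to end-offset+1 =
// 100 bytes with readers.NewLimitedReadCloser. Any range with a non-zero
// Start is rejected outright above.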

func (o *linkObject) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset, limit int64 = 0, -1
	var data = o.content
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(int64(len(data)))
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	if l := int64(len(data)); offset > l {
		offset = l
	}
	data = data[offset:]
	if limit != -1 && limit < int64(len(data)) {
		data = data[:limit]
	}
	return io.NopCloser(bytes.NewReader(data)), nil
}

func (o *baseObject) update(ctx context.Context, updateInfo *drive.File, uploadMimeType string, in io.Reader,
	src fs.ObjectInfo) (info *drive.File, err error) {
	// Make the API request to upload metadata and file data.
	size := src.Size()
	if size >= 0 && size < int64(o.fs.opt.UploadCutoff) {
		// Don't retry, return a retry error instead
		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
			info, err = o.fs.svc.Files.Update(actualID(o.id), updateInfo).
				Media(in, googleapi.ContentType(uploadMimeType), googleapi.ChunkSize(0)).
				Fields(partialFields).
				SupportsAllDrives(true).
				KeepRevisionForever(o.fs.opt.KeepRevisionForever).
				Context(ctx).Do()
			return o.fs.shouldRetry(ctx, err)
		})
		return
	}
	// Upload the file in chunks
	return o.fs.Upload(ctx, in, size, uploadMimeType, o.id, o.remote, updateInfo)
}

// Update the already existing object
//
// Copy the reader into the object updating modTime and size.
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// If o is a shortcut
	if isShortcutID(o.id) {
		// Delete it first
		err := o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
		if err != nil {
			return err
		}
		// Then put the file as a new file
		newObj, err := o.fs.PutUnchecked(ctx, in, src, options...)
		if err != nil {
			return err
		}
		// Update the object
		if newO, ok := newObj.(*Object); ok {
			*o = *newO
		} else {
			fs.Debugf(newObj, "Failed to update object %T from new object %T", o, newObj)
		}
		return nil
	}
	srcMimeType := fs.MimeType(ctx, src)
	updateInfo := &drive.File{
		MimeType:     srcMimeType,
		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
	}
	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}
	newO, err := o.fs.newObjectWithInfo(ctx, src.Remote(), info)
	if err != nil {
		return err
	}
	switch newO := newO.(type) {
	case *Object:
		*o = *newO
	default:
		return errors.New("object type changed by update")
	}
	return nil
}

func (o *documentObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	srcMimeType := fs.MimeType(ctx, src)
	importMimeType := ""
	updateInfo := &drive.File{
		MimeType:     srcMimeType,
		ModifiedTime: src.ModTime(ctx).Format(timeFormatOut),
	}
	if o.fs.importMimeTypes == nil || o.fs.opt.SkipGdocs {
		return errors.New("can't update google document type without --drive-import-formats")
	}
	importMimeType = o.fs.findImportFormat(ctx, updateInfo.MimeType)
	if importMimeType == "" {
		return fmt.Errorf("no import format found for %q", srcMimeType)
	}
	if importMimeType != o.documentMimeType {
		return fmt.Errorf("can't change google document type (o: %q, src: %q, import: %q)", o.documentMimeType, srcMimeType, importMimeType)
	}
	updateInfo.MimeType = importMimeType
	info, err := o.baseObject.update(ctx, updateInfo, srcMimeType, in, src)
	if err != nil {
		return err
	}
	remote := src.Remote()
	remote = remote[:len(remote)-o.extLen]
	newO, err := o.fs.newObjectWithInfo(ctx, remote, info)
	if err != nil {
		return err
	}
	switch newO := newO.(type) {
	case *documentObject:
		*o = *newO
	default:
		return errors.New("object type changed by update")
	}
	return nil
}

func (o *linkObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errors.New("cannot update link files")
}

// Remove an object
func (o *baseObject) Remove(ctx context.Context) error {
	if len(o.parents) > 1 {
		return errors.New("can't delete safely - has multiple parents")
	}
	return o.fs.delete(ctx, shortcutID(o.id), o.fs.opt.UseTrash)
}

// MimeType of an Object if known, "" otherwise
func (o *baseObject) MimeType(ctx context.Context) string {
	return o.mimeType
}

// ID returns the ID of the Object if known, or "" if not
func (o *baseObject) ID() string {
	return o.id
}

// ParentID returns the ID of the Object parent if known, or "" if not
func (o *baseObject) ParentID() string {
	if len(o.parents) > 0 {
		return o.parents[0]
	}
	return ""
}

func (o *documentObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

func (o *linkObject) ext() string {
	return o.baseObject.remote[len(o.baseObject.remote)-o.extLen:]
}

// templates for document link files
const (
	urlTemplate = `[InternetShortcut]{{"\r"}}
URL={{ .URL }}{{"\r"}}
`
	weblocTemplate = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
	<key>URL</key>
	<string>{{ .URL }}</string>
</dict>
</plist>
`
	desktopTemplate = `[Desktop Entry]
Encoding=UTF-8
Name={{ .Title }}
URL={{ .URL }}
Icon={{ .XDGIcon }}
Type=Link
`
	htmlTemplate = `<html>
<head>
<meta http-equiv="refresh" content="0; url={{ .URL }}" />
<title>{{ .Title }}</title>
</head>
<body>
Loading <a href="{{ .URL }}">{{ .Title }}</a>
</body>
</html>
`
)
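
// Illustrative rendering sketch with assumed URL/Title values: these are
// standard text/template strings, so a .url link file can be produced with
//
//	t := template.Must(template.New("url").Parse(urlTemplate))
//	_ = t.Execute(os.Stdout, struct{ URL, Title string }{
//		URL:   "https://docs.google.com/document/d/xyz",
//		Title: "example",
//	})
//
// which emits an [InternetShortcut] block with CRLF line endings via the
// {{"\r"}} actions.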

// Check the interfaces are satisfied
var (
	_ fs.Fs              = (*Fs)(nil)
	_ fs.Purger          = (*Fs)(nil)
	_ fs.CleanUpper      = (*Fs)(nil)
	_ fs.PutStreamer     = (*Fs)(nil)
	_ fs.Copier          = (*Fs)(nil)
	_ fs.Mover           = (*Fs)(nil)
	_ fs.DirMover        = (*Fs)(nil)
	_ fs.Commander       = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier  = (*Fs)(nil)
	_ fs.PutUncheckeder  = (*Fs)(nil)
	_ fs.PublicLinker    = (*Fs)(nil)
	_ fs.ListRer         = (*Fs)(nil)
	_ fs.MergeDirser     = (*Fs)(nil)
	_ fs.Abouter         = (*Fs)(nil)
	_ fs.Object          = (*Object)(nil)
	_ fs.MimeTyper       = (*Object)(nil)
	_ fs.IDer            = (*Object)(nil)
	_ fs.ParentIDer      = (*Object)(nil)
	_ fs.Object          = (*documentObject)(nil)
	_ fs.MimeTyper       = (*documentObject)(nil)
	_ fs.IDer            = (*documentObject)(nil)
	_ fs.ParentIDer      = (*documentObject)(nil)
	_ fs.Object          = (*linkObject)(nil)
	_ fs.MimeTyper       = (*linkObject)(nil)
	_ fs.IDer            = (*linkObject)(nil)
	_ fs.ParentIDer      = (*linkObject)(nil)
)