2015-09-22 19:47:16 +02:00
// Package googlecloudstorage provides an interface to Google Cloud Storage
2014-07-13 18:54:03 +02:00
package googlecloudstorage
/*
Notes

Can't set Updated but can set Metadata on object creation

Patch needs full_control not just read_write

FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
*/
import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
2016-04-20 16:40:40 +02:00
"io/ioutil"
2015-08-18 09:55:09 +02:00
"log"
2014-07-13 18:54:03 +02:00
"net/http"
2016-04-20 16:40:40 +02:00
"os"
2014-07-14 11:45:28 +02:00
"path"
2014-07-13 18:54:03 +02:00
"regexp"
"strings"
2017-06-07 15:16:50 +02:00
"sync"
2014-07-13 18:54:03 +02:00
"time"
2016-06-12 16:06:02 +02:00
"github.com/ncw/rclone/fs"
2018-01-12 17:30:54 +01:00
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/flags"
2018-01-18 21:19:55 +01:00
"github.com/ncw/rclone/fs/config/obscure"
2018-01-12 17:30:54 +01:00
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
2018-01-11 17:29:20 +01:00
"github.com/ncw/rclone/lib/oauthutil"
2016-06-12 16:06:02 +02:00
"github.com/pkg/errors"
2015-08-18 09:55:09 +02:00
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
2014-12-12 21:02:08 +01:00
"google.golang.org/api/googleapi"
2017-09-16 22:46:02 +02:00
storage "google.golang.org/api/storage/v1"
2014-07-13 18:54:03 +02:00
)
const (
	// OAuth client credentials for rclone itself.  The secret is stored
	// obscured and revealed at startup.
	rcloneClientID              = "202264815644.apps.googleusercontent.com"
	rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
	// Time formats: parse with RFC3339, write with full nanosecond precision
	timeFormatIn  = time.RFC3339
	timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
	metaMtime     = "mtime" // key to store mtime under in metadata
	listChunks    = 1000    // chunk size to read directory listings
)
var (
2018-01-12 17:30:54 +01:00
gcsLocation = flags . StringP ( "gcs-location" , "" , "" , "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-noetheast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2)." )
gcsStorageClass = flags . StringP ( "gcs-storage-class" , "" , "" , "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY)." )
2014-07-13 18:54:03 +02:00
// Description of how to auth for this app
2015-08-18 09:55:09 +02:00
storageConfig = & oauth2 . Config {
Scopes : [ ] string { storage . DevstorageFullControlScope } ,
Endpoint : google . Endpoint ,
ClientID : rcloneClientID ,
2018-01-18 21:19:55 +01:00
ClientSecret : obscure . MustReveal ( rcloneEncryptedClientSecret ) ,
2015-08-18 09:55:09 +02:00
RedirectURL : oauthutil . TitleBarRedirectURL ,
2014-07-13 18:54:03 +02:00
}
)
// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "google cloud storage",
		Description: "Google Cloud Storage (this is not Google Drive)",
		NewFs:       NewFs,
		// Config runs the interactive token setup unless a service
		// account file is configured, in which case no token is needed.
		Config: func(name string) {
			if config.FileGet(name, "service_account_file") != "" {
				return
			}
			err := oauthutil.Config("google cloud storage", name, storageConfig)
			if err != nil {
				log.Fatalf("Failed to configure token: %v", err)
			}
		},
		Options: []fs.Option{{
			Name: config.ConfigClientID,
			Help: "Google Application Client Id - leave blank normally.",
		}, {
			Name: config.ConfigClientSecret,
			Help: "Google Application Client Secret - leave blank normally.",
		}, {
			Name: "project_number",
			Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
		}, {
			Name: "service_account_file",
			Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
		}, {
			Name: "object_acl",
			Help: "Access Control List for new objects.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Object owner gets OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "bucketOwnerFullControl",
				Help:  "Object owner gets OWNER access, and project team owners get OWNER access.",
			}, {
				Value: "bucketOwnerRead",
				Help:  "Object owner gets OWNER access, and project team owners get READER access.",
			}, {
				Value: "private",
				Help:  "Object owner gets OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Object owner gets OWNER access, and project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Object owner gets OWNER access, and all Users get READER access.",
			}},
		}, {
			Name: "bucket_acl",
			Help: "Access Control List for new buckets.",
			Examples: []fs.OptionExample{{
				Value: "authenticatedRead",
				Help:  "Project team owners get OWNER access, and all Authenticated Users get READER access.",
			}, {
				Value: "private",
				Help:  "Project team owners get OWNER access [default if left blank].",
			}, {
				Value: "projectPrivate",
				Help:  "Project team members get access according to their roles.",
			}, {
				Value: "publicRead",
				Help:  "Project team owners get OWNER access, and all Users get READER access.",
			}, {
				Value: "publicReadWrite",
				Help:  "Project team owners get OWNER access, and all Users get WRITER access.",
			}},
		}, {
			Name: "location",
			Help: "Location for the newly created buckets.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Empty for default location (US).",
			}, {
				Value: "asia",
				Help:  "Multi-regional location for Asia.",
			}, {
				Value: "eu",
				Help:  "Multi-regional location for Europe.",
			}, {
				Value: "us",
				Help:  "Multi-regional location for United States.",
			}, {
				Value: "asia-east1",
				Help:  "Taiwan.",
			}, {
				Value: "asia-northeast1",
				Help:  "Tokyo.",
			}, {
				Value: "asia-southeast1",
				Help:  "Singapore.",
			}, {
				Value: "australia-southeast1",
				Help:  "Sydney.",
			}, {
				Value: "europe-west1",
				Help:  "Belgium.",
			}, {
				Value: "europe-west2",
				Help:  "London.",
			}, {
				Value: "us-central1",
				Help:  "Iowa.",
			}, {
				Value: "us-east1",
				Help:  "South Carolina.",
			}, {
				Value: "us-east4",
				Help:  "Northern Virginia.",
			}, {
				Value: "us-west1",
				Help:  "Oregon.",
			}},
		}, {
			Name: "storage_class",
			Help: "The storage class to use when storing objects in Google Cloud Storage.",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "MULTI_REGIONAL",
				Help:  "Multi-regional storage class",
			}, {
				Value: "REGIONAL",
				Help:  "Regional storage class",
			}, {
				Value: "NEARLINE",
				Help:  "Nearline storage class",
			}, {
				Value: "COLDLINE",
				Help:  "Coldline storage class",
			}, {
				Value: "DURABLE_REDUCED_AVAILABILITY",
				Help:  "Durable reduced availability storage class",
			}},
		}},
	})
}
// Fs represents a remote storage server
type Fs struct {
	name          string           // name of this remote
	root          string           // the path we are working on if any
	features      *fs.Features     // optional features
	svc           *storage.Service // the connection to the storage server
	client        *http.Client     // authorized client
	bucket        string           // the bucket we are working on
	bucketOKMu    sync.Mutex       // mutex to protect bucket OK
	bucketOK      bool             // true if we have created the bucket
	projectNumber string           // used for finding buckets
	objectACL     string           // used when creating new objects
	bucketACL     string           // used when creating new buckets
	location      string           // location of new buckets
	storageClass  string           // storage class of new buckets
}
// Object describes a storage object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	url      string    // download path
	md5sum   string    // The MD5Sum of the object
	bytes    int64     // Bytes in the object
	modTime  time.Time // Modified time of the object
	mimeType string    // Content-Type of the object (from the API's ContentType)
}
// ------------------------------------------------------------
2015-09-22 19:47:16 +02:00
// Name of the remote (as passed into NewFs)
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Name ( ) string {
2015-08-22 17:53:11 +02:00
return f . name
}
2015-09-22 19:47:16 +02:00
// Root of the remote (as passed into NewFs)
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Root ( ) string {
2015-09-01 21:45:27 +02:00
if f . root == "" {
return f . bucket
}
return f . bucket + "/" + f . root
}
2015-11-07 12:14:46 +01:00
// String converts this Fs to a string
func ( f * Fs ) String ( ) string {
2014-07-13 18:54:03 +02:00
if f . root == "" {
return fmt . Sprintf ( "Storage bucket %s" , f . bucket )
}
return fmt . Sprintf ( "Storage bucket %s path %s" , f . bucket , f . root )
}
2017-01-13 18:21:47 +01:00
// Features returns the optional features of this Fs
func ( f * Fs ) Features ( ) * fs . Features {
return f . features
}
2014-07-13 18:54:03 +02:00
// Pattern to match a storage path
var matcher = regexp . MustCompile ( ` ^([^/]*)(.*)$ ` )
// parseParse parses a storage 'url'
func parsePath ( path string ) ( bucket , directory string , err error ) {
parts := matcher . FindStringSubmatch ( path )
if parts == nil {
2016-06-12 16:06:02 +02:00
err = errors . Errorf ( "couldn't find bucket in storage path %q" , path )
2014-07-13 18:54:03 +02:00
} else {
bucket , directory = parts [ 1 ] , parts [ 2 ]
directory = strings . Trim ( directory , "/" )
}
return
}
2016-04-20 16:40:40 +02:00
func getServiceAccountClient ( keyJsonfilePath string ) ( * http . Client , error ) {
data , err := ioutil . ReadFile ( os . ExpandEnv ( keyJsonfilePath ) )
if err != nil {
2016-06-12 16:06:02 +02:00
return nil , errors . Wrap ( err , "error opening credentials file" )
2016-04-20 16:40:40 +02:00
}
conf , err := google . JWTConfigFromJSON ( data , storageConfig . Scopes ... )
if err != nil {
2016-06-12 16:06:02 +02:00
return nil , errors . Wrap ( err , "error processing credentials" )
2016-04-20 16:40:40 +02:00
}
2018-01-12 17:30:54 +01:00
ctxWithSpecialClient := oauthutil . Context ( fshttp . NewClient ( fs . Config ) )
2016-04-20 16:40:40 +02:00
return oauth2 . NewClient ( ctxWithSpecialClient , conf . TokenSource ( ctxWithSpecialClient ) ) , nil
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string) (fs.Fs, error) {
	var oAuthClient *http.Client
	var err error

	// Prefer a service account file if one is configured, otherwise use
	// the interactive OAuth token flow
	serviceAccountPath := config.FileGet(name, "service_account_file")
	if serviceAccountPath != "" {
		oAuthClient, err = getServiceAccountClient(serviceAccountPath)
		if err != nil {
			log.Fatalf("Failed configuring Google Cloud Storage Service Account: %v", err)
		}
	} else {
		oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
		if err != nil {
			log.Fatalf("Failed to configure Google Cloud Storage: %v", err)
		}
	}

	bucket, directory, err := parsePath(root)
	if err != nil {
		return nil, err
	}
	f := &Fs{
		name:          name,
		bucket:        bucket,
		root:          directory,
		projectNumber: config.FileGet(name, "project_number"),
		objectACL:     config.FileGet(name, "object_acl"),
		bucketACL:     config.FileGet(name, "bucket_acl"),
		location:      config.FileGet(name, "location"),
		storageClass:  config.FileGet(name, "storage_class"),
	}
	f.features = (&fs.Features{
		ReadMimeType:  true,
		WriteMimeType: true,
		BucketBased:   true,
	}).Fill(f)
	// Default both ACLs to "private" when unset
	if f.objectACL == "" {
		f.objectACL = "private"
	}
	if f.bucketACL == "" {
		f.bucketACL = "private"
	}
	// Command line flags override the per-remote configuration
	if *gcsLocation != "" {
		f.location = *gcsLocation
	}
	if *gcsStorageClass != "" {
		f.storageClass = *gcsStorageClass
	}
	// Create a new authorized Drive client.
	f.client = oAuthClient
	f.svc, err = storage.New(f.client)
	if err != nil {
		return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
	}

	if f.root != "" {
		f.root += "/"
		// Check to see if the object exists
		_, err = f.svc.Objects.Get(bucket, directory).Do()
		if err == nil {
			// The remote points at a file, so reset root to its parent
			f.root = path.Dir(directory)
			if f.root == "." {
				f.root = ""
			} else {
				f.root += "/"
			}
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
2016-06-25 22:58:34 +02:00
// Return an Object from a path
2014-07-13 18:54:03 +02:00
//
2016-06-25 22:23:20 +02:00
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func ( f * Fs ) newObjectWithInfo ( remote string , info * storage . Object ) ( fs . Object , error ) {
2015-11-07 12:14:46 +01:00
o := & Object {
fs : f ,
remote : remote ,
2014-07-13 18:54:03 +02:00
}
if info != nil {
o . setMetaData ( info )
} else {
err := o . readMetaData ( ) // reads info and meta, returning an error
if err != nil {
2016-06-25 22:23:20 +02:00
return nil , err
2014-07-13 18:54:03 +02:00
}
}
2016-06-25 22:23:20 +02:00
return o , nil
2014-07-13 18:54:03 +02:00
}
2016-06-25 22:23:20 +02:00
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func ( f * Fs ) NewObject ( remote string ) ( fs . Object , error ) {
2016-06-25 22:58:34 +02:00
return f . newObjectWithInfo ( remote , nil )
2014-07-13 18:54:03 +02:00
}
// listFn is called from list to handle an object.
// remote is the path relative to f.root; isDirectory is true for
// directory prefixes and directory markers.
type listFn func(remote string, object *storage.Object, isDirectory bool) error
2014-07-13 18:54:03 +02:00
// list the objects into the function supplied
//
2016-04-23 22:46:52 +02:00
// dir is the starting directory, "" for root
//
2017-06-11 23:43:31 +02:00
// Set recurse to read sub directories
func ( f * Fs ) list ( dir string , recurse bool , fn listFn ) error {
2016-04-23 22:46:52 +02:00
root := f . root
2016-05-07 15:50:35 +02:00
rootLength := len ( root )
2016-04-23 22:46:52 +02:00
if dir != "" {
root += dir + "/"
}
list := f . svc . Objects . List ( f . bucket ) . Prefix ( root ) . MaxResults ( listChunks )
2017-06-11 23:43:31 +02:00
if ! recurse {
2014-07-13 18:54:03 +02:00
list = list . Delimiter ( "/" )
}
for {
objects , err := list . Do ( )
if err != nil {
2017-06-11 23:43:31 +02:00
if gErr , ok := err . ( * googleapi . Error ) ; ok {
if gErr . Code == http . StatusNotFound {
err = fs . ErrorDirNotFound
}
}
2016-04-21 21:06:21 +02:00
return err
2014-07-13 18:54:03 +02:00
}
2017-06-11 23:43:31 +02:00
if ! recurse {
2014-07-28 19:04:52 +02:00
var object storage . Object
for _ , prefix := range objects . Prefixes {
if ! strings . HasSuffix ( prefix , "/" ) {
continue
}
2017-02-25 17:03:29 +01:00
err = fn ( prefix [ rootLength : len ( prefix ) - 1 ] , & object , true )
2016-04-21 21:06:21 +02:00
if err != nil {
return err
}
}
}
for _ , object := range objects . Items {
2016-04-23 22:46:52 +02:00
if ! strings . HasPrefix ( object . Name , root ) {
2017-02-09 12:01:20 +01:00
fs . Logf ( f , "Odd name received %q" , object . Name )
2016-04-21 21:06:21 +02:00
continue
}
remote := object . Name [ rootLength : ]
2018-03-19 18:42:27 +01:00
// is this a directory marker?
2018-03-21 21:10:00 +01:00
if ( strings . HasSuffix ( remote , "/" ) || remote == "" ) && object . Size == 0 {
2018-03-19 18:42:27 +01:00
if recurse {
// add a directory in if --fast-list since will have no prefixes
err = fn ( remote [ : len ( remote ) - 1 ] , object , true )
if err != nil {
return err
}
}
continue // skip directory marker
}
2016-04-21 21:06:21 +02:00
err = fn ( remote , object , false )
if err != nil {
return err
2014-07-13 18:54:03 +02:00
}
}
if objects . NextPageToken == "" {
break
}
list . PageToken ( objects . NextPageToken )
}
2016-04-21 21:06:21 +02:00
return nil
2014-07-13 18:54:03 +02:00
}
2017-06-30 11:54:14 +02:00
// Convert a list item into a DirEntry
func ( f * Fs ) itemToDirEntry ( remote string , object * storage . Object , isDirectory bool ) ( fs . DirEntry , error ) {
2017-06-11 23:43:31 +02:00
if isDirectory {
2017-06-30 14:37:29 +02:00
d := fs . NewDir ( remote , time . Time { } ) . SetSize ( int64 ( object . Size ) )
2017-06-11 23:43:31 +02:00
return d , nil
2016-04-21 21:06:21 +02:00
}
2017-06-11 23:43:31 +02:00
o , err := f . newObjectWithInfo ( remote , object )
if err != nil {
return nil , err
}
return o , nil
}
2018-03-01 13:11:34 +01:00
// mark the bucket as being OK
func ( f * Fs ) markBucketOK ( ) {
if f . bucket != "" {
f . bucketOKMu . Lock ( )
f . bucketOK = true
f . bucketOKMu . Unlock ( )
}
}
2017-06-11 23:43:31 +02:00
// listDir lists a single directory
func ( f * Fs ) listDir ( dir string ) ( entries fs . DirEntries , err error ) {
2016-04-21 21:06:21 +02:00
// List the objects
2017-06-11 23:43:31 +02:00
err = f . list ( dir , false , func ( remote string , object * storage . Object , isDirectory bool ) error {
entry , err := f . itemToDirEntry ( remote , object , isDirectory )
if err != nil {
return err
}
if entry != nil {
entries = append ( entries , entry )
2016-04-21 21:06:21 +02:00
}
return nil
} )
if err != nil {
2017-06-11 23:43:31 +02:00
return nil , err
2014-07-13 18:54:03 +02:00
}
2018-03-01 13:11:34 +01:00
// bucket must be present if listing succeeded
f . markBucketOK ( )
2017-06-11 23:43:31 +02:00
return entries , err
2014-07-13 18:54:03 +02:00
}
2017-06-11 23:43:31 +02:00
// listBuckets lists the buckets
func ( f * Fs ) listBuckets ( dir string ) ( entries fs . DirEntries , err error ) {
2016-04-23 22:46:52 +02:00
if dir != "" {
2017-06-11 23:43:31 +02:00
return nil , fs . ErrorListBucketRequired
2016-04-23 22:46:52 +02:00
}
2016-04-21 21:06:21 +02:00
if f . projectNumber == "" {
2017-06-11 23:43:31 +02:00
return nil , errors . New ( "can't list buckets without project number" )
2016-04-21 21:06:21 +02:00
}
listBuckets := f . svc . Buckets . List ( f . projectNumber ) . MaxResults ( listChunks )
for {
buckets , err := listBuckets . Do ( )
if err != nil {
2017-06-11 23:43:31 +02:00
return nil , err
2016-04-21 21:06:21 +02:00
}
for _ , bucket := range buckets . Items {
2017-06-30 14:37:29 +02:00
d := fs . NewDir ( bucket . Name , time . Time { } )
2017-06-11 23:43:31 +02:00
entries = append ( entries , d )
2016-04-21 21:06:21 +02:00
}
if buckets . NextPageToken == "" {
break
}
listBuckets . PageToken ( buckets . NextPageToken )
}
2017-06-11 23:43:31 +02:00
return entries , nil
2016-04-21 21:06:21 +02:00
}
2017-06-11 23:43:31 +02:00
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func ( f * Fs ) List ( dir string ) ( entries fs . DirEntries , err error ) {
2016-04-21 21:06:21 +02:00
if f . bucket == "" {
2017-06-11 23:43:31 +02:00
return f . listBuckets ( dir )
2014-07-13 18:54:03 +02:00
}
2017-06-11 23:43:31 +02:00
return f . listDir ( dir )
2014-07-13 18:54:03 +02:00
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively that doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
	if f.bucket == "" {
		return fs.ErrorListBucketRequired
	}
	// Helper batches entries and delivers them via the callback
	list := walk.NewListRHelper(callback)
	err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		return list.Add(entry)
	})
	if err != nil {
		return err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	// Send any remaining batched entries
	return list.Flush()
}
2014-07-13 18:54:03 +02:00
// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
2017-05-28 13:44:22 +02:00
func ( f * Fs ) Put ( in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( fs . Object , error ) {
2015-11-07 12:14:46 +01:00
// Temporary Object under construction
o := & Object {
fs : f ,
2016-02-18 12:35:25 +01:00
remote : src . Remote ( ) ,
2015-11-07 12:14:46 +01:00
}
2017-05-28 13:44:22 +02:00
return o , o . Update ( in , src , options ... )
2014-07-13 18:54:03 +02:00
}
2017-09-16 22:46:02 +02:00
// PutStream uploads to the remote path with the modTime given of indeterminate size
func ( f * Fs ) PutStream ( in io . Reader , src fs . ObjectInfo , options ... fs . OpenOption ) ( fs . Object , error ) {
return f . Put ( in , src , options ... )
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	// Skip the check if we already know the bucket exists
	if f.bucketOK {
		return nil
	}
	// List something from the bucket to see if it exists. Doing it like this enables the use of a
	// service account that only has the "Storage Object Admin" role. See #2193 for details.
	_, err := f.svc.Objects.List(f.bucket).MaxResults(1).Do()
	if err == nil {
		// Bucket already exists
		f.bucketOK = true
		return nil
	} else if gErr, ok := err.(*googleapi.Error); ok {
		// Only a 404 means the bucket is missing; anything else is fatal
		if gErr.Code != http.StatusNotFound {
			return errors.Wrap(err, "failed to get bucket")
		}
	} else {
		return errors.Wrap(err, "failed to get bucket")
	}
	// Creating a bucket needs the project number to be configured
	if f.projectNumber == "" {
		return errors.New("can't make bucket without project number")
	}
	bucket := storage.Bucket{
		Name:         f.bucket,
		Location:     f.location,
		StorageClass: f.storageClass,
	}
	_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
	if err == nil {
		f.bucketOK = true
	}
	return err
}
2015-11-07 16:31:04 +01:00
// Rmdir deletes the bucket if the fs is at the root
2014-07-13 18:54:03 +02:00
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
2016-11-25 22:52:43 +01:00
func ( f * Fs ) Rmdir ( dir string ) error {
2017-06-07 15:16:50 +02:00
f . bucketOKMu . Lock ( )
defer f . bucketOKMu . Unlock ( )
2016-11-25 22:52:43 +01:00
if f . root != "" || dir != "" {
2015-11-07 16:31:04 +01:00
return nil
}
2017-06-07 15:16:50 +02:00
err := f . svc . Buckets . Delete ( f . bucket ) . Do ( )
if err == nil {
f . bucketOK = false
}
return err
2014-07-13 18:54:03 +02:00
}
2015-09-22 19:47:16 +02:00
// Precision returns the precision
2015-11-07 12:14:46 +01:00
func ( f * Fs ) Precision ( ) time . Duration {
2014-07-13 18:54:03 +02:00
return time . Nanosecond
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	// Make sure the destination bucket exists first
	err := f.Mkdir("")
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Temporary Object under construction
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}

	srcBucket := srcObj.fs.bucket
	srcObject := srcObj.fs.root + srcObj.remote
	dstBucket := f.bucket
	dstObject := f.root + remote
	newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
	if err != nil {
		return nil, err
	}
	// Set the metadata for the new object while we have it
	dstObj.setMetaData(newObject)
	return dstObj, nil
}
2016-01-11 13:39:33 +01:00
// Hashes returns the supported hash sets.
2018-01-12 17:30:54 +01:00
func ( f * Fs ) Hashes ( ) hash . Set {
2018-01-18 21:27:52 +01:00
return hash . Set ( hash . MD5 )
2016-01-11 13:39:33 +01:00
}
2014-07-13 18:54:03 +02:00
// ------------------------------------------------------------
2015-09-22 19:47:16 +02:00
// Fs returns the parent Fs
2016-02-18 12:35:25 +01:00
func ( o * Object ) Fs ( ) fs . Info {
2015-11-07 12:14:46 +01:00
return o . fs
2014-07-13 18:54:03 +02:00
}
// Return a string version
2015-11-07 12:14:46 +01:00
func ( o * Object ) String ( ) string {
2014-07-13 18:54:03 +02:00
if o == nil {
return "<nil>"
}
return o . remote
}
2015-09-22 19:47:16 +02:00
// Remote returns the remote path
2015-11-07 12:14:46 +01:00
func ( o * Object ) Remote ( ) string {
2014-07-13 18:54:03 +02:00
return o . remote
}
2016-01-11 13:39:33 +01:00
// Hash returns the Md5sum of an object returning a lowercase hex string
2018-01-12 17:30:54 +01:00
func ( o * Object ) Hash ( t hash . Type ) ( string , error ) {
2018-01-18 21:27:52 +01:00
if t != hash . MD5 {
return "" , hash . ErrUnsupported
2016-01-11 13:39:33 +01:00
}
2014-07-13 18:54:03 +02:00
return o . md5sum , nil
}
// Size returns the size of an object in bytes
2015-11-07 12:14:46 +01:00
func ( o * Object ) Size ( ) int64 {
2014-07-13 18:54:03 +02:00
return o . bytes
}
// setMetaData sets the fs data from a storage.Object
func (o *Object) setMetaData(info *storage.Object) {
	o.url = info.MediaLink
	o.bytes = int64(info.Size)
	o.mimeType = info.ContentType
	// Read md5sum - the API supplies it base64 encoded, we store lowercase hex
	md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
	if err != nil {
		fs.Logf(o, "Bad MD5 decode: %v", err)
	} else {
		o.md5sum = hex.EncodeToString(md5sumData)
	}
	// read mtime out of metadata if available - this is the mtime rclone
	// stored at upload time and is preferred over the server Updated time
	mtimeString, ok := info.Metadata[metaMtime]
	if ok {
		modTime, err := time.Parse(timeFormatIn, mtimeString)
		if err == nil {
			o.modTime = modTime
			return
		}
		fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
	}
	// Fallback to the Updated time
	modTime, err := time.Parse(timeFormatIn, info.Updated)
	if err != nil {
		fs.Logf(o, "Bad time decode: %v", err)
	} else {
		o.modTime = modTime
	}
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
2015-11-07 12:14:46 +01:00
func ( o * Object ) readMetaData ( ) ( err error ) {
2014-07-13 18:54:03 +02:00
if ! o . modTime . IsZero ( ) {
return nil
}
2015-11-07 12:14:46 +01:00
object , err := o . fs . svc . Objects . Get ( o . fs . bucket , o . fs . root + o . remote ) . Do ( )
2014-07-13 18:54:03 +02:00
if err != nil {
2016-06-25 22:23:20 +02:00
if gErr , ok := err . ( * googleapi . Error ) ; ok {
if gErr . Code == http . StatusNotFound {
return fs . ErrorObjectNotFound
}
}
2014-07-13 18:54:03 +02:00
return err
}
o . setMetaData ( object )
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
2015-11-07 12:14:46 +01:00
func ( o * Object ) ModTime ( ) time . Time {
2014-07-13 18:54:03 +02:00
err := o . readMetaData ( )
if err != nil {
2017-02-09 12:01:20 +01:00
// fs.Logf(o, "Failed to read metadata: %v", err)
2014-07-13 18:54:03 +02:00
return time . Now ( )
}
return o . modTime
}
// Returns metadata for an object
func metadataFromModTime ( modTime time . Time ) map [ string ] string {
metadata := make ( map [ string ] string , 1 )
2014-07-29 18:50:07 +02:00
metadata [ metaMtime ] = modTime . Format ( timeFormatOut )
2014-07-13 18:54:03 +02:00
return metadata
}
2015-09-22 19:47:16 +02:00
// SetModTime sets the modification time of the local fs object
2016-03-22 16:07:10 +01:00
func ( o * Object ) SetModTime ( modTime time . Time ) error {
2014-07-13 18:54:03 +02:00
// This only adds metadata so will perserve other metadata
object := storage . Object {
2015-11-07 12:14:46 +01:00
Bucket : o . fs . bucket ,
Name : o . fs . root + o . remote ,
2014-07-13 18:54:03 +02:00
Metadata : metadataFromModTime ( modTime ) ,
}
2015-11-07 12:14:46 +01:00
newObject , err := o . fs . svc . Objects . Patch ( o . fs . bucket , o . fs . root + o . remote , & object ) . Do ( )
2014-07-13 18:54:03 +02:00
if err != nil {
2016-03-22 16:07:10 +01:00
return err
2014-07-13 18:54:03 +02:00
}
2014-07-28 21:07:02 +02:00
o . setMetaData ( newObject )
2016-03-22 16:07:10 +01:00
return nil
2014-07-13 18:54:03 +02:00
}
2015-09-22 19:47:16 +02:00
// Storable returns a boolean as to whether this object is storable
2015-11-07 12:14:46 +01:00
func ( o * Object ) Storable ( ) bool {
2014-07-13 18:54:03 +02:00
return true
}
// Open an object for read
2016-09-10 12:29:57 +02:00
func ( o * Object ) Open ( options ... fs . OpenOption ) ( in io . ReadCloser , err error ) {
2016-09-05 17:08:17 +02:00
req , err := http . NewRequest ( "GET" , o . url , nil )
2014-07-15 12:18:43 +02:00
if err != nil {
return nil , err
}
2016-09-10 12:29:57 +02:00
fs . OpenOptionAddHTTPHeaders ( req . Header , options )
2015-11-07 12:14:46 +01:00
res , err := o . fs . client . Do ( req )
2014-07-13 18:54:03 +02:00
if err != nil {
return nil , err
}
2016-09-10 12:29:57 +02:00
_ , isRanging := req . Header [ "Range" ]
if ! ( res . StatusCode == http . StatusOK || ( isRanging && res . StatusCode == http . StatusPartialContent ) ) {
2014-07-25 19:19:49 +02:00
_ = res . Body . Close ( ) // ignore error
2016-06-12 16:06:02 +02:00
return nil , errors . Errorf ( "bad response: %d: %s" , res . StatusCode , res . Status )
2014-07-13 18:54:03 +02:00
}
return res . Body , nil
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// Make sure the container (bucket) exists before uploading
	err := o.fs.Mkdir("")
	if err != nil {
		return err
	}
	modTime := src.ModTime()

	object := storage.Object{
		Bucket:      o.fs.bucket,
		Name:        o.fs.root + o.remote,
		ContentType: fs.MimeType(src),
		Updated:     modTime.Format(timeFormatOut), // Doesn't get set
		Metadata:    metadataFromModTime(modTime),  // mtime stored here instead
	}
	newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
	if err != nil {
		return err
	}
	// Set the metadata for the new object while we have it
	o.setMetaData(newObject)
	return nil
}
// Remove an object
2015-11-07 12:14:46 +01:00
func ( o * Object ) Remove ( ) error {
return o . fs . svc . Objects . Delete ( o . fs . bucket , o . fs . root + o . remote ) . Do ( )
2014-07-13 18:54:03 +02:00
}
2016-09-21 23:13:24 +02:00
// MimeType of an Object if known, "" otherwise
func ( o * Object ) MimeType ( ) string {
return o . mimeType
}
// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)